id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
8973 | from argparse import ArgumentParser
import os
import numpy as np
from joblib import dump
from mldftdat.workflow_utils import SAVE_ROOT
from mldftdat.models.gp import *
from mldftdat.data import load_descriptors, filter_descriptors
import yaml
def parse_settings(args):
    """Read settings.yaml for the first dataset and copy the grid-generator
    settings (a0, amin, fac_mul) onto the parsed-args namespace in place.

    Assumes the dataset directory layout SAVE_ROOT/DATASETS/<functional>/
    <basis>/<version>/<dataset[_suffix]>/settings.yaml.
    """
    fname = args.datasets_list[0]
    if args.suffix is not None:
        fname = fname + '_' + args.suffix
    fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
                         args.basis, args.version, fname)
    print(fname)
    # NOTE(review): yaml.Loader executes the full YAML schema; fine for
    # trusted local settings files, do not point this at untrusted input.
    with open(os.path.join(fname, 'settings.yaml'), 'r') as f:
        d = yaml.load(f, Loader=yaml.Loader)
    args.gg_a0 = d.get('a0')
    args.gg_amin = d.get('amin')
    args.gg_facmul = d.get('fac_mul')
def parse_dataset(args, i, val=False):
    """Load and subsample the i-th (name, inverse-density) dataset pair.

    Reads pair 2*i / 2*i+1 from args.datasets_list (or args.validation_set
    when val=True), loads descriptors, filters by density cutoff, optionally
    shuffles, and returns every n-th sample of (X, y, rho, rho_data).
    """
    if val:
        fname = args.validation_set[2*i]
        n = int(args.validation_set[2*i+1])
    else:
        fname = args.datasets_list[2*i]
        n = int(args.datasets_list[2*i+1])
    if args.suffix is not None:
        fname = fname + '_' + args.suffix
    fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
                         args.basis, args.version, fname)
    print(fname)
    X, y, rho_data = load_descriptors(fname)
    if val:
        # offset in case repeat datasets are used
        X, y, rho_data = X[n//2+1:,:], y[n//2+1:], rho_data[:,n//2+1:]
    X, y, rho, rho_data = filter_descriptors(X, y, rho_data,
                                             tol=args.density_cutoff)
    print(X.shape, n)
    if args.randomize:
        inds = np.arange(X.shape[0])
        np.random.shuffle(inds)
        X = X[inds,:]
        y = y[inds]
        rho = rho[inds]
        rho_data = rho_data[:,inds]
    # Subsample every n-th point (inverse sampling density).
    return X[::n,:], y[::n], rho[::n], rho_data[:,::n]
def parse_list(lststr, T=int):
    """Split a comma-separated string and convert every entry with T."""
    return list(map(T, lststr.split(',')))
def main():
    """Command-line entry point: parse arguments, load training (and
    optional validation) data, fit the GP exchange model, and dump it
    to args.save_file with joblib."""
    parser = ArgumentParser(description='Trains a GP exchange model')
    parser.add_argument('save_file', type=str)
    parser.add_argument('feature_file', type=str,
                        help='serialized FeatureList object in yaml format')
    parser.add_argument('datasets_list', nargs='+',
                        help='pairs of dataset names and inverse sampling densities')
    parser.add_argument('basis', metavar='basis', type=str,
                        help='basis set code')
    parser.add_argument('--functional', metavar='functional', type=str, default=None,
                        help='exchange-correlation functional, HF for Hartree-Fock')
    parser.add_argument('-r', '--randomize', action='store_true')
    parser.add_argument('-c', '--density-cutoff', type=float, default=1e-4)
    #parser.add_argument('-m', '--model-class', type=str, default=None)
    #parser.add_argument('-k', '--kernel', help='kernel initialization strategy', type=str, default=None)
    parser.add_argument('-s', '--seed', help='random seed', default=0, type=int)
    parser.add_argument('-vs', '--validation-set', nargs='+')
    parser.add_argument('-d', '--delete-k', action='store_true',
                        help='Delete L (LL^T=K the kernel matrix) to save disk space. Need to refit when reloading to calculate covariance.')
    parser.add_argument('--heg', action='store_true', help='HEG exact constraint')
    parser.add_argument('--tail', action='store_true', help='atomic tail exact constraint')
    parser.add_argument('-o', '--desc-order', default=None,
                        help='comma-separated list of descriptor order with no spaces. must start with 0,1.')
    parser.add_argument('-l', '--length-scale', default=None,
                        help='comma-separated list initial length-scale guesses')
    parser.add_argument('--length-scale-mul', type=float, default=1.0,
                        help='Used for automatic length-scale initial guess')
    parser.add_argument('-a', '--agpr', action='store_true',
                        help='Whether to use Additive RBF. If False, use RBF')
    parser.add_argument('-as', '--agpr-scale', default=None)
    parser.add_argument('-ao', '--agpr-order', default=2, type=int)
    parser.add_argument('-an', '--agpr-nsingle', default=1, type=int)
    parser.add_argument('-x', '--xed-y-code', default='CHACHIYO', type=str)
    parser.add_argument('-on', '--optimize-noise', action='store_true',
                        help='Whether to optimzie exponent of density noise.')
    parser.add_argument('-v', '--version', default='c', type=str,
                        help='version of descriptor set. Default c')
    parser.add_argument('--suffix', default=None, type=str,
                        help='customize data directories with this suffix')
    args = parser.parse_args()
    parse_settings(args)
    np.random.seed(args.seed)
    feature_list = FeatureList.load(args.feature_file)
    # Convert comma-separated CLI strings into typed lists.
    if args.length_scale is not None:
        args.length_scale = parse_list(args.length_scale, T=float)
    if args.agpr_scale is not None:
        args.agpr_scale = parse_list(args.agpr_scale, T=float)
    if args.desc_order is not None:
        args.desc_order = parse_list(args.desc_order)
    assert len(args.datasets_list) % 2 == 0, 'Need pairs of entries for datasets list.'
    assert len(args.datasets_list) != 0, 'Need training data'
    nd = len(args.datasets_list) // 2
    if args.validation_set is None:
        nv = 0
    else:
        assert len(args.validation_set) % 2 == 0, 'Need pairs of entries for datasets list.'
        nv = len(args.validation_set) // 2
    # Concatenate all training datasets; rho_data is sample-indexed on axis 1.
    X, y, rho, rho_data = parse_dataset(args, 0)
    for i in range(1, nd):
        Xn, yn, rhon, rho_datan, = parse_dataset(args, i)
        X = np.append(X, Xn, axis=0)
        y = np.append(y, yn, axis=0)
        rho = np.append(rho, rhon, axis=0)
        rho_data = np.append(rho_data, rho_datan, axis=1)
    if nv != 0:
        Xv, yv, rhov, rho_datav = parse_dataset(args, 0, val=True)
        for i in range(1, nv):
            Xn, yn, rhon, rho_datan, = parse_dataset(args, i, val=True)
            Xv = np.append(Xv, Xn, axis=0)
            yv = np.append(yv, yn, axis=0)
            rhov = np.append(rhov, rhon, axis=0)
            rho_datav = np.append(rho_datav, rho_datan, axis=1)
    gpcls = DFTGPR
    gpr = gpcls.from_settings(X, feature_list, args)
    gpr.fit(X, y, add_heg=args.heg, add_tail=args.tail)
    #if args.heg:
    #    gpr.add_heg_limit()
    print('FINAL KERNEL', gpr.gp.kernel_)
    if nv != 0:
        # Report mean absolute error on the held-out validation set.
        pred = gpr.xed_to_y(gpr.predict(Xv), Xv)
        abserr = np.abs(pred - gpr.xed_to_y(yv, Xv))
        print('MAE VAL SET', np.mean(abserr))
    # Always attach the arguments to the object to keep track of settings.
    gpr.args = args
    if args.delete_k:
        gpr.L_ = None
    dump(gpr, args.save_file)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1603161 | <filename>bot.py
import discord
from discord.ext import commands
import asyncio
import mysql.connector
import instaloader
import datetime
import schedule
# Instaloader session reused for all Instagram API calls.
L = instaloader.Instaloader()
USER = 'usernamehere'
# Your preferred way of logging in:
L.load_session_from_file(USER, './session-' + USER)
# MySQL connection for the follower-tracking tables.
db = mysql.connector.connect(
    host = "localhost",
    user ="root",
    passwd = "",
    database = "igtracker"
)
cursor = db.cursor()
# NOTE(review): these date strings are computed once at import time; a bot
# that runs past midnight keeps using stale values. Compute per-call instead.
#today
today = datetime.datetime.now().strftime("%Y-%m-%d")
#yesterday
yesterday = datetime.datetime.now() - datetime.timedelta(days = 1)
yesterday = yesterday.strftime("%Y-%m-%d")
def diff(first, second):
    """Return the elements of first that are absent from second, in order."""
    excluded = set(second)
    return [entry for entry in first if entry not in excluded]
# Bot with "." command prefix; commands below are registered on this client.
client = commands.Bot(command_prefix = ".")
@client.event
async def on_ready():
    # Log the bot identity once the connection to Discord is established.
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
@client.command(pass_context=True)
async def testembed(ctx):
    """Debug command: post a sample two-field embed to the channel."""
    print(str(ctx.message.author) + " issued a command testembed")
    # https://stackoverflow.com/a/44863263
    # https://www.youtube.com/watch?v=XKQWxAaRgG0
    embed = discord.Embed(
        title = "Title",
        description = "Description",
        colour = discord.Colour.blue()
    )
    embed.add_field(name="Field1", value="hi\n123", inline=True)
    embed.add_field(name="Field2", value="hi2\n456", inline=True)
    await client.say(embed=embed)
@client.command(pass_context=True)
async def adduser(ctx, arg):
    """Register an Instagram username for tracking, bound to this channel.

    arg: the Instagram username to track.
    """
    print(str(ctx.message.author) + " issued a command adduser")
    # args = username
    sql = "SELECT * FROM user WHERE username = %s"
    adr = (arg,)
    cursor.execute(sql, adr)
    # NOTE(review): rowcount after a SELECT on an unbuffered cursor may be -1
    # until rows are fetched — verify the cursor is buffered, otherwise this
    # branch logic is unreliable.
    if not cursor.rowcount:
        PROFILE = arg
        profile = instaloader.Profile.from_username(L.context, PROFILE)
        sql = "INSERT INTO user (username, userid, image, discord_channel) VALUES (%s, %s, %s, %s)"
        val = (profile.username, str(profile.userid), profile.profile_pic_url, str(ctx.message.channel.id))
        cursor.execute(sql, val)
        db.commit()
        if (cursor.rowcount >= 1) :
            embed = discord.Embed(
                title = "Success",
                description = "Your Information was added to database.",
                colour = 0xBCF4E4
            )
            embed.add_field(name="Username", value=profile.username, inline=True)
            embed.add_field(name="Userid", value=str(profile.userid), inline=True)
            embed.add_field(name="Bound to Channel", value=str(ctx.message.channel.id), inline=True)
            embed.set_thumbnail(url=profile.profile_pic_url)
            await client.say(embed=embed)
        else:
            embed = discord.Embed(
                title = "Failed",
                description = "Failed to add your username to database",
                colour = 0xECB4D3
            )
            await client.say(embed=embed)
    else:
        # The username already exists in the user table.
        embed = discord.Embed(
            title = "Failed",
            description = "Your account has already in a database.",
            colour = 0xECB4D3
        )
        await client.say(embed=embed)
@client.command(pass_context=True)
async def check(ctx):
    """Manually trigger the daily follower snapshot and summary."""
    print(str(ctx.message.author) + " issued a command check")
    await checkfollowers()
async def checkfollowers():
    """Snapshot today's followers for every tracked account into the
    followers table, then send each account's daily summary embed."""
    today = datetime.datetime.now().strftime("%Y-%m-%d")
    print("checkfollowers function trigged")
    cursor.execute("SELECT * FROM user")
    results = cursor.fetchall()
    for result in results:
        #Load Profile
        # result[1] is the stored Instagram username (see adduser INSERT order).
        PROFILE = result[1]
        profile = instaloader.Profile.from_username(L.context, PROFILE)
        followers = set(profile.get_followers())
        for follower in followers:
            sql = "INSERT INTO followers (username, userid, followed_to, insert_at) VALUES (%s, %s, %s, %s)"
            val = (follower.username, str(follower.userid), str(profile.userid), today)
            cursor.execute(sql, val)
            db.commit()
        # result[2] = tracked account's userid, result[4] = bound Discord
        # channel id — assumed from the adduser column order; verify schema.
        await calculatefollower(result[2],result[4],profile)
#calculatefollower(userid,channelid):
async def calculatefollower(userid, channelid, profile):
    """Compare today's and yesterday's follower snapshots for one account
    and post a daily-summary embed to the bound Discord channel.

    Parameters:
        userid    -- Instagram userid of the tracked account (followed_to).
        channelid -- Discord channel id to post the summary to.
        profile   -- instaloader Profile for the tracked account.

    Bug fixes vs. the original:
    - Dates are computed at call time rather than at module import, so a
      long-running bot does not keep comparing against a stale "today".
    - Unfollowers are computed as diff(yesterday, today). The original used
      only diff(today, yesterday), whose elements are by construction always
      in today's list, so the unfollow branch could never trigger and the
      "Who Unfollow You Today" field was always empty.
    """
    now = datetime.datetime.now()
    today_str = now.strftime("%Y-%m-%d")
    yesterday_str = (now - datetime.timedelta(days=1)).strftime("%Y-%m-%d")

    def snapshot(day):
        # Follower userids recorded for this account on the given date.
        sql = "SELECT * FROM followers WHERE insert_at = %s AND followed_to = %s"
        cursor.execute(sql, (day, userid))
        return [row[2] for row in cursor.fetchall()]

    followers_list_today = snapshot(today_str)
    followers_list_yesterday = snapshot(yesterday_str)

    def usernames_for(userids):
        # Map each userid to "username <userid>" using its earliest DB row.
        labels = []
        for uid in userids:
            sql = "SELECT * FROM followers WHERE userid = %s ORDER BY id LIMIT 1"
            cursor.execute(sql, (uid,))
            for row in cursor.fetchall():
                labels.append(row[1] + " <" + uid + ">")
        return labels

    # New followers: present today, absent yesterday. Unfollowers: reverse.
    follow = usernames_for(diff(followers_list_today, followers_list_yesterday))
    unfollow = usernames_for(diff(followers_list_yesterday, followers_list_today))

    followemb = "\n".join(follow) if follow else "-"
    unfollowemb = "\n".join(unfollow) if unfollow else "-"

    embed = discord.Embed(
        title = "Your Instagram Daily Summary",
        colour = 0xBCF4E4
    )
    print(follow)
    print(unfollow)
    print(followemb)
    print(unfollowemb)
    embed.add_field(name="Username", value=profile.username, inline=True)
    embed.add_field(name="Userid", value=str(profile.userid), inline=True)
    embed.add_field(name="Your Follower Count Yesterday", value=str(len(followers_list_yesterday)), inline=False)
    embed.add_field(name="Your Follower Count Today", value=str(len(followers_list_today)), inline=False)
    embed.add_field(name="Who Unfollow You Today", value=unfollowemb, inline=True)
    embed.add_field(name="Who Follow You Today", value=followemb, inline=True)
    embed.set_thumbnail(url=profile.profile_pic_url)
    await client.send_message(discord.Object(id=str(channelid)), embed=embed)
# NOTE(review): the bot token is a hard-coded placeholder; load it from an
# environment variable or config file before deploying.
client.run('token')
| StarcoderdataPython |
4812529 | <reponame>x0rzkov/imsearch<gh_stars>10-100
import os
from pymongo import MongoClient
class MongoRepository:
    """Thin CRUD wrapper around one MongoDB collection.

    The collection lives in the ``imsearch`` database and is named after
    the index this repository backs. The connection string is read from
    the ``MONGO_URI`` environment variable.
    """

    def __init__(self, index_name):
        mongo_uri = os.environ.get('MONGO_URI')
        self.db = MongoClient(mongo_uri).imsearch[index_name]

    def clean(self):
        """Drop the whole backing collection."""
        self.db.drop()

    def insert_one(self, data):
        """Insert a single document and return the driver's result."""
        return self.db.insert_one(data)

    def insert_many(self, data):
        """Insert a batch of documents and return the driver's result."""
        return self.db.insert_many(data)

    def find_one(self, query):
        """Return the first document matching *query* (or None)."""
        return self.db.find_one(query)

    def find(self, query):
        """Return all documents matching *query* as a list."""
        return list(self.db.find(query))
| StarcoderdataPython |
1761974 | <reponame>pozdnyakov/chromium-crosswalk
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import os
import sys
import json_parse
import schema_util
def DeleteNodes(item, delete_key):
  """Deletes the given nodes in item, recursively, that have |delete_key| as
  an attribute.

  Mutates |item| in place and also returns it. Dicts whose value has a truthy
  |delete_key| are removed by key; list entries with a truthy |delete_key|
  are filtered out.
  """
  def HasKey(thing):
    # A node is deletable when it is a dict carrying a truthy |delete_key|.
    return json_parse.IsDict(thing) and thing.get(delete_key, False)
  if json_parse.IsDict(item):
    toDelete = []
    for key, value in item.items():
      if HasKey(value):
        # Collect first; deleting while iterating would break the loop.
        toDelete.append(key)
      else:
        DeleteNodes(value, delete_key)
    for key in toDelete:
      del item[key]
  elif type(item) == list:
    # Rebuild the list in place, dropping deletable entries and recursing
    # into the survivors.
    item[:] = [DeleteNodes(thing, delete_key)
               for thing in item if not HasKey(thing)]
  return item
def Load(filename):
  """Parses the JSON file at |filename| and returns the resulting object."""
  with open(filename, 'r') as handle:
    schemas = json_parse.Parse(handle.read())
  return schemas
# A dictionary mapping |filename| to the object resulting from loading the JSON
# at |filename|.
_cache = {}
def CachedLoad(filename):
  """Equivalent to Load(filename), but caches results for subsequent calls"""
  if filename not in _cache:
    _cache[filename] = Load(filename)
  # Return a copy of the object so that any changes a caller makes won't affect
  # the next caller.
  return copy.deepcopy(_cache[filename])
| StarcoderdataPython |
148424 | <reponame>Mahas1/Guren<filename>Guren/gifs.py
from discord.ext import commands
import utils.json_loader
class Eval(commands.Cog):
    """Owner-only maintenance commands for the gif-link JSON store."""

    def __init__(self, bot):
        self.bot = bot

    @commands.is_owner()
    @commands.command()
    async def dump_gif(self, ctx):
        """Seed the gifs JSON file with an empty slot per gif category.

        Bug fix: the original called str(kiss, hug, pet, rub), which raises
        TypeError at runtime — str() takes at most three arguments and
        treats the second as an encoding, so it can never join category
        names. Each category now gets its own top-level key instead.
        """
        data = utils.json_loader.read_json("gifs")
        for category in ("kiss", "hug", "pet", "rub"):
            # Only create missing slots; do not clobber existing gif lists.
            data.setdefault(category, None)
        utils.json_loader.write_json(data, "gifs")
        data = utils.json_loader.read_json("gifs")
        print("not ready.")
data = utils.json_loader.read_json("gifs")
print("not ready.")
def setup(bot):
    # Entry point used by discord.py's extension loader to register the cog.
    bot.add_cog(Eval(bot))
| StarcoderdataPython |
67152 | # Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
import os
import pytest
import qisys.archive
from qisys.test.conftest import skip_on_win
import qitoolchain.qipackage
# allow the existing foo/bar/baz names
# pylint: disable=blacklisted-name
# pylint: disable=unused-variable
def test_equality():
    """QiPackage compares by (name, version): equal, ordered, distinct."""
    foo = qitoolchain.qipackage.QiPackage("foo", "1.2")
    foo2 = qitoolchain.qipackage.QiPackage("foo", "1.2")
    foo3 = qitoolchain.qipackage.QiPackage("foo", "1.3")
    bar = qitoolchain.qipackage.QiPackage("bar", "1.2")
    assert foo == foo2
    assert foo2 < foo3
    assert foo != bar
def test_from_archive(tmpdir):
    """Name and version are read back from a package.xml inside an archive."""
    foo = tmpdir.mkdir("foo")
    foo_xml = foo.join("package.xml")
    foo_xml.write("""<package name="foo" version="0.1"/>""")
    archive = qisys.archive.compress(foo.strpath, flat=True)
    package = qitoolchain.qipackage.from_archive(archive)
    assert package.name == "foo"
    assert package.version == "0.1"
def test_skip_package_xml(tmpdir):
    """package.xml itself is never copied into the install destination."""
    foo = tmpdir.mkdir("foo")
    foo_xml = foo.join("package.xml")
    foo_xml.write("""<package name="foo" version="0.1"/>""")
    foo.ensure("include", "foo.h", file=True)
    foo.ensure("lib", "libfoo.so", file=True)
    package = qitoolchain.qipackage.QiPackage("foo", path=foo.strpath)
    dest = tmpdir.join("dest")
    package.install(dest.strpath)
    assert dest.join("include", "foo.h").check(file=True)
    assert dest.join("lib", "libfoo.so").check(file=True)
    assert not dest.join("package.xml").check(file=True)
def test_reads_runtime_manifest(tmpdir):
    """Runtime install honors install_manifest_runtime.txt: only listed
    files are copied and returned."""
    boost_path = tmpdir.mkdir("boost")
    boost_path.ensure("include", "boost.h", file=True)
    boost_path.ensure("lib", "libboost.so", file=True)
    runtime_manifest = boost_path.ensure("install_manifest_runtime.txt", file=True)
    runtime_manifest.write(r"""lib/libboost.so
""")
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_path.strpath)
    dest = tmpdir.join("dest")
    installed = package.install(dest.strpath, components=["runtime"])
    assert not dest.join("include", "boost.h").check(file=True)
    libbost_so = dest.join("lib", "libboost.so")
    assert libbost_so.check(file=True)
    assert installed == ["lib/libboost.so"]
def test_backward_compat_runtime_install(tmpdir):
    """Without a runtime manifest, runtime install still skips headers."""
    boost_path = tmpdir.mkdir("boost")
    boost_path.ensure("include", "boost.h", file=True)
    boost_path.ensure("lib", "libboost.so", file=True)
    boost_path.ensure("package.xml", file=True)
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_path.strpath)
    dest = tmpdir.join("dest")
    installed = package.install(dest.strpath, components=["runtime"])
    assert not dest.join("include", "boost.h").check(file=True)
    libbost_so = dest.join("lib", "libboost.so")
    assert libbost_so.check(file=True)
    assert installed == ["lib/libboost.so"]
def test_reads_release_mask(tmpdir):
    """runtime.mask and release.mask exclude patterns are both applied
    during a release runtime install."""
    qt_path = tmpdir.mkdir("qt")
    qt_path.ensure("include", "qt.h", file=True)
    qt_path.ensure("lib", "QtCore4.lib", file=True)
    qt_path.ensure("lib", "QtCored4.lib", file=True)
    qt_path.ensure("bin", "QtCore4.dll", file=True)
    qt_path.ensure("bin", "QtCored4.dll", file=True)
    runtime_mask = qt_path.ensure("runtime.mask", file=True)
    runtime_mask.write(r"""
# headers
exclude include/.*
# .lib
exclude lib/.*\.lib
""")
    release_mask = qt_path.ensure("release.mask", file=True)
    release_mask.write(r"""
exclude bin/QtCored4.dll
""")
    package = qitoolchain.qipackage.QiPackage("qt", path=qt_path.strpath)
    dest = tmpdir.join("dest")
    package.install(dest.strpath, release=True, components=["runtime"])
    assert dest.join("bin", "QtCore4.dll").check(file=True)
    assert not dest.join("lib", "QtCored4.lib").check(file=True)
def test_include_in_mask(tmpdir):
    """'include' mask lines re-allow files matched by an earlier exclude."""
    qt_path = tmpdir.mkdir("qt")
    qt_path.ensure("bin", "assitant.exe")
    qt_path.ensure("bin", "moc.exe")
    qt_path.ensure("bin", "lrelease.exe")
    qt_path.ensure("bin", "lupdate.exe")
    runtime_mask = qt_path.ensure("runtime.mask", file=True)
    runtime_mask.write(r"""
exclude bin/.*\.exe
include bin/lrelease.exe
include bin/lupdate.exe
""")
    dest = tmpdir.join("dest")
    package = qitoolchain.qipackage.QiPackage("qt", path=qt_path.strpath)
    package.install(dest.strpath, release=True, components=["runtime"])
    assert dest.join("bin", "lrelease.exe").check(file=True)
    assert not dest.join("bin", "moc.exe").check(file=True)
def test_load_deps(tmpdir):
    """load_deps() splits package.xml <depends> into build/run/test sets."""
    libqi_path = tmpdir.mkdir("libqi")
    libqi_path.ensure("package.xml").write(r"""
<package name="libqi">
<depends testtime="true" names="gtest" />
<depends runtime="true" names="boost python" />
</package>
""")
    package = qitoolchain.qipackage.QiPackage("libqi", path=libqi_path.strpath)
    package.load_deps()
    assert package.build_depends == set()
    assert package.run_depends == set(["boost", "python"])
    assert package.test_depends == set(["gtest"])
def test_extract_legacy_bad_top_dir(tmpdir):
    """Legacy archive whose top dir does not match the dest name still
    extracts into dest."""
    src = tmpdir.mkdir("src")
    boost = src.mkdir("boost")
    boost.ensure("lib", "libboost.so", file=True)
    res = qisys.archive.compress(boost.strpath)
    dest = tmpdir.mkdir("dest").join("boost-1.55")
    qitoolchain.qipackage.extract(res, dest.strpath)
    assert dest.join("lib", "libboost.so").check(file=True)
def test_extract_legacy_ok_top_dir(tmpdir):
    """Legacy archive whose top dir matches the dest name extracts cleanly."""
    src = tmpdir.mkdir("src")
    boost = src.mkdir("boost-1.55")
    boost.ensure("lib", "libboost.so", file=True)
    res = qisys.archive.compress(boost.strpath)
    dest = tmpdir.mkdir("dest").join("boost-1.55")
    qitoolchain.qipackage.extract(res, dest.strpath)
    assert dest.join("lib", "libboost.so").check(file=True)
def test_extract_modern(tmpdir):
    """A flat (modern) archive with package.xml extracts into dest."""
    src = tmpdir.mkdir("src")
    src.ensure("package.xml", file=True)
    src.ensure("lib", "libboost.so", file=True)
    output = tmpdir.join("boost.zip")
    res = qisys.archive.compress(src.strpath, output=output.strpath, flat=True)
    dest = tmpdir.mkdir("dest").join("boost-1.55")
    qitoolchain.qipackage.extract(res, dest.strpath)
    assert dest.join("lib", "libboost.so").check(file=True)
def test_installing_test_component(tmpdir):
    """Installing test+runtime components still excludes headers."""
    boost_path = tmpdir.mkdir("boost")
    boost_path.ensure("include", "boost.h", file=True)
    boost_path.ensure("lib", "libboost.so", file=True)
    boost_path.ensure("package.xml", file=True)
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_path.strpath)
    dest = tmpdir.join("dest")
    installed = package.install(dest.strpath, components=["test", "runtime"])
    assert not dest.join("include", "boost.h").check(file=True)
def test_get_set_license(tmpdir):
    """The license property round-trips through the on-disk package.xml."""
    boost_path = tmpdir.mkdir("boost")
    boost_path.join("package.xml").write("""
<package name="boost" version="1.58" />
""")
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_path.strpath)
    assert package.license is None
    package.license = "BSD"
    package2 = qitoolchain.qipackage.QiPackage("boost", path=boost_path.strpath)
    assert package2.license == "BSD"
def test_post_add_noop(tmpdir):
    """post_add() without a post-add attribute is a no-op."""
    boost_path = tmpdir.mkdir("boost")
    boost_path.join("package.xml").write("""
<package name="boost" version="1.58" />
""")
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_path.strpath)
    package.post_add()  # no-op
def test_post_add_does_not_exist(tmpdir):
    """A post-add command missing from PATH raises NotInPath."""
    boost_path = tmpdir.mkdir("boost")
    boost_path.join("package.xml").write(r"""
<package name="boost" version="1.58" post-add="asdf" />
""")
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_path.strpath)
    package.load_package_xml()
    # pylint: disable-msg=E1101
    with pytest.raises(qisys.command.NotInPath):
        package.post_add()
@skip_on_win
def test_post_add(tmpdir):
    """The post-add script runs from the package dir with its arguments."""
    boost_path = tmpdir.mkdir("boost")
    boost_path.join("package.xml").write(r"""
<package name="boost" version="1.58" post-add="post-add.sh hello world" />
""")
    script = boost_path.join("post-add.sh")
    script.write(
        '#!/bin/sh\n'
        'echo $@ > foobar\n'
    )
    # NOTE: 0755 is a Python 2 octal literal — this file targets Python 2.
    os.chmod(script.strpath, 0755)
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_path.strpath)
    package.load_package_xml()
    package.post_add()
    with open(os.path.join(boost_path.strpath, 'foobar')) as f:
        txt = f.read()
    assert "hello world" in txt
| StarcoderdataPython |
1721109 | <filename>Algos/Quick_Sort.py<gh_stars>0
def quicksort(x):
    """Sort x with quicksort (first element as pivot) and return the result.

    The partition step swaps elements of x in place; the recursive calls
    operate on slices (copies), so the caller's list is only partially
    mutated — use the return value for the fully sorted sequence.
    """
    if len(x) <= 1:
        return x
    pivot = x[0]
    boundary = 0  # index of the last element known to be < pivot
    for j in range(1, len(x)):
        if x[j] < pivot:
            boundary += 1
            x[j], x[boundary] = x[boundary], x[j]
    # Move the pivot between the two partitions.
    x[0], x[boundary] = x[boundary], x[0]
    left = quicksort(x[:boundary])
    right = quicksort(x[boundary + 1:])
    return left + [x[boundary]] + right
# Demo run. Bug fixes: the original called quickSort (undefined — the
# function is named quicksort, so this raised NameError), and discarded the
# return value even though quicksort returns the sorted list rather than
# fully sorting its argument in place.
alist = [54,26,93,17,77,31,44,55,20]
alist = quicksort(alist)
print(alist)
| StarcoderdataPython |
144884 | <filename>third_party/cargo/crates.bzl
"""
cargo-raze crate workspace functions
DO NOT EDIT! Replaced on runs of cargo-raze
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository")
def _new_http_archive(name, **kwargs):
    # Declare the http_archive only if no rule with this name exists yet,
    # so repeated workspace loads stay idempotent.
    if not native.existing_rule(name):
        http_archive(name=name, **kwargs)
def _new_git_repository(name, **kwargs):
    # Declare the git repository only if no rule with this name exists yet.
    if not native.existing_rule(name):
        new_git_repository(name=name, **kwargs)
def raze_fetch_remote_crates():
    # Generated by cargo-raze: declares one http_archive per vendored crate,
    # each paired with a checked-in BUILD file under //third_party/cargo.
    _new_http_archive(
        name = "raze__bencher__0_1_5",
        url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/bencher/bencher-0.1.5.crate",
        type = "tar.gz",
        strip_prefix = "bencher-0.1.5",
        build_file = Label("//third_party/cargo/remote:bencher-0.1.5.BUILD"),
    )
    _new_http_archive(
        name = "raze__libc__0_2_74",
        url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/libc/libc-0.2.74.crate",
        type = "tar.gz",
        strip_prefix = "libc-0.2.74",
        build_file = Label("//third_party/cargo/remote:libc-0.2.74.BUILD"),
    )
    _new_http_archive(
        name = "raze__protobuf__2_8_2",
        url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/protobuf/protobuf-2.8.2.crate",
        type = "tar.gz",
        strip_prefix = "protobuf-2.8.2",
        build_file = Label("//third_party/cargo/remote:protobuf-2.8.2.BUILD"),
    )
    _new_http_archive(
        name = "raze__semver__0_10_0",
        url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/semver/semver-0.10.0.crate",
        type = "tar.gz",
        strip_prefix = "semver-0.10.0",
        build_file = Label("//third_party/cargo/remote:semver-0.10.0.BUILD"),
    )
    _new_http_archive(
        name = "raze__semver_parser__0_7_0",
        url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/semver-parser/semver-parser-0.7.0.crate",
        type = "tar.gz",
        strip_prefix = "semver-parser-0.7.0",
        build_file = Label("//third_party/cargo/remote:semver-parser-0.7.0.BUILD"),
    )
| StarcoderdataPython |
3304648 | ##==================================
## External imports
##==================================
import os
import io
import flask
import urllib
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
##==================================
## Internal imports
##==================================
# Import app and cache
from app import app, cache
# Import all layouts
from layouts import loading, filler, lines, pca
# Import callbacks for individual app pages
from callbacks import lines_call, pca_call
# Import functions
from functions.global_functions import unpickleData
##==================================
## Initialize app layout
##==================================
def serve_layout():
    """Top-level app layout: a page-content container plus the URL Location
    component that drives client-side routing."""
    return html.Div([
        html.Div(
            loading.layout(),
            id = 'page-content'
        ),
        dcc.Location(id='url', refresh = False),
    ]
    )
##==========================================
## Callbacks to determine layout based on url
##===========================================
@app.callback(
    Output('page-content', 'children'),
    [
        Input('url', 'pathname'),
    ],
)
def display_page(pathname):
    """Route the current URL path to the matching page layout."""
    # Direct user to correct layout (page)
    if pathname == '/':
        return lines.layout()
    elif pathname == '/pca':
        return pca.layout()
    elif pathname is not None and 'temp' in pathname:
        # Paths containing 'temp' show the loading placeholder.
        return loading.layout()
    else:
        return filler.layout()
##==========================================
## Callbacks for clearing data
##===========================================
@app.callback(
    Output('url', 'pathname'),
    [
        Input('clear-data-link', 'n_clicks'),
    ],
    [
        State('url','pathname')
    ],
    prevent_initial_call=True
)
def clean_refresh(n_clicks, temp_pathname):
    """Re-emit the current path when the clear-data link is clicked,
    forcing the page callbacks to rerun with a clean state."""
    # Prevent updates
    if n_clicks is None:
        raise PreventUpdate
    if temp_pathname is None:
        raise PreventUpdate
    # Update the URL to clear the page
    return temp_pathname
##==========================================
## Callback for downloading data
##===========================================
@app.server.route('/._cache-directory/')
def download_csv():
    """Flask route: serve the session's cached DataFrame as a CSV download.

    The session id arrives as the 'value' query parameter and selects the
    pickled data to export.
    """
    # Get the session ID when requested
    session_id = flask.request.args.get('value')
    # Unpickle the data based on the sessionID
    data = unpickleData(session_id)
    # Use string IO to make CSV for output
    str_io = io.StringIO()
    data.to_csv(str_io, index=False)
    # Flask's send_file needs a binary buffer, so re-encode as UTF-8 bytes.
    mem = io.BytesIO()
    mem.write(str_io.getvalue().encode('utf-8'))
    mem.seek(0)
    str_io.close()
    return flask.send_file(mem,
                           mimetype = 'text/csv',
                           attachment_filename = 'data.csv',
                           as_attachment = True)
##==========================================
## Calling index.py
##===========================================
if __name__ == '__main__':
    # Serve app from specified port
    app.run_server(host='0.0.0.0', port = 5115, debug = False)
| StarcoderdataPython |
121381 | # import XML libraries
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
import HTMLParser
# Function to create an XML structure
def make_problem_XML(
        problem_title='Missing title',
        problem_text=False,
        label_text='Enter your answer below.',
        description_text=False,
        answers=[{'correctness': 'true', 'text': 'Answers are missing'}],
        solution_text = '<p>Missing solution</p>',
        options = {'problem_type': 'MC'}):
    # NOTE(review): the list/dict defaults above are mutable default
    # arguments. They are not mutated anywhere visible here, but replacing
    # them with None-sentinels would be safer if that ever changes.
    """
    make_problem_XML: a function to create an XML object for an edX problem.
    The actual work is done by other functions below,
    make_choice_problem_XML() and make_line_problem_XML(),
    which use the arguments as listed below.
    Arguments:
    - problem_title: The title of the problem. Required.
    - problem_text: The extended text for the problem, including paragraph tags and other HTML.
        This argument is genuinely optional.
    - label_text: The action statement for the problem. Should be a single line of text.
        This is the instruction for the student and is required.
    - description_text: Additional info, like "check all that apply" for those kinds of problems.
        This argument is genuinely optional.
    - answers: A list of dictionaries as follows:
        For Numerical and Text problems:
            [{'answer': a correct answer}, {'answer': another correct answer}, {etc}]
        For MC and Checkbox problems, each item in the list will become an answer choice:
            [{'correctness': 'true' or 'false', 'answer': 'the text for this option'}, {etc}, {etc}]
        The text for MC and Checkbox can include LaTeX and images. No hints currently included.
    - solution_text: The extended text for the solution, including paragraph tags and other HTML.
    - options: A dictionary of options. Currently accepts:
        "problem_type", which can be...
            "MC": Multiple-choice problems
            "Checkbox": Select-all-that-apply. Does partial credit by default.
            "Numerical": Numerical problems, with a 5% tolerance
            "Text": Text-entry problem
            "AnyText": A custom-grader problem that marks any text entered as correct
        "showanswer",
        "weight",
        "rerandomize", and
        "max_attempts",
            which take the typical values for those arguments in edX
        "tolerance" for numerical problems.
            Please send a decimal and we'll interpret it as a percentage. 0.1 = 10% tolerance.
        Later this may include other problem types, partial credit info, etc.
    The default values for these arguments are used for troubleshooting.
    Return: an XML element tree.
    """
    # Create the tree object with its root element
    problem_tag = ET.Element('problem')
    problem_tag.set('display_name', problem_title)
    problem_tree = ET.ElementTree(problem_tag)
    # Add a script tag so our problems can re-render properly
    # with a minimum of download burden.
    # Relies on having Prism.js available.
    script_tag = ET.SubElement(problem_tag, 'script')
    script_tag.set('type', 'text/javascript')
    script_raw = """
$(document).ready(function(){
console.log('highlighting MATLAB syntax');
$('.language-matlab').each(function(e){
window.Prism.highlightAllUnder(this);
});
});
"""
    script_tag.text = script_raw
    # Set other problem options. For partial documentation see:
    # https://edx.readthedocs.io/projects/edx-open-learning-xml/en/latest/components/problem-components.html
    if 'showanswer' in options:
        problem_tag.set('showanswer', options['showanswer'])
    if 'weight' in options:
        problem_tag.set('weight', options['weight'])
    if 'rerandomize' in options:
        problem_tag.set('rerandomize', options['rerandomize'])
    if 'max_attempts' in options:
        problem_tag.set('max_attempts', options['max_attempts'])
    # Add the problem text
    if problem_text is not False:
        problem_tag.text = problem_text
    # Pass the tree to functions that build the rest of the problem XML.
    if options['problem_type'] == 'Numerical' or options['problem_type'] == 'Text':
        return make_line_problem_XML(
            problem_tree, problem_tag, problem_text, label_text, description_text,
            answers, solution_text, options
        )
    elif options['problem_type'] == 'MC' or options['problem_type'] == 'Checkbox':
        return make_choice_problem_XML(
            problem_tree, problem_tag, problem_text, label_text, description_text,
            answers, solution_text, options
        )
    elif options['problem_type'] == 'AnyText':
        return make_anytext_problem_XML(
            problem_tree, problem_tag, problem_text, label_text, description_text,
            answers, solution_text, options
        )
    else:
        # Leaving out error messages until we decide which version of Python we're using.
        # print 'The ' + str(options['problem_type']) + ' problem type is not currently supported.'
        return False
# Function to create the XML structure for MC and checkbox problems
# Parameters are described under make_problem_XML() above.
def make_choice_problem_XML(
        problem_tree,
        problem_tag,
        problem_text=False,
        label_text='Enter your answer below.',
        description_text=False,
        answers=[{'correctness': 'true', 'answer': 'Answers are missing'}],
        solution_text = '<p>Missing solution</p>',
        options = {'problem_type': 'MC'}):
    """Fill in the choice-response body (MC or Checkbox) of a problem tree
    started by make_problem_XML() and return the completed tree."""
    # Create the structure for the problem.
    if options['problem_type'] == 'MC':
        type_tag = ET.SubElement(problem_tag, 'multiplechoiceresponse')
        type_tag.set('type','MultipleChoice')
    elif options['problem_type'] == 'Checkbox':
        type_tag = ET.SubElement(problem_tag, 'choiceresponse')
        # EDC = "every decision counts" partial-credit scheme.
        type_tag.set('partial_credit', 'EDC')
    # Needs some expansion for various extra credit options.
    if 'extra_credit' in options:
        type_tag.set('extra_credit', options['extra_credit'])
    label_tag = ET.SubElement(type_tag, 'label')
    label_tag.text = label_text
    # Checkbox problems get a default instruction line when none is given.
    if options['problem_type'] == 'Checkbox' and description_text is False:
        description_text = 'Check all that apply.'
    if description_text is not False:
        description_tag = ET.SubElement(type_tag, 'description')
        description_tag.text = description_text
    if options['problem_type'] == 'MC':
        choicegroup_tag = ET.SubElement(type_tag, 'choicegroup')
    elif options['problem_type'] == 'Checkbox':
        choicegroup_tag = ET.SubElement(type_tag, 'checkboxgroup')
    # Iterate over the choices and add them one by one.
    for item in answers:
        item_tag = ET.SubElement(choicegroup_tag, 'choice')
        item_tag.set('correct', item['correctness'])
        item_tag.text = item['answer']
        if 'hint' in item:
            hint_tag = ET.SubElement(item_tag, 'choicehint')
            hint_tag.text = item['hint']
    # Create the structure for the solution
    solution_tag = ET.SubElement(type_tag, 'solution')
    solution_div_tag = ET.SubElement(solution_tag, 'div')
    solution_div_tag.set('class', 'detailed-solution')
    explanation_p_tag = ET.SubElement(solution_div_tag, 'p')
    explanation_p_tag.text = 'Explanation'
    explanation_p_tag.tail = solution_text
    return problem_tree
# Function to create the XML structure for numerical or text problems.
# Parameters are described under make_problem_XML() above.
def make_line_problem_XML(
        problem_tree,
        problem_tag,
        problem_text=False,
        label_text='Enter your answer below.',
        description_text=False,
        answers=None,
        solution_text='<p>Missing solution</p>',
        options=None):
    """Build the XML for a single-line (Numerical or Text) response problem.

    Arguments mirror make_problem_XML(). The first entry of `answers` is
    the primary answer; any further entries become <additional_answer>
    tags. Numerical problems get a default 5% tolerance when `options`
    provides none.
    """
    # Avoid mutable default arguments (shared between calls).
    if answers is None:
        answers = [{'answer': '-1'}]
    if options is None:
        options = {'problem_type': 'Text'}
    # Create the structure for the problem.
    if options['problem_type'] == 'Numerical':
        type_tag = ET.SubElement(problem_tag, 'numericalresponse')
        if 'tolerance' not in options:
            options['tolerance'] = 0.05  # 5% tolerance on numerical problems by default.
    else:
        type_tag = ET.SubElement(problem_tag, 'stringresponse')
        type_tag.set('type', 'ci')  # case-insensitive by default.
    # Needs some expansion for various extra credit options.
    # if 'extra_credit' in options:
    #     type_tag.set('extra_credit', options['extra_credit'])
    type_tag.set('answer', answers[0]['answer'])
    label_tag = ET.SubElement(type_tag, 'label')
    label_tag.text = label_text
    if description_text is not False:
        description_tag = ET.SubElement(type_tag, 'description')
        description_tag.text = description_text
    # Add additional answers if they exist. The first answer is already the
    # 'answer' attribute above, so only the remaining entries become
    # <additional_answer> tags. (Previously the loop re-added answers[0],
    # duplicating the primary answer.)
    for item in answers[1:]:
        additional_answer_tag = ET.SubElement(type_tag, 'additional_answer')
        additional_answer_tag.set('answer', item['answer'])
    if options['problem_type'] == 'Numerical':
        input_tag = ET.SubElement(type_tag, 'formulaequationinput')
        tolerance_tag = ET.SubElement(type_tag, 'responseparam')
        tolerance_tag.set('type', 'tolerance')
        # edX expects the tolerance as a percentage string, e.g. '5%'.
        tolerance_tag.set('default', str(int(float(options['tolerance']) * 100)) + '%')
    else:
        input_tag = ET.SubElement(type_tag, 'textline')
        input_tag.set('size', '30')
    # Create the structure for the solution
    solution_tag = ET.SubElement(type_tag, 'solution')
    solution_div_tag = ET.SubElement(solution_tag, 'div')
    solution_div_tag.set('class', 'detailed-solution')
    explanation_p_tag = ET.SubElement(solution_div_tag, 'p')
    explanation_p_tag.text = 'Explanation'
    explanation_p_tag.tail = solution_text
    return problem_tree
# Function to create the XML structure for "anything is correct" problems.
# Parameters are described under make_problem_XML() above.
def make_anytext_problem_XML(
        problem_tree,
        problem_tag,
        problem_text=False,
        label_text='Enter your answer below.',
        description_text=False,
        answers=None,
        solution_text='<p>Missing solution</p>',
        options=None):
    """Build a customresponse problem that accepts any non-empty answer.

    A loncapa/python grading script is embedded that marks every answer
    correct and always shows options['feedback'] as the hint.
    Note: the feedback string is spliced directly into the script, so it
    must not contain an unescaped single quote.
    """
    # Avoid mutable default arguments; also tolerate a caller-supplied
    # options dict that omits 'feedback' (previously a KeyError).
    if answers is None:
        answers = [{'correctness': 'true', 'answer': 'Answers are missing'}]
    if options is None:
        options = {'problem_type': 'AnyText', 'feedback': 'Thank you for your response.'}
    feedback = options.get('feedback', 'Thank you for your response.')
    # Insert the python grading script
    pythonscript = """
<![CDATA[
def test_text(expect, ans):
    if ans:
        return True
def hint_fn(answer_ids, student_answers, new_cmap, old_cmap):
    aid = answer_ids[0]
    hint = ''
    hint = '""" + feedback + """'.format(hint)
    new_cmap.set_hint_and_mode(aid,hint,'always')
]]>
"""
    script_tag = ET.SubElement(problem_tag, 'script')
    script_tag.set('type', 'loncapa/python')
    script_tag.text = pythonscript
    # Make the customresponse tag and its sub-tags
    type_tag = ET.SubElement(problem_tag, 'customresponse')
    type_tag.set('cfn', 'test_text')
    type_tag.set('expect', 'anything')
    textline_tag = ET.SubElement(type_tag, 'textline')
    textline_tag.set('size', '40')
    textline_tag.set('correct_answer', 'anything')
    textline_tag.set('label', 'Your response')
    hintgroup_tag = ET.SubElement(type_tag, 'hintgroup')
    hintgroup_tag.set('hintfn', 'hint_fn')
    # Create the structure for the solution
    solution_tag = ET.SubElement(type_tag, 'solution')
    solution_div_tag = ET.SubElement(solution_tag, 'div')
    solution_div_tag.set('class', 'detailed-solution')
    explanation_p_tag = ET.SubElement(solution_div_tag, 'p')
    explanation_p_tag.text = 'Explanation'
    explanation_p_tag.tail = solution_text
    return problem_tree
def write_problem_file(problem_XML, problem_filename):
    """
    write_problem_file: write a complete edX problem XML structure to disk.
    Arguments:
     - problem_XML: The ElementTree object for the problem.
     - problem_filename: The filename.
    Return: True if successful.
    Outputs: A pretty-printed XML file at 4 spaces per indent
    """
    # HTML entities in the problem text get encoded during the XML-writing
    # step, so we need to decode them here.
    parser = HTMLParser.HTMLParser()
    xml_string = minidom.parseString(ET.tostring(problem_XML.getroot())).toprettyxml(indent="    ")
    xml_string = parser.unescape(xml_string)
    # minidom always emits an XML declaration as the first line; drop that
    # whole line instead of slicing a hard-coded number of characters
    # (the old `xml_string[23:]` broke whenever the declaration length
    # changed, e.g. with \r\n line endings or an encoding attribute).
    if xml_string.startswith('<?xml'):
        xml_string = xml_string.split('\n', 1)[1]
    with open(problem_filename, "w") as f:
        f.write(xml_string)
    # Honor the documented contract ("True if successful"); any I/O error
    # propagates as an exception instead of returning False silently.
    return True
#################
# Testing code
#################
"""
# Make an MC problem
title = 'Sample MC Problem'
text = '<p>test text</p>'
label = 'test label'
answers = [{'answer': 'wrong one', 'correctness': 'false', 'hint':'Don\'t choose the wrong one.'}, {'answer': 'right one', 'correctness': 'true', 'hint':'The right one was right!'}]
solution = '<p>blank solution</p>'
options = {'problem_type': 'MC'}
the_xml = make_problem_XML(
problem_title = title,
problem_text = text,
label_text = label,
answers = answers,
solution_text = solution,
options = options)
write_problem_file(the_xml, 'test_MC_problem.xml')
# Make a checkbox problem
title = 'Sample Checkbox Problem'
text = '<p>test text</p>'
label = 'test label'
answers = [{'answer': 'wrong one', 'correctness': 'false'}, {'answer': 'right one', 'correctness': 'true'}]
solution = '<p>blank solution</p>'
options = {'problem_type': 'Checkbox'}
the_xml = make_problem_XML(
problem_title = title,
problem_text = text,
label_text = label,
answers = answers,
solution_text = solution,
options = options)
write_problem_file(the_xml, 'test_check_problem.xml')
# Make a numerical problem
title = 'Sample Numerical Problem'
text = '<p>The answer is 50</p>'
label = 'test label'
answers = [{'answer': '50'}]
solution = '<p>blank solution</p>'
options = {'problem_type': 'Numerical'}
the_xml = make_problem_XML(
problem_title = title,
problem_text = text,
label_text = label,
answers = answers,
solution_text = solution,
options = options)
write_problem_file(the_xml, 'test_numerical_problem.xml')
# Make a text problem
title = 'Sample Text Problem'
text = '<p>The answer is "kaboom"</p>'
label = 'test label'
answers = [{'answer': 'kaboom'}]
solution = '<p>blank solution</p>'
options = {'problem_type': 'Text'}
the_xml = make_problem_XML(
problem_title = title,
problem_text = text,
label_text = label,
answers = answers,
solution_text = solution,
options = options)
write_problem_file(the_xml, 'test_text_problem.xml')
# Make an AnyText problem
title = 'Sample AnyText Problem'
text = '<p>Any answer should work</p>'
label = 'test label'
answers = [{'answer': 'this should never appear'}]
solution = '<p>blank solution</p>'
options = {'problem_type': 'AnyText', 'feedback':'Thank you for your response.'}
the_xml = make_problem_XML(
problem_title = title,
problem_text = text,
label_text = label,
answers = answers,
solution_text = solution,
options = options)
write_problem_file(the_xml, 'test_anytext_problem.xml')
"""
| StarcoderdataPython |
3684 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -m nibetaseries` python will execute
``__main__.py`` as a script. That means there won't be any
``nibetaseries.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``nibetaseries.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
from __future__ import absolute_import
import os
import argparse
from argparse import RawTextHelpFormatter
from glob import glob
from multiprocessing import cpu_count
from nipype import config as ncfg
def get_parser():
    """Build parser object for the `nibs` command line."""
    from ..__init__ import __version__
    import sys
    verstr = 'nibs v{}'.format(__version__)
    parser = argparse.ArgumentParser(description='NiBetaSeries BIDS arguments',
                                     formatter_class=RawTextHelpFormatter)
    parser.add_argument('bids_dir', help='The directory with the input dataset '
                        'formatted according to the BIDS standard.')
    parser.add_argument('derivatives_pipeline', help='The pipeline that contains '
                        'minimally preprocessed img, brainmask, and confounds.tsv')
    parser.add_argument('output_dir', help='The directory where the output directory '
                        'and files should be stored. If you are running group level analysis '
                        'this folder should be prepopulated with the results of the'
                        'participant level analysis.')
    parser.add_argument('analysis_level', choices=['participant', 'group'],
                        help='Level of the analysis that will be performed '
                        'Multiple participant level analyses can be run independently '
                        '(in parallel) using the same output_dir')
    parser.add_argument('-v', '--version', action='version',
                        version=verstr)
    # Atlas Arguments (Required Options)
    # Each atlas option is required only when its counterpart was given on
    # the command line (hence the sys.argv sniffing): they work as a pair.
    atlas_args = parser.add_argument_group('Required Atlas Arguments')
    atlas_args.add_argument('-a', '--atlas-img', action='store',
                            required=('-l' in sys.argv or '--atlas-lut' in sys.argv),
                            help='input atlas nifti where each voxel within a "region" '
                                 'is labeled with the same integer and there is a unique '
                                 'integer associated with each region of interest.')
    atlas_args.add_argument('-l', '--atlas-lut', action='store',
                            required=('-a' in sys.argv or '--atlas-img' in sys.argv),
                            help='atlas look up table (tsv) formatted with the columns: '
                                 'index, regions which correspond to the regions in the '
                                 'nifti file specified by --atlas-img.')
    # preprocessing options
    proc_opts = parser.add_argument_group('Options for processing')
    proc_opts.add_argument('--estimator', default='lss',
                           choices=['lss', 'lsa'],
                           help='beta series modeling method')
    proc_opts.add_argument('-sm', '--smoothing-kernel', action='store', type=float, default=6.0,
                           help='select a smoothing kernel (mm)')
    proc_opts.add_argument('-hp', '--high-pass', action='store', type=float,
                           default=0.0078125, help='high pass filter (Hz)')
    proc_opts.add_argument('-c', '--confounds', help='The confound column names '
                           'that are to be included in nuisance regression. '
                           'write the confounds you wish to include separated by a space',
                           nargs="+")
    proc_opts.add_argument('--hrf-model', default='glover',
                           choices=['glover', 'spm', 'fir',
                                    'glover + derivative',
                                    'glover + derivative + dispersion',
                                    'spm + derivative',
                                    'spm + derivative + dispersion'],
                           help='convolve your regressors '
                                'with one of the following hemodynamic response functions')
    proc_opts.add_argument('--fir-delays', default=None,
                           nargs='+', type=int, help='FIR delays in volumes',
                           metavar='VOL')
    proc_opts.add_argument('-w', '--work-dir', help='directory where temporary files '
                           'are stored (i.e. non-essential files). '
                           'This directory can be deleted once you are reasonably '
                           'certain nibs finished as expected.')
    # Image Selection options
    image_opts = parser.add_argument_group('Options for selecting images')
    # Fix: attach --participant-label to the image-selection group like its
    # siblings (it was previously added to `parser`, leaving it stranded
    # outside the group in the --help output). The parsed namespace
    # attribute is unchanged, so callers are unaffected.
    image_opts.add_argument('--participant-label', nargs="+",
                            help='The label(s) of the participant(s) '
                                 'that should be analyzed. The label '
                                 'corresponds to sub-<participant_label> from the BIDS spec '
                                 '(so it does not include "sub-"). If this parameter is not '
                                 'provided all subjects should be analyzed. Multiple '
                                 'participants can be specified with a space separated list.')
    image_opts.add_argument('--session-label', action='store',
                            default=None, help='select a session to analyze')
    image_opts.add_argument('-t', '--task-label', action='store',
                            default=None, help='select a specific task to be processed')
    image_opts.add_argument('--run-label', action='store',
                            default=None, help='select a run to analyze')
    image_opts.add_argument('-sp', '--space-label', action='store', default='MNI152NLin2009cAsym',
                            choices=['MNI152NLin2009cAsym'],
                            help='select a bold derivative in a specific space to be used')
    image_opts.add_argument('--description-label', action='store',
                            default=None, help='select a bold file with particular '
                                               '`desc` label to process')
    image_opts.add_argument('--exclude-description-label', action='store_true',
                            default=False, help='exclude this `desc` label from nibetaseries')
    # performance options
    g_perfm = parser.add_argument_group('Options to handle performance')
    # NOTE(review): '-n-cpus' is an unusual single-dash long alias; kept
    # for backward compatibility with existing invocations.
    g_perfm.add_argument('--nthreads', '-n-cpus', action='store', type=int,
                         help='maximum number of threads across all processes')
    g_perfm.add_argument('--use-plugin', action='store', default=None,
                         help='nipype plugin configuration file')
    # misc options
    misc = parser.add_argument_group('misc options')
    misc.add_argument('--graph', action='store_true', default=False,
                      help='generates a graph png of the workflow')
    return parser
def main():
    """Command-line entry point.

    Parses the CLI arguments, prepares output/log/work directories,
    resolves the subject list and nipype plugin settings, then builds and
    runs the participant-level NiBetaSeries workflow. Group-level
    analysis is not implemented yet and raises NotImplementedError.
    """
    from ..workflows.base import init_nibetaseries_participant_wf
    # get commandline options
    opts = get_parser().parse_args()
    # check inputs: FIR modeling is meaningless without explicit delays
    if (opts.hrf_model == 'fir') and (opts.fir_delays is None):
        raise ValueError('If the FIR HRF model is selected, '
                         'FIR delays must be provided.')
    # Set up directories
    # TODO: set up some sort of versioning system
    bids_dir = os.path.abspath(opts.bids_dir)
    derivatives_pipeline_dir = os.path.join(bids_dir, 'derivatives', opts.derivatives_pipeline)
    output_dir = os.path.abspath(opts.output_dir)
    os.makedirs(output_dir, exist_ok=True)
    log_dir = os.path.join(output_dir, 'logs')
    os.makedirs(log_dir, exist_ok=True)
    if opts.work_dir:
        work_dir = os.path.abspath(opts.work_dir)
    else:
        # default scratch location under the current working directory
        work_dir = os.path.join(os.getcwd(), 'nibetaseries_work')
    os.makedirs(work_dir, exist_ok=True)
    # only for a subset of subjects
    if opts.participant_label:
        subject_list = opts.participant_label
    # for all subjects: derive labels from the sub-* directory names
    else:
        subject_dirs = glob(os.path.join(bids_dir, "sub-*"))
        subject_list = [subject_dir.split("-")[-1] for subject_dir in subject_dirs]
    # Nipype plugin configuration
    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        from yaml import load as loadyml
        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        plugin_settings.setdefault('plugin_args', {})
    else:
        # Defaults
        plugin_settings = {
            'plugin': 'MultiProc',
            'plugin_args': {
                'raise_insufficient': False,
                'maxtasksperchild': 1,
            }
        }
    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    nthreads = plugin_settings['plugin_args'].get('n_procs')
    # Permit overriding plugin config with specific CLI options
    if nthreads is None or opts.nthreads is not None:
        nthreads = opts.nthreads
    if nthreads is None or nthreads < 1:
        # fall back to all available cores
        nthreads = cpu_count()
    plugin_settings['plugin_args']['n_procs'] = nthreads
    # Nipype config (logs and execution)
    ncfg.update_config({
        'logging': {'log_directory': log_dir,
                    'log_to_file': True},
        'execution': {'crashdump_dir': log_dir,
                      'crashfile_format': 'txt',
                      'parameterize_dirs': False},
    })
    # running participant level
    if opts.analysis_level == "participant":
        nibetaseries_participant_wf = init_nibetaseries_participant_wf(
            estimator=opts.estimator,
            atlas_img=os.path.abspath(opts.atlas_img),
            atlas_lut=os.path.abspath(opts.atlas_lut),
            bids_dir=bids_dir,
            derivatives_pipeline_dir=derivatives_pipeline_dir,
            exclude_description_label=opts.exclude_description_label,
            fir_delays=opts.fir_delays,
            hrf_model=opts.hrf_model,
            high_pass=opts.high_pass,
            output_dir=output_dir,
            run_label=opts.run_label,
            selected_confounds=opts.confounds,
            session_label=opts.session_label,
            smoothing_kernel=opts.smoothing_kernel,
            space_label=opts.space_label,
            subject_list=subject_list,
            task_label=opts.task_label,
            description_label=opts.description_label,
            work_dir=work_dir,
        )
        if opts.graph:
            nibetaseries_participant_wf.write_graph(graph2use='colored',
                                                    format='svg',
                                                    simple_form=True)
        try:
            nibetaseries_participant_wf.run(**plugin_settings)
        except RuntimeError as e:
            # nipype raises this at the end of a run when individual nodes
            # crashed; report it without a traceback, re-raise anything else
            if "Workflow did not execute cleanly" in str(e):
                print("Workflow did not execute cleanly")
            else:
                raise e
    elif opts.analysis_level == "group":
        raise NotImplementedError('group analysis not currently implemented')
def init():
    # Guard against executing this module file directly; the installed
    # `nibs` console script is the supported entry point.
    if __name__ == "__main__":
        raise RuntimeError("NiBetaSeries/cli/run.py should not be run directly;\n"
                           "Please `pip install` NiBetaSeries and use the `nibs` command")


init()
| StarcoderdataPython |
1688833 | <filename>apps/dg_test/img2tensor.py
import numpy as np
import sys
import getopt
import os.path
import os
from PIL import Image
def createImagelist(imageDir, imageListDir):
    """Append the paths of all png/jpg/jpeg images in imageDir to the
    list file at imageListDir and return imageListDir.

    Uses glob instead of the previous `os.system('ls ... >> ...')` shell
    calls: no shell is spawned, filenames with spaces or shell
    metacharacters are handled safely, and a pattern with no matches no
    longer prints an `ls` error to the terminal.
    """
    import glob
    with open(imageListDir, 'a') as list_file:
        for ext in ('png', 'jpg', 'jpeg'):
            print('')
            print('Reading ' + ext + ' images...')
            # sorted() reproduces the alphabetical order `ls` emitted.
            for path in sorted(glob.glob(os.path.join(imageDir, '*.' + ext))):
                list_file.write(path + '\n')
    return imageListDir
def load_image(fileName):
    # Decode the image file, convert the pixels to float32, and hand back
    # a flattened (1-D) copy of the data.
    pixels = np.asarray(Image.open(fileName), dtype=np.float32)
    return pixels.flatten()
# ---- command-line driver ----
# Expects exactly three option/value pairs: -d <dir> -i <list> -o <tensor>.
if ((len(sys.argv) != 7)):
    print('Usage: python img2tensor.py -d <image_directory> -i <imagetag.txt> -o <output_tensor.f32>')
    quit()
opts, args = getopt.getopt(sys.argv[1:], 'd:i:o:')
imageDir = ''
tensorDir = ''
imageListDir = ''
for opt, arg in opts:
    if opt == '-d':
        imageDir = arg
    elif opt == '-i':
        imageListDir = arg
    elif opt == '-o':
        tensorDir = arg
# Write the image list file, then read it back as the batch definition.
list_filename = createImagelist(imageDir, imageListDir)
with open(list_filename) as f:
    imglist = f.readlines()
imglist = [x.strip() for x in imglist]
#os.system('rm ' + imageListDir)
num_images = len(imglist)
if num_images == 0:
    print('')
    print('There are no images found in the directory: ' + imageDir)
    quit()
print('')
print('Total number of images read: ' + str(num_images))
print('Creating a tensor with batch size ' + str(num_images) + '...')
# Read images and convert to tensor: every image is flattened and
# concatenated into one long float32 vector.
# NOTE(review): np.append copies the whole buffer each iteration
# (quadratic); np.concatenate over a list would be linear.
op_tensor = np.array([], dtype=np.float32)
for img in imglist:
    rgbBuf = load_image(img)
    op_tensor = np.append(op_tensor, rgbBuf)
# NOTE(review): the tensor is binary data; consider opening with 'wb'.
with open(tensorDir, 'w') as f:
    op_tensor.astype('float32').tofile(f)
print('')
print('Done')
| StarcoderdataPython |
88849 | <reponame>rsketine/neon<gh_stars>1000+
#!/usr/bin/env python
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from argparse import ArgumentParser
from convert_xml_to_json import convert_xml_to_json
import numpy as np
import os
import tarfile
import ingest_utils as util
from collections import OrderedDict
from tqdm import tqdm
from neon.util.persist import get_data_cache_or_nothing
def get_ssd_config(img_reshape, inference=False):
    """Build the aeon dataloader/SSD configuration for Pascal VOC.

    Arguments:
    - img_reshape: (height, width) tuple; only (300, 300) and (512, 512)
      are supported, since they determine the prior-box layer settings.
    - inference: when True, use batch size 1 and omit augmentation.
    Returns an OrderedDict ready to be serialized as a config file.
    Raises ValueError for unsupported image shapes.
    """
    ssd_config = OrderedDict()
    ssd_config['batch_size'] = 32
    ssd_config['shuffle_enable'] = True
    ssd_config['shuffle_manifest'] = True
    if inference:
        # single-image batches for evaluation
        ssd_config['batch_size'] = 1
    ssd_config['block_size'] = 50
    ssd_config['cache_directory'] = get_data_cache_or_nothing(subdir='pascalvoc_cache')
    # ETL pipeline: ground-truth box targets plus the RGB image itself.
    ssd_config["etl"] = [{
        "type": "localization_ssd",
        "height": img_reshape[0],
        "width": img_reshape[1],
        "max_gt_boxes": 500,
        "class_names": ["__background__", "aeroplane", "bicycle", "bird", "boat",
                        "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
                        "dog", "horse", "motorbike", "person", "pottedplant",
                        "sheep", "sofa", "train", "tvmonitor"]
    }, {
        "type": "image",
        "height": img_reshape[0],
        "width": img_reshape[1],
        "channels": 3
    }]
    if not inference:
        # Training-time augmentation: photometric jitter plus the SSD
        # paper's batch samplers at increasing jaccard-overlap thresholds.
        ssd_config["augmentation"] = [{
            "type": "image",
            "crop_enable": False,
            "flip_enable": True,
            "expand_ratio": [1., 4.],
            "expand_probability": 0.5,
            # "emit_constraint_type": "center", TODO: enable when adds support for no gt boxes
            "brightness": [0.9, 1.1],
            "hue": [-18, 18],
            "saturation": [0.9, 1.1],
            "contrast": [0.9, 1.1],
            "batch_samplers":
            [
                {
                    "max_sample": 1,
                    "max_trials": 1
                },
                {
                    "max_sample": 1,
                    "max_trials": 50,
                    "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
                    "sample_constraint": {"min_jaccard_overlap": 0.1}
                },
                {
                    "max_sample": 1,
                    "max_trials": 50,
                    "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
                    "sample_constraint": {"min_jaccard_overlap": 0.3}
                },
                {
                    "max_sample": 1,
                    "max_trials": 50,
                    "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
                    "sample_constraint": {"min_jaccard_overlap": 0.5}
                },
                {
                    "max_sample": 1,
                    "max_trials": 50,
                    "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
                    "sample_constraint": {"min_jaccard_overlap": 0.7}
                },
                {
                    "max_sample": 1,
                    "max_trials": 50,
                    "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
                    "sample_constraint": {"min_jaccard_overlap": 0.9}
                },
                {
                    "max_sample": 1,
                    "max_trials": 50,
                    "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
                    "sample_constraint": {"max_jaccard_overlap": 1.0, "min_jaccard_overlap": 0.1}
                }
            ]
        }]
    # Prior-box (default box) layer settings depend on the input size.
    if img_reshape == (300, 300):
        ssd_config['ssd_config'] = OrderedDict(
            [('conv4_3', {'min_sizes': 30.0, 'max_sizes': 60.0,
                          'aspect_ratios': 2.0, 'step': 8, 'normalize': True}),
             ('fc7', {'min_sizes': 60.0, 'max_sizes': 111.0,
                      'aspect_ratios': (2.0, 3.0), 'step': 16}),
             ('conv6_2', {'min_sizes': 111.0, 'max_sizes': 162.0,
                          'aspect_ratios': (2.0, 3.0), 'step': 32}),
             ('conv7_2', {'min_sizes': 162.0, 'max_sizes': 213.0,
                          'aspect_ratios': (2.0, 3.0), 'step': 64}),
             ('conv8_2', {'min_sizes': 213.0, 'max_sizes': 264.0,
                          'aspect_ratios': 2.0, 'step': 100}),
             ('conv9_2', {'min_sizes': 264.0, 'max_sizes': 315.0,
                          'aspect_ratios': 2.0, 'step': 300})])
    elif img_reshape == (512, 512):
        ssd_config['ssd_config'] = OrderedDict(
            [('conv4_3', {'min_sizes': 35.84, 'max_sizes': 76.80,
                          'aspect_ratios': 2.0, 'step': 8, 'normalize': True}),
             ('fc7', {'min_sizes': 76.80, 'max_sizes': 153.6,
                      'aspect_ratios': (2.0, 3.0), 'step': 16}),
             ('conv6_2', {'min_sizes': 153.6, 'max_sizes': 230.4,
                          'aspect_ratios': (2.0, 3.0), 'step': 32}),
             ('conv7_2', {'min_sizes': 230.4, 'max_sizes': 307.2,
                          'aspect_ratios': (2.0, 3.0), 'step': 64}),
             ('conv8_2', {'min_sizes': 307.2, 'max_sizes': 384.0,
                          'aspect_ratios': 2.0, 'step': 128}),
             ('conv9_2', {'min_sizes': 384.0, 'max_sizes': 460.8,
                          'aspect_ratios': 2.0, 'step': 256}),
             ('conv10_2', {'min_sizes': 460.8, 'max_sizes': 537.8,
                           'aspect_ratios': 2.0, 'step': 512})])
    else:
        raise ValueError("Image shape of {} not supported.".format(img_reshape))
    return ssd_config
def extract_tarfiles(tarfiles, out_dir):
    """Unpack each archive in `tarfiles` into `out_dir`, announcing each one."""
    for archive_path in tarfiles:
        with tarfile.open(archive_path, 'r') as archive:
            print("Extracting {} to {}".format(archive_path, out_dir))
            archive.extractall(out_dir)
def get_tag_list(index_file):
    """Read one tag per line from `index_file`, dropping the trailing
    line separator, and return them as a list."""
    tags = []
    with open(index_file) as handle:
        for line in handle:
            tags.append(line.rstrip(os.linesep))
    return tags
def ingest_pascal(data_dir, out_dir, img_reshape=(300, 300), overwrite=False, skip_untar=False):
    """Ingest the Pascal VOC 2007/2012 datasets for SSD training.

    Untars the VOC archives, converts each XML annotation to json,
    copies/resizes each image, writes train/val manifest CSVs, and emits
    the SSD train/val config files plus a top-level config.

    Arguments:
    - data_dir: directory containing the downloaded VOC tar files.
    - out_dir: destination; everything lands under out_dir/VOCdevkit.
    - img_reshape: (height, width) target used to name and configure
      the outputs (only 300x300 and 512x512 are supported downstream).
    - overwrite: re-ingest even if the manifests already exist.
    - skip_untar: skip extraction when the archives are already unpacked.
    """
    assert img_reshape is not None, "Target image reshape required."
    hw = '{}x{}'.format(img_reshape[0], img_reshape[1])
    datasets = ['VOC2007', 'VOC2012']
    tar_files = {'VOC2007': ['VOCtrainval_06-Nov-2007.tar', 'VOCtest_06-Nov-2007.tar'],
                 'VOC2012': ['VOCtrainval_11-May-2012.tar']}
    index_name = {'trainval': 'trainval.txt', 'test': 'test.txt'}
    manifest = {'trainval': [], 'test': []}
    root_dir = os.path.join(out_dir, 'VOCdevkit')
    train_manifest = os.path.join(root_dir, 'train_{}.csv'.format(hw))
    val_manifest = os.path.join(root_dir, 'val_{}.csv'.format(hw))
    # Existing manifests mean a previous ingest completed; bail out early.
    if os.path.exists(train_manifest) and os.path.exists(val_manifest) and not overwrite:
        print("Manifest files already found, skipping ingest.")
        print("Use --overwrite flag to force re-ingest.")
        return
    for year in datasets:
        tags = {'trainval': [], 'test': []}
        # define paths
        if skip_untar is False:
            tarfiles = [os.path.join(data_dir, tar) for tar in tar_files[year]]
            extract_tarfiles(tarfiles, out_dir)
        # read the index files and build a list of tags to process
        # in PASCALVOC, each tag (e.g. '000032') refers to an image (000032.jpg)
        # and an annotation XML file (000032.xml)
        for sets in index_name.keys():
            index_file = os.path.join(root_dir, year, 'ImageSets', 'Main', index_name[sets])
            # VOC2012 has no test split, so the index file may not exist.
            if os.path.exists(index_file):
                tag_list = get_tag_list(index_file)
                tags[sets].extend(tag_list)
                print('Found {} images in {}'.format(len(tag_list), index_file))
        img_folder = os.path.join(root_dir, year, 'JPEGImages')
        annot_folder = os.path.join(root_dir, year, 'Annotations')
        # create data folders to save converted images and annotations
        target_img_folder = os.path.join(root_dir, year, 'JPEGImages-converted')
        target_annot_folder = os.path.join(root_dir, year, 'Annotations-json')
        print('Processing {}'.format(year))
        util.make_dir(target_img_folder)
        util.make_dir(target_annot_folder)
        all_tags = tags['trainval'] + tags['test']  # process all the tags in our index files.
        for tag in tqdm(all_tags):
            image = os.path.join(img_folder, tag + '.jpg')
            annot = os.path.join(annot_folder, tag + '.xml')
            assert os.path.exists(image)
            assert os.path.exists(annot)
            target_image = os.path.join(target_img_folder, tag + '.jpg')
            target_annot = os.path.join(target_annot_folder, tag + '.json')
            # convert the annotations to json, including difficult objects
            convert_xml_to_json(annot, target_annot, difficult=True, img_reshape=None)
            util.resize_image(image, target_image, img_reshape=None)
            # NOTE(review): these membership tests scan lists (O(n) per
            # tag); sets of the tag lists would make this loop O(1) each.
            if tag in tags['trainval']:
                manifest['trainval'].append((target_image, target_annot))
            elif tag in tags['test']:
                manifest['test'].append((target_image, target_annot))
    # Fixed seed so the shuffled training manifest is reproducible.
    np.random.seed(0)
    np.random.shuffle(manifest['trainval'])
    util.create_manifest(train_manifest, manifest['trainval'], root_dir)
    util.create_manifest(val_manifest, manifest['test'], root_dir)
    # write SSD CONFIG
    ssd_config = get_ssd_config(img_reshape)
    ssd_config_path = os.path.join(root_dir, 'pascalvoc_ssd_{}.cfg'.format(hw))
    util.write_ssd_config(ssd_config, ssd_config_path, True)
    # write SSD VAL CONFIG
    ssd_config_val = get_ssd_config(img_reshape, True)
    ssd_config_path_val = os.path.join(root_dir, 'pascalvoc_ssd_{}_val.cfg'.format(hw))
    util.write_ssd_config(ssd_config_val, ssd_config_path_val, True)
    config_path = os.path.join(root_dir, 'pascalvoc_{}.cfg'.format(hw))
    config = {'manifest': '[train:{}, val:{}]'.format(train_manifest, val_manifest),
              'manifest_root': root_dir,
              'epochs': 230,
              'height': img_reshape[0],
              'width': img_reshape[1],
              'ssd_config': '[train:{}, val:{}]'.format(ssd_config_path, ssd_config_path_val)
              }
    util.write_config(config, config_path)
if __name__ == '__main__':
    # Command-line front end for ingest_pascal().
    parser = ArgumentParser()
    parser.add_argument('--input_dir', required=True, help='path to directory with vocdevkit data')
    parser.add_argument('--output_dir', required=True, help='output directory')
    parser.add_argument('--overwrite', action='store_true', help='overwrite files')
    parser.add_argument('--height', type=int, default=300, help='height of reshaped image')
    parser.add_argument('--width', type=int, default=300, help='width of reshape image')
    parser.add_argument('--skip_untar', action='store_true',
                        help='skip the untar. Use if unzipped files already exist.')
    args = parser.parse_args()
    ingest_pascal(args.input_dir, args.output_dir, img_reshape=(args.height, args.width),
                  overwrite=args.overwrite, skip_untar=args.skip_untar)
| StarcoderdataPython |
104689 | __author__ = 'Kalyan'
notes = '''
nested functions underlie many advanced features of python. So a basic understanding of this
feature is essential to mastering python.
nested functions are defined in the scope of a function, behave exactly the same except
that they have a read only access to variables in the outer function.
'''
from placeholders import *
def outer_func(outer_var):
    """Return a closure: inner_func adds the captured outer_var to its own
    argument, demonstrating read access to the enclosing scope."""
    def inner_func(inner_var):
        return outer_var + inner_var
    return inner_func
def test_inner_func_scope():
    """inner_func is not visible outside outer_func: neither as a global
    name nor as an attribute of the outer function object."""
    # inner_func not accessible by default
    try:
        inner_func()
    except NameError:  # fill up the exception
        pass
    # this syntax does not work either, it is not just static scoping.
    try:
        outer_func.inner_func()
    except AttributeError:  # fill up the exception
        pass


def test_inner_func_can_be_returned():
    # outer_func hands back the inner function object itself.
    f1 = outer_func(10)
    assert "function" == type(f1).__name__
    assert 30 == f1(20)


def test_each_invocation_returns_a_new_func():
    # Every call to outer_func re-executes the inner `def`, creating a
    # brand-new function object each time.
    f1 = outer_func(10)
    f2 = outer_func(10)
    assert False == (f1 is f2)
    assert False == (f1 == f2)
    # Plain assignment only binds another name to the same object.
    f3 = f2
    assert True == (f3 is f2)
    assert True == (f3 == f2)


def test_inner_func_has_access_to_outer_variables_after_return():
    # Each closure remembers the outer_var from its own creating call,
    # even after outer_func has returned.
    f1 = outer_func(20)
    f2 = outer_func(50)
    assert 50 == f1(30)
    assert 60 == f1(40)
    assert 80 == f2(30)
    assert 90 == f2(40)
def print_attributes(obj):
    """Dump every attribute name of `obj` together with its value."""
    for attr_name in dir(obj):
        print("attribute: " + attr_name)
        print(getattr(obj, attr_name))
def test_inner_func_attributes():
    f1 = outer_func(10)
    # NOTE(review): the attribute count of a function object is
    # CPython-version dependent; 35 matches the interpreter this koan
    # was written for.
    assert 35 == len(dir(f1))  # how many attributes does f1 have
    # use the print_attributes function to explore the properties
    # fill up the attribute name that you think holds a reference to the
    # function scope variables
    # ref_to_outer_scope = __
    # if you understand this, you have understood nested funcs :).
    # Also a good idea to use the visualizer to understand this code...


def test_inner_func_late_binding():
    # Closures capture variables, not values: by the time the inner
    # functions run, the loop variable x has its final value (4).
    def outer():
        funcs = []
        for x in range(5):
            def inner():
                return x
            funcs.append(inner)
        result = []
        for func in funcs:
            result.append(func())
        return result
    assert [4, 4, 4, 4, 4] == outer()


# just to re-iterate what you have learnt above. Search if this is not clear!
def test_inner_func_late_binding2():
    def outer(nums):
        def inner():
            nums.append(10)
        assert [10, 20, 30] == nums  # see the function invocation below.
        inner()
        assert [10, 20, 30, 10] == nums
        # assign nums to another empty list: inner now appends to the
        # rebound list, since the closure tracks the variable, not the
        # original object.
        nums = []
        inner()
        assert [10] == nums
        inner()
        assert [10, 10] == nums
        return inner
    # I have called all variables nums, so you can check if your
    # understanding of names and scopes is right :-).
    nums = [10, 20, 30]
    f1 = outer(nums)
    assert [10, 20, 30, 10] == nums
    f1()
    # f1 appends to outer's rebound list, so this list is untouched.
    assert [10, 20, 30, 10] == nums
    # generally you should not write code like this :), this is only to learn
def test_outer_scope_reads():
    y = 30
    def outer(x):
        def inner1():
            # local x shadows outer's parameter; outer's x is untouched
            x = 30
            return x
        def inner2():
            # reads both the enclosing x and the test-scope y
            return x + y
        def inner3():
            # note what happens to outer y here
            y = 10
            return x + y
        return [inner1(), inner2(), inner3(), x, y]
    assert [30, 50, 30, 20, 30] == outer(20)
def test_outer_scope_write():
    y = 10
    def outer(x):
        def inner1():
            nonlocal y # this syntax allows you to bind to outer y
            y = 40
            return y
        def inner2():
            # plain assignment creates a new local y instead
            y = 30
            return y
        def inner3():
            # reads the (by now rebound) enclosing y
            return y
        return [x, y, inner1(), inner2(), inner3()]
    assert [25, 10, 40, 30, 40] == outer(25)
# def is an executable statement. the function name is nothing more than a name
# binding to a code object! So same scope rules as variables apply to function names.
# read up more at http://effbot.org/zone/default-values.htm
def test_def_is_a_statement():
    """`def` executes at runtime: the branch taken decides which body binds to f."""
    def outer(x):
        if x > 10:
            def f():
                return x * 2
            return f
        def f():
            return x * 3
        return f
    assert 40 == outer(20)()
    assert 15 == outer(5)()
three_things_i_learnt = """
-
-
-
"""
| StarcoderdataPython |
1614691 | <gh_stars>0
def get_formatted_name(first, last):
    """Return the full name built from *first* and *last*, neatly title-cased."""
    full_name = f"{first} {last}"
    return full_name.title()
4817730 | <filename>Lecture-6/Code/OpenHashing_Lookup.py
def lookup(s):
    """Open-hashing lookup: probe slots (h(s) - g(s, j)) % m with increasing j
    until s is found or an empty slot ends the probe sequence.

    Relies on the lecture's module-level table `t`, hash functions `h`/`g`,
    and table size `m`.
    """
    # Fixed: the pseudocode operator `mod` is not valid Python — use `%`.
    j = 0
    while t[(h(s) - g(s, j)) % m] is not None:
        if t[(h(s) - g(s, j)) % m][0] != s:
            j += 1
        if t[(h(s) - g(s, j)) % m][0] == s:
            return t[(h(s) - g(s, j)) % m]
    return None
| StarcoderdataPython |
77296 | <filename>code/utilities.py
# python imports
import sys, signal, math, copy, random, os, re
# torch imports
import torch
import torch.nn as nn
# numpy imports
import numpy as np
# sklearn imports
import sklearn.cluster as skcl
# graphviz imports
from graphviz import Digraph
import networkx as nx
# natural language imports
import spacy
import penman
# code imports
import code.node_classes as nc
#################
# General utilities
#################
def uniquify(lst):
    """Return a list with duplicates removed, keeping first-seen order."""
    observed = set()
    unique = []
    for item in lst:
        if item not in observed:
            observed.add(item)
            unique.append(item)
    return unique
def position(el, lst, key=None):
    """Index of the first element of lst equal to el (compared via key if given).

    Raises StopIteration when no element matches, matching the original
    next()-based behavior.
    """
    if key:
        for idx, item in enumerate(lst):
            if key(item) == key(el):
                return idx
    else:
        for idx, item in enumerate(lst):
            if item == el:
                return idx
    raise StopIteration
def calc_metrics(true_pos, false_pos, false_neg):
prec_denom = (true_pos + false_pos) if (true_pos + false_pos) > 0 else 1
precision = true_pos / prec_denom
rec_denom = (true_pos + false_neg) if (true_pos + false_neg) > 0 else 1
recall = true_pos / rec_denom
pr_denom = (precision + recall) if (precision + recall) > 0 else 1
f1 = 2 * precision * recall / pr_denom
return precision, recall, f1
class TimeoutError(Exception): pass
def timeout(func, args=(), kwargs=None, duration=1):
    """Run func(*args, **kwargs), raising TimeoutError if it exceeds `duration` seconds.

    Uses SIGALRM, so it is Unix-only and must run on the main thread.
    """
    # Fixed: mutable default `kwargs={}` replaced with the None idiom so the
    # dict is not shared across calls.
    if kwargs is None:
        kwargs = {}

    def handler(signum, frame):
        raise TimeoutError()
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(duration)
    try:
        result = func(*args, **kwargs)
    finally:
        signal.alarm(0)  # always cancel the pending alarm
    return result
def deconstruct_expr(expr, par_d=None):
    """Collect all sub-expressions of an s-expression tuple.

    Returns (set of sub-expressions including expr itself, child->parents map).
    The head symbol (index 0) is not treated as a child.
    """
    if par_d is None:
        par_d = {}
    found = {expr}
    if isinstance(expr, tuple):
        # the head of an s-expression is never itself a nested expression
        assert not isinstance(expr[0], tuple)
        for child in expr[1:]:
            par_d.setdefault(child, set()).add(expr)
            sub_found, _ = deconstruct_expr(child, par_d)
            found |= sub_found
    return found, par_d
def get_ancestor_label_chains(orig_expr, par_dict, src_graph=None, depth=None, ret_all=True):
    """Collect chains of ancestor keys for orig_expr by walking par_dict upward.

    Each chain is a tuple of key_form entries — the label alone, or
    (label, node type) when src_graph is given — excluding orig_expr itself.
    `depth` bounds chain length; with ret_all every proper prefix is returned too.
    """
    def key_form(expr):
        # a tuple's label is its head symbol
        label_of = expr[0] if type(expr) == tuple else expr
        if src_graph != None: return (label_of, src_graph.nodes[expr]['type'])
        else: return label_of
    expl = [[orig_expr]]
    ret_lsts = set()
    while expl:
        curr = expl.pop()
        # only the newest chain element still needs normalizing to key form
        k_f = curr[:-1] + [key_form(c) for c in curr[-1:]]
        last = curr[len(curr) - 1]
        if last in par_dict and (depth == None or len(curr) < depth):
            if ret_all:
                ret_lsts.add(tuple(k_f[1:]))
            for p in par_dict[last]:
                expl.append(k_f + [p])
        elif len(k_f) > 1:
            ret_lsts.add(tuple(k_f[1:]))
    return ret_lsts
def make_anon_formula(expr, src_graph):
    """Copy of expr with every variable node (per src_graph) replaced by 'VAR'."""
    if isinstance(expr, tuple):
        assert not isinstance(expr[0], tuple)
        return tuple(make_anon_formula(part, src_graph) for part in expr)
    if expr in src_graph.nodes and src_graph.nodes[expr]['type'] == nc.VarType:
        return 'VAR'
    return expr
def formula_elements_lst(expr, src_graph, anon_var=True, anon_leaf=False, depth=1000):
    """Flatten an s-expression into a list of its element labels.

    Variables/skolem constants are anonymized to 'VAR' when anon_var is set,
    leaves collapse to 'LEAF' when anon_leaf is set, and recursion stops after
    `depth` levels.
    """
    if depth == 0:
        # Fixed: return a list so callers' extend() gets one element instead of
        # the individual characters of a string label.
        return [expr if type(expr) != tuple else expr[0]]
    if type(expr) == tuple:
        # Fixed: was `expr in src_graph.nodes[expr]['type'] == nc.SkolemFuncType`,
        # a chained comparison that indexed the graph before checking membership.
        if expr in src_graph.nodes and \
                src_graph.nodes[expr]['type'] == nc.SkolemFuncType:
            return ['VAR']
        els = [expr[0]]
        for el in expr:
            # NOTE(review): anon_var/anon_leaf are deliberately not forwarded,
            # matching the original recursion — confirm that is intended.
            els.extend(formula_elements_lst(el, src_graph, depth=depth - 1))
        return els
    if anon_leaf:
        return ['LEAF']
    if expr in src_graph.nodes and anon_var and \
            src_graph.nodes[expr]['type'] in [nc.VarType, nc.SkolemConstType]:
        return ['VAR']
    return [expr]
def make_debrujin_formula(expr, src_graph, assignment=None):
    """Copy expr with each variable renamed to VAR_1, VAR_2, ... in first-seen
    order (a de Bruijn-style canonical renaming).

    `assignment` is [next_index, {var: new_name}] threaded through the recursion.
    """
    if assignment == None: assignment = [0, {}]
    new_tup = []
    if type(expr) == tuple:
        assert type(expr[0]) != tuple
        for i, el in enumerate(expr):
            new_tup.append(make_debrujin_formula(el, src_graph, assignment))
        return tuple(new_tup)
    elif expr in src_graph.nodes and src_graph.nodes[expr]['type'] == nc.VarType:
        if not expr in assignment[1]:
            assignment[0] += 1
            assignment[1][expr] = 'VAR_' + str(assignment[0])
        return assignment[1][expr]
    return expr
def group_similar_tup_sizes(tuples, key_in=0, no_split=False, grp_sp=10, min_bk=10):
    """Partition tuples into groups whose per-source counts are of similar size.

    Tuples are first bucketed by tup[key_in]; sources with at most min_bk
    tuples are pooled together, the rest are grouped by count rounded to the
    nearest grp_sp. With no_split, everything stays in one group.
    """
    if no_split:
        return [tuples]
    by_src = {}
    for tup in tuples:
        by_src.setdefault(tup[key_in], []).append(tup)
    grouped = {}
    for tups in by_src.values():
        bucket_id = -min_bk if len(tups) <= min_bk else round(len(tups) / grp_sp)
        grouped.setdefault(bucket_id, []).extend(tups)
    return list(grouped.values())
#################
# Expression variant checking
#################
def is_alpha_equiv(conj_expr, conj_graph, prem_expr, prem_graph, depth=1000):
    """Test alpha-equivalence (equality up to variable renaming) of two expressions."""
    if conj_expr == prem_expr: return True
    # NOTE(review): this returns True whenever both sides merely have the same
    # var_check status, short-circuiting the structural hash comparison below
    # for most inputs — confirm this early exit is intended.
    if var_check(conj_expr, conj_graph) == var_check(prem_expr, prem_graph):
        return True
    if type(conj_expr) == type(prem_expr) and \
        type(conj_expr) != tuple:
        # if one is variable and one isn't, return False
        if var_check(conj_expr, conj_graph) != var_check(prem_expr, prem_graph):
            return False
        # otherwise, return True if variable and False if not
        return var_check(conj_expr, conj_graph)
    # this happens more than you would think...
    hme = hash_matching_exprs(conj_expr, conj_graph, prem_expr, prem_graph, depth=depth)
    return hme
def is_prob_iso(conj_expr, conj_graph, prem_expr, prem_graph, depth=1000):
    """Probable-isomorphism test: like alpha-equivalence, but ignoring labels
    and argument order and letting constants match each other."""
    if type(conj_expr) == type(prem_expr) and \
        type(conj_expr) != tuple:
        return var_check(conj_expr, conj_graph) == var_check(prem_expr, prem_graph)
    if conj_expr == prem_expr: return True
    hme = hash_matching_exprs(conj_expr, conj_graph, prem_expr, prem_graph,
                              use_labels=False, const_matching=True, ignore_ord=True,
                              depth=depth)
    return hme
def hash_matching_exprs(conj_expr, conj_graph, prem_expr, prem_graph,
                        use_labels=True, const_matching=False, ignore_ord=False,
                        depth=1000):
    """Check whether the two expressions' leaf/variable hash maps admit a
    perfect one-to-one pairing (same structural position hashes).

    NOTE(review): the `else: return False` below bails on the *first* prem
    entry whose hash differs, unlike find_valid_subst which keeps scanning —
    confirm that early exit is intended.
    """
    # getting ent hashes here
    conj_hashes, prem_hashes = {}, {}
    extract_var_hashes(conj_expr, conj_graph, conj_hashes,
                       ignore_ord=ignore_ord, use_labels=use_labels, depth=depth)
    extract_var_hashes(prem_expr, prem_graph, prem_hashes,
                       ignore_ord=ignore_ord, use_labels=use_labels, depth=depth)
    # we require a perfect bipartite matching to be considered alpha-equivalent
    if len(conj_hashes.keys()) != len(prem_hashes.keys()): return False
    assignments = set()
    for c_ent, c_hv in conj_hashes.items():
        c_ent_f = c_ent if type(c_ent) != tuple else c_ent[0]
        found = False
        for p_ent, p_hv in prem_hashes.items():
            if c_hv == p_hv:
                # just a sanity check, probably isn't necessary
                if (not const_matching) and \
                    var_check(c_ent, conj_graph) != var_check(p_ent, prem_graph):
                    return False
                p_ent_f = p_ent if type(p_ent) != tuple else p_ent[0]
                if const_matching or var_check(c_ent, conj_graph) or c_ent_f == p_ent_f:
                    found = p_ent
                    break
            else: return False
        if found == False: return False
        assignments.add((c_ent, found))
        # remove the matched prem entry so it cannot be paired twice
        del prem_hashes[p_ent]
    # if we get here, prem_entity_hashes should be empty
    return not prem_hashes
def extract_var_hashes(expr, graph, hashes, src_hash=None, ignore_ord=False,
                       use_labels=True, depth=10000):
    """Accumulate a position-sensitive hash for each leaf/variable of expr.

    Each leaf's entry in `hashes` mixes its own (label, type) info with the
    hash of the edge path leading to it, so structurally corresponding leaves
    in two expressions end up with matching hash values.
    """
    if src_hash == None: src_hash = 0
    new_tup = []  # NOTE(review): unused — appears to be a leftover
    gn = graph.nodes[expr]
    if (not var_check(expr, graph)) and type(expr) == tuple and depth > 0:
        if use_labels: lead = (gn['label'], gn['type'], len(expr))
        else: lead = (gn['type'], len(expr))
        for el in expr[1:]:
            # partial ordering edge labels will account for orderedness of lead
            if ignore_ord: edge_hash = hash(lead)
            else: edge_hash = hash((lead, graph.edges[expr, el]['label']))
            new_src_hash = hash(src_hash + edge_hash)
            extract_var_hashes(el, graph, hashes, new_src_hash, ignore_ord=ignore_ord,
                               use_labels=use_labels, depth=depth-1)
    else:
        if var_check(expr, graph): label = 'VAR'
        else: label = gn['label']
        if use_labels: lead = (label, gn['type'], 0)
        else: lead = ('const', 0)
        if not expr in hashes: hashes[expr] = hash(lead)
        hashes[expr] += hash(src_hash + hashes[expr])
def var_check(expr, graph):
    """True if expr is variable-like: a skolem-function application (tuple)
    or a variable / skolem-constant leaf, per the node types in `graph`."""
    if type(expr) == tuple:
        return graph.nodes[expr]['type'] in [nc.SkolemFuncType]
    return graph.nodes[expr]['type'] in [nc.VarType, nc.SkolemConstType]
#################
# Variable compression
#################
def is_rn_var(expr):
    """True for renamed-variable leaves: string labels containing 'SYM_EXT'."""
    if isinstance(expr, tuple):
        return False
    return 'SYM_EXT' in expr
def get_var_name(v):
    """Strip the '_SYM_EXT_' renaming suffix from a variable label."""
    return v.partition('_SYM_EXT_')[0]
def get_av_els(expr):
    """Flatten expr into its labels, anonymizing renamed variables to 'VAR'.

    For tuples the head symbol appears twice (once as lead, once from the
    element walk), matching the original traversal.
    """
    if isinstance(expr, tuple):
        flat = [expr[0]]
        for part in expr:
            flat.extend(get_av_els(part))
        return flat
    if is_rn_var(expr):
        return ['VAR']
    return [expr]
def get_av_key(expr):
    """Canonical (sorted) tuple of expr's anonymized element labels."""
    return tuple(sorted(get_av_els(expr)))
def var_compress_stmts(exprs):
    """Rewrite variable names across statements so similar statements share names.

    Pairwise substitution-support scores are computed between all statements,
    turned into a distance matrix, clustered with DBSCAN, and each statement is
    rewritten toward its cluster's medoid substitution.
    """
    def add_to_e_info(expr, expr_info):
        # index each statement's sub-expressions by the hash of their
        # anonymized element key
        expr_info[expr] = {}
        subexprs, par_info = deconstruct_expr(expr)
        for se in subexprs:
            av_k = hash(get_av_key(se))
            if not av_k in expr_info[expr]: expr_info[expr][av_k] = []
            expr_info[expr][av_k].append(se)
        return subexprs, par_info
    expr_info, all_subexpr_info = {}, []
    for expr in exprs: all_subexpr_info.append(add_to_e_info(expr, expr_info))
    # per-statement ancestor-chain context for each renamed variable
    var_info = {}
    for expr, (subexprs, par_info) in zip(exprs, all_subexpr_info):
        var_info[expr] = {}
        for se in subexprs:
            if is_rn_var(se):
                l_chains = get_ancestor_label_chains(se, par_info, depth=2, ret_all=False)
                var_info[expr][se] = l_chains
                #var_info[expr][se] = np.max([len(x) for x in l_chains])
                #var_info[expr][se] = np.mean([len(x) for x in l_chains])
                #var_info[expr][se] = np.min([len(x) for x in l_chains])
                #var_info[expr][se] = len(l_chains)
    wts, var_hashes = {}, {}
    # NOTE(review): lg_e_ind/lg_expr and expr_size are computed but unused below
    lg_e_ind = max([(i, max([len(get_av_key(se)) for se in se_lst]))
                    for i, (se_lst, _) in enumerate(all_subexpr_info)],
                   key=lambda x : x[1])[0]
    lg_expr, lg_subexpr_info = exprs[lg_e_ind], all_subexpr_info[lg_e_ind]
    expr_size = {}
    for p_i, (expr, subexpr_info) in enumerate(zip(exprs, all_subexpr_info)):
        all_expr_subexprs, par_info = subexpr_info
        expr_subexprs = []
        for se in all_expr_subexprs:
            av_k = get_av_key(se)
            expr_subexprs.append((av_k, hash(av_k), se))
        expr_subexprs = sorted(expr_subexprs, key=lambda x : len(x[0]),
                               reverse=True)
        expr_size[p_i] = len(expr_subexprs[0][0])
        # score a substitution against every later statement (symmetric fill)
        substs = []
        for a_i, alt_expr in enumerate(exprs):
            if a_i <= p_i: continue
            else:
                supp_sc, subst = find_good_subst(expr_subexprs, expr_info[alt_expr],
                                                 var_info[expr], var_info[alt_expr], var_hashes)
                substs.append((supp_sc, subst, a_i))
        if not p_i in wts: wts[p_i] = {}
        wts[p_i][p_i] = (0., {})
        for score, subst, a_i in substs:
            if not a_i in wts: wts[a_i] = {}
            wts[a_i][p_i] = (score, subst)
            wts[p_i][a_i] = (score, dict([(v, k) for k, v in subst.items()]))
    # convert support scores into a normalized distance matrix for DBSCAN
    sym_matr = [[None for _ in range(len(exprs))] for _ in range(len(exprs))]
    norm_c = max([wts[i][j][0] for i in range(len(exprs)) for j in range(len(exprs))])
    if norm_c == 0: norm_c = 1
    for i in range(len(exprs)):
        sym_matr[i][i] = 1.
        max_i = max([wts[i][k][0] for k in range(len(exprs))])
        for j in range(len(exprs)):
            if i == j: continue
            max_j = max([wts[k][j][0] for k in range(len(exprs))])
            sym_matr[i][j] = wts[i][j][0] / norm_c
    sym_matr = 1. - np.matrix(sym_matr)
    db = skcl.DBSCAN(eps=0.1, min_samples=2, metric='precomputed')
    cluster_inds = db.fit_predict(sym_matr)
    #cluster_inds = [1 for _ in range(len(exprs))]
    clusters = {}
    for i, cl in enumerate(cluster_inds):
        if not cl in clusters: clusters[cl] = []
        clusters[cl].append(i)
    # rewrite each statement using the substitution toward the global medoid
    new_stmts = [None for _ in range(len(exprs))]
    for cl, inds in clusters.items():
        medioid = [-math.inf, -1]
        for i in range(len(exprs)):
            s_sum = sum([wts[i][j][0] for j in range(len(exprs))])
            if s_sum > medioid[0]: medioid = [s_sum, i]
        md_ind = medioid[1]
        #md_ind = 0
        for j in inds:
            sc, subst = wts[md_ind][j]
            new_expr = apply_subst(exprs[j], subst)
            new_stmts[j] = new_expr
    return new_stmts
def find_good_subst(expr_subexprs, alt_info, expr_par_info, alt_par_info,
                    var_hashes):
    """Greedily grow a variable substitution from one statement toward another.

    Repeatedly takes the highest-scoring sub-expression pair that extends the
    current substitution, until all variables are assigned or no candidate
    helps. Returns (total support score, substitution dict). `var_hashes`
    memoizes per-sub-expression hash maps across calls.
    """
    # build up substitution for each expression
    subst, supp_by, change, nogoods = {}, [], True, set()
    all_vars = set([s for a, h, s in expr_subexprs if a == ('VAR',)])
    while all_vars:
        best_subst = [0, subst]
        for av_k, hash_av_k, subexpr in expr_subexprs:
            if type(subexpr) != tuple: continue
            if not hash_av_k in alt_info: continue
            if not subexpr in var_hashes:
                var_hashes[subexpr] = {}
                get_var_hashes(subexpr, var_hashes[subexpr])
            s1_hashes = var_hashes[subexpr]
            for a_i, alt_subexpr in enumerate(alt_info[hash_av_k]):
                if (hash_av_k, a_i) in nogoods: continue
                if is_rn_var(alt_subexpr):
                    # NOTE(review): the 2/1 name-match score below is
                    # immediately overwritten by match_par_info — confirm which
                    # scoring is intended.
                    if get_var_name(alt_subexpr) == get_var_name(subexpr): a_sc = 2
                    else: a_sc = 1
                    a_sc = match_par_info(expr_par_info[subexpr],
                                          alt_par_info[alt_subexpr])
                else: a_sc = len(av_k)
                if a_sc <= best_subst[0]: continue
                if not alt_subexpr in var_hashes:
                    var_hashes[alt_subexpr] = {}
                    get_var_hashes(alt_subexpr, var_hashes[alt_subexpr])
                s2_hashes = var_hashes[alt_subexpr]
                fnd_subst = find_valid_subst(s1_hashes, s2_hashes, dict(subst))
                if fnd_subst != False and any(not k in subst for k in fnd_subst.keys()):
                    best_subst = [a_sc, fnd_subst]
                else: nogoods.add((hash_av_k, a_i))
        # exit if nothing found
        if best_subst[0] == 0: break
        supp_by.append(best_subst[0])
        subst = best_subst[1]
        for k in subst.keys():
            if k in all_vars: all_vars.remove(k)
    return np.sum(supp_by), subst
def match_par_info(se1_info, se2_info):
    """Count how many parent-chain entries of se1_info also occur in se2_info."""
    return sum(1 for entry in se1_info if entry in se2_info)
def find_good_subst_2(expr_subexprs, alt_info, expr_par_info, alt_par_info,
                      var_hashes):
    """Single-pass variant of find_good_subst: accepts the first compatible
    match for each sub-expression rather than searching for the best.

    NOTE(review): `type(subexpr) == tuple: continue` skips composite
    sub-expressions, the opposite of find_good_subst's filter — confirm which
    is intended.
    """
    # build up substitution for each expression
    subst, supp_by = {}, []
    for av_k, hash_av_k, subexpr in expr_subexprs:
        if type(subexpr) == tuple: continue
        if not hash_av_k in alt_info: continue
        if not subexpr in var_hashes:
            var_hashes[subexpr] = {}
            get_var_hashes(subexpr, var_hashes[subexpr])
        s1_hashes = var_hashes[subexpr]
        for alt_subexpr in alt_info[hash_av_k]:
            if not alt_subexpr in var_hashes:
                var_hashes[alt_subexpr] = {}
                get_var_hashes(alt_subexpr, var_hashes[alt_subexpr])
            s2_hashes = var_hashes[alt_subexpr]
            fnd_subst = find_valid_subst(s1_hashes, s2_hashes, dict(subst))
            if fnd_subst != False and any(not k in subst for k in fnd_subst.keys()):
                subst = fnd_subst
                supp_by.append(len(av_k))
    return np.sum(supp_by), subst
def apply_subst(expr, subst):
    """Apply a variable substitution, returning a fresh copy of expr.

    Leaf strings are concatenated with '' so callers always receive new
    string objects, as the original implementation did.
    """
    if isinstance(expr, tuple):
        return (expr[0],) + tuple(apply_subst(el, subst) for el in expr[1:])
    return subst.get(expr, expr) + ''
def find_valid_subst(e1_hashes, e2_hashes, subst=None):
    """Extend `subst` to a perfect matching between equal-hash entries of the
    two hash maps.

    Renamed variables may map to any equal-hash renamed variable; constants
    must match themselves. Returns the extended dict, or False if no perfect
    matching consistent with `subst` exists.
    """
    if subst == None: subst = {}
    rev_subst = dict([(v, k) for k, v in subst.items()])
    # work on copies so the caller's maps are not consumed
    e1_hashes, e2_hashes = dict(e1_hashes), dict(e2_hashes)
    # we require a perfect bipartite matching to be considered alpha-equivalent
    if len(e1_hashes.keys()) != len(e2_hashes.keys()): return False
    assignments = set()
    for ent1, c_hv in e1_hashes.items():
        found = False
        for ent2, p_hv in e2_hashes.items():
            if c_hv != p_hv: continue
            if is_rn_var(ent1) != is_rn_var(ent2): continue
            if ent1 in subst and subst[ent1] != ent2: continue
            if ent2 in rev_subst and rev_subst[ent2] != ent1: continue
            if is_rn_var(ent1) or ent1 == ent2:
                found = ent2
                break
        if found == False: return False
        assignments.add((ent1, found))
        del e2_hashes[found]
    # if we get here, prem_entity_hashes should be empty
    if e2_hashes != {}: return False
    for a, b in assignments: subst[a] = b
    return subst
def get_var_hashes(expr, hashes, src_hash=None):
    """Graph-free analogue of extract_var_hashes: position-sensitive hashes
    for the leaves of a plain s-expression, with renamed variables anonymized."""
    if src_hash == None: src_hash = 0
    new_tup = []  # NOTE(review): unused — appears to be a leftover
    if (not is_rn_var(expr)) and type(expr) == tuple:
        lead = (expr[0], len(expr))
        for i, el in enumerate(expr):
            if i == 0: continue
            # partial ordering edge labels will account for orderedness of lead
            edge_hash = hash((lead, i))
            new_src_hash = hash(src_hash + edge_hash)
            get_var_hashes(el, hashes, new_src_hash)
    else:
        if is_rn_var(expr): lead = ('VAR', 0)
        else: lead = (expr, 0)
        if not expr in hashes: hashes[expr] = hash(lead)
        hashes[expr] += hash(src_hash + hashes[expr])
#################
# Matching utilities
#################
def maximal_var_subst(paths1, paths2):
    """Greedy maximum-weight matching between the variables of two path maps.

    Pair weights come from the alignment score of the variables' path vectors;
    pairs are taken best-first, each variable used at most once.
    Returns (total score, {var1: var2}).
    """
    all_wts = []
    for p_k, p_paths in paths1.items():
        if not is_rn_var(p_k): continue
        for c_k, c_paths in paths2.items():
            pc_wt = get_alignment_score(p_paths, c_paths)
            if pc_wt > 0: all_wts.append((p_k, c_k, pc_wt))
    score_of, var_subst = 0, {}
    while all_wts:
        best_l, best_r, best_wt = max(all_wts, key=lambda x : x[2])
        var_subst[best_l] = best_r
        # drop every candidate touching either matched variable
        all_wts = [(l, r, w) for l, r, w in all_wts
                   if l != best_l and r != best_r]
        score_of += best_wt
    return score_of, var_subst
def get_alignment_score(p_paths, c_paths, cos=True):
    """Similarity of two sparse path-count vectors: cosine when `cos`, else
    the raw dot product."""
    dot = sparse_dot_prod(p_paths, c_paths)
    if not cos:
        return dot
    norm1 = np.sqrt(sum(pow(entry[1], 2) for entry in p_paths))
    norm2 = np.sqrt(sum(pow(entry[1], 2) for entry in c_paths))
    if norm1 * norm2 == 0:
        return 0
    return dot / (norm1 * norm2)
def sparse_dot_prod(lst1, lst2):
    """Dot product of two sparse vectors given as key-sorted (key, value) lists."""
    total = 0
    i = j = 0
    len1, len2 = len(lst1), len(lst2)
    while i < len1 and j < len2:
        key1, key2 = lst1[i][0], lst2[j][0]
        if key1 == key2:
            total += lst1[i][1] * lst2[j][1]
            i += 1
            j += 1
        elif key1 < key2:
            i += 1
        else:
            j += 1
    return total
def get_paths_upto(set_lst, prov, path_len=3, just_syms=True, dp_form=True, all_len=True):
    """Enumerate provenance paths of length up to path_len starting at set_lst.

    With just_syms tuple elements are reduced to their head symbols. With
    dp_form the result is a key-sorted sparse count vector of '___'-joined
    paths (compatible with sparse_dot_prod); otherwise the raw path lists.
    """
    paths = [[s_l] for s_l in set_lst]
    fin_paths = []
    for i in range(path_len - 1):
        new_paths = []
        for p in paths:
            last_el = p[-1]
            if last_el in prov:
                for new_el in prov[last_el]:
                    new_paths.append(p + [new_el])
            if all_len:
                # keep every intermediate-length path too
                fin_paths.append(p)
        paths = new_paths
    ret_paths = fin_paths + paths
    if just_syms: ret_paths = [[(r[0] if type(r) == tuple else r) for r in p]
                               for p in ret_paths]
    if dp_form:
        d = {}
        for el in ret_paths:
            k = '___'.join(el)
            if not k in d: d[k] = 0
            d[k] += 1
        return sorted(d.items(), key=lambda x : x[0])
    return ret_paths
#################
# Graph utilities
#################
def topologically_group(graph):
    """Layer the nodes of a DAG so every node appears after all of its parents.

    Leaf nodes (out-degree 0) are forced into the first returned layer and
    roots end up in the last; asserts that no node was lost (i.e. the graph is
    acyclic).
    """
    par_dict = {}
    for node in graph.nodes:
        if not node in par_dict: par_dict[node] = set()
        for par in graph.predecessors(node):
            par_dict[node].add(par)
        # should be redundant, but just in case...
        for arg in graph.successors(node):
            if not arg in par_dict: par_dict[arg] = set()
            par_dict[arg].add(node)
    # actual layers
    update_layers = []
    rem_nodes = list(graph.nodes) + []
    while rem_nodes:
        # nodes with no remaining parents form the next layer
        layer_nodes = [node for node in rem_nodes if not par_dict[node]]
        for node in layer_nodes:
            for arg in graph.successors(node):
                if node in par_dict[arg]: par_dict[arg].remove(node)
        rem_nodes = [node for node in rem_nodes if not node in layer_nodes]
        update_layers.append(layer_nodes)
    # ensures leaf nodes are in the very first layer
    # and root nodes in the very last
    leaf_nodes, non_leaf_nodes = [], []
    for layer in reversed(update_layers):
        new_layer = []
        for node in layer:
            if graph.out_degree(node):
                new_layer.append(node)
            else:
                leaf_nodes.append(node)
        if new_layer:
            non_leaf_nodes.append(new_layer)
    assert len(set([el for lst in ([leaf_nodes] + non_leaf_nodes) for el in lst])) == len(graph.nodes)
    return [leaf_nodes] + non_leaf_nodes
#################
# Encoder utilities
#################
def flip_upd_layers(upd_layers):
    """Reverse the direction of layered (src, dst, edge) update triples.

    Triples with a None destination are dropped; sources that never appear as
    a destination become a leading "roots" layer of (src, None, None) triples.
    Empty layers are removed.
    """
    kept = []
    for layer in upd_layers:
        filtered = [(s, d, e) for s, d, e in layer if d is not None]
        if filtered:
            kept.append(filtered)
    dsts = {d for layer in kept for _, d, _ in layer}
    srcs = {s for layer in kept for s, _, _ in layer}
    roots = [(s, None, None) for s in srcs - dsts]
    flipped = [[(d, s, e) for s, d, e in layer] for layer in reversed(kept)]
    return [layer for layer in [roots] + flipped if layer]
def add_zv_to_no_deps(dir_upd_layer, node_zv, edge_zv):
    """Replace missing (None) dependencies with zero-vector placeholders."""
    return [(src, node_zv, edge_zv) if add is None else (src, add, edge)
            for src, add, edge in dir_upd_layer]
#################
# PyTorch utilities
#################
def get_adj_matr(pairs, size, is_cuda=False, mean=False, gcn_agg=None):
    """Build a sparse adjacency tensor of the given size from (src, add) pairs.

    Entry weights: GCN-style 1/(deg_src * deg_add) when gcn_agg (a degree map
    keyed by (0, src)/(1, add)) is given, 1/deg_src when mean, else 1.
    NOTE(review): uses the legacy torch.cuda.*Tensor / torch.sparse.FloatTensor
    constructors — still functional but deprecated in modern PyTorch.
    """
    if is_cuda:
        i = torch.cuda.LongTensor(pairs)
    else:
        i = torch.LongTensor(pairs)
    if gcn_agg != None:
        n_lst = [1 / (gcn_agg[(0, src)] * gcn_agg[(1, add)])
                 for src, add in pairs]
        if is_cuda: v = torch.cuda.FloatTensor(n_lst)
        else: v = torch.FloatTensor(n_lst)
    elif mean:
        # per-source counts for mean aggregation
        src_ct = {}
        for src, _ in pairs:
            if not src in src_ct: src_ct[src] = 0
            src_ct[src] += 1
        if is_cuda:
            v = torch.cuda.FloatTensor([1 / src_ct[src] for src, _ in pairs])
        else:
            v = torch.FloatTensor([1 / src_ct[src] for src, _ in pairs])
    else:
        if is_cuda:
            v = torch.cuda.FloatTensor([1 for _ in range(len(pairs))])
        else:
            v = torch.FloatTensor([1 for _ in range(len(pairs))])
    if is_cuda:
        return torch.cuda.sparse.FloatTensor(i.t(), v, size)
    return torch.sparse.FloatTensor(i.t(), v, size)
def compute_att_aggr(node_matr, pairs, W_q, W_k, b_q, device, softmax=True):
    """Scaled dot-product attention of each source node over its target nodes.

    `pairs` must be grouped by source (rows with the same src consecutive).
    Targets are padded to the longest group and masked; returns a flat tensor
    of per-pair attention weights in pair order, or None if pairs is empty.
    """
    # split target ids into per-source groups, relying on src-contiguity
    all_ms, at_src = [], None
    for src, tgt in pairs:
        if src != at_src:
            if at_src != None: all_ms.append(bmm_lst)
            bmm_lst, at_src = [], src
        bmm_lst.append(tgt)
    if at_src != None: all_ms.append(bmm_lst)
    ch_lens = [len(lst) for lst in all_ms]
    if not ch_lens: return None
    max_len = max(ch_lens)
    src_bmm_tensor = torch.tensor(uniquify([src for src, _ in pairs]),
                                  device=device)
    # pad every group to max_len, building a 0/1 mask for the padding
    mask, zv_added = [], []
    for lst in all_ms:
        if len(lst) == max_len:
            mask.append(torch.zeros(len(lst), device=device))
            zv_added.append(torch.tensor(lst, device=device))
        else:
            zv = torch.zeros(len(lst), device=device)
            ov = torch.ones(max_len - len(lst), device=device)
            zo_tensor = torch.cat((zv, ov), 0)
            mask.append(zo_tensor)
            # this doesn't matter because we mask it anyway
            padding = [0 for _ in range(max_len - len(lst))]
            zv_added.append(torch.tensor(lst + padding, device=device))
    mask = torch.stack(mask).unsqueeze(1)
    bmm_tgt = W_k(torch.stack([node_matr.index_select(0, x) for x in zv_added]))
    bmm_src = W_q(node_matr.index_select(0, src_bmm_tensor).unsqueeze(1))
    att_matr = bmm_src.matmul(bmm_tgt.transpose(1, 2)) / b_q
    # padded positions get -inf so softmax assigns them zero weight
    mask_matr = att_matr.masked_fill(mask==True, float('-inf'))
    if softmax: probs = nn.Softmax(dim=2)(mask_matr).squeeze(1)
    else: probs = mask_matr.squeeze(1)
    # flatten back to one weight per original pair, trimming the padding
    exp_rngs = []
    for ch_len in ch_lens:
        a_rng = torch.arange(ch_len, device=device)
        if device == torch.device('cpu'): exp_rngs.append(torch.LongTensor(a_rng))
        else: exp_rngs.append(torch.cuda.LongTensor(a_rng))
    prob_matr = torch.cat([prob_m.index_select(0, exp_rng)
                           for prob_m, exp_rng in zip(probs, exp_rngs)], 0)
    return prob_matr
#################
# Visualization
#################
def visualize_alignment(nodes1, nodes2, alignments, file_app='', col='green'):
    """Render two node graphs side by side with colored alignment edges (graphviz).

    NOTE(review): `sf` and `topological_sort` are not defined in this module —
    as written this raises NameError; confirm the intended imports.
    NOTE(review): `col_val` is unbound for col values other than 'green'/'blue',
    and nodes are expected to expose `.args` (ParseNode uses `.arguments`).
    """
    dag = Digraph(filename=sf.vis_data_loc + file_app)
    for i, nodes in enumerate([nodes1, nodes2]):
        gr_name = 'base' if i == 0 else 'target'
        # graph name must begin with 'cluster' for graphviz
        with dag.subgraph(name='cluster_' + gr_name) as g:
            g.attr(color='black')
            g.attr(label=gr_name)
            tsrt = reversed(topological_sort(nodes))
            for layer in tsrt:
                for node in layer:
                    n_shape = 'ellipse' if node.ordered else 'rectangle'
                    g.node(str(id(node)), label=node.label, shape=n_shape)
            for node in nodes:
                for arg in node.args:
                    g.edge(str(id(node)), str(id(arg)))
    if col == 'green':
        col_val = '0.33 '
    elif col == 'blue':
        col_val = '0.5 '
    for prob, (n1_ind, n2_ind) in alignments:
        prob = max(prob, 0)
        # HSV color: saturation encodes alignment strength
        dag.edge(str(id(nodes1[n1_ind])), str(id(nodes2[n2_ind])),
                 constraint='false', dir='none', color=col_val + str(prob) + ' 1')
    dag.view()
def visualize_alignment_ps(nodes1, nodes2, alignments, file_app='', col='green'):
    """Compact single-graph variant of visualize_alignment (tighter spacing,
    uniform node shapes).

    NOTE(review): `sf` and `topological_sort` are not defined in this module —
    as written this raises NameError; confirm the intended imports. The second
    `col_val` assignment also unconditionally overrides the first.
    """
    dag = Digraph(filename=sf.vis_data_loc + file_app)
    dag.attr(nodesep='0.4')
    dag.attr(ranksep='0.35')
    for i, nodes in enumerate([nodes1, nodes2]):
        tsrt = reversed(topological_sort(nodes))
        for layer in tsrt:
            for node in layer:
                n_shape = 'ellipse'# if node.ordered else 'rectangle'
                dag.node(str(id(node)), label=node.label, shape=n_shape)
        for node in nodes:
            for arg in node.args:
                dag.edge(str(id(node)), str(id(arg)))
    col_val = '0.33 '
    col_val = '0.6 '
    for prob, (n1_ind, n2_ind) in alignments:
        prob = max(prob, 0)
        dag.edge(str(id(nodes1[n1_ind])), str(id(nodes2[n2_ind])),
                 constraint='false', dir='none', color=col_val + str(prob) + ' 1')
    dag.view()
def visualize_graph(graph, filename='graph_img'):
    """Render a networkx-style digraph with graphviz, keeping only nodes that
    touch AMR-role edges (':'-prefixed labels) or word nodes."""
    g = Digraph(filename=filename)
    g.attr(color='black')
    #g.attr(label=gr_name)
    tsrt = reversed(topologically_group(graph))
    # restrict rendering to AMR/word-related nodes
    good_nodes = set()
    for node in graph.nodes:
        for arg in graph.successors(node):
            edge_label = graph.edges[node, arg]['label']
            if ':' in edge_label[0] or 'word_node' in node:
                good_nodes.add(node)
                good_nodes.add(arg)
    for layer in tsrt:
        for node in layer:
            if not node in good_nodes: continue
            n_shape = 'ellipse'
            #label_is = str(graph.nodes[node]['label']).replace('-','=').replace('.','dt').replace('/\\', '&')
            label_is = str(graph.nodes[node]['label']).replace('/\\', '&')
            if len(label_is) == 1 and (not label_is == '&') and list(graph.successors(node)):
                label_is = 'amr-' + label_is
            g.node(str(hash(node)), label=label_is, shape=n_shape)
    for node in graph.nodes:
        if not node in good_nodes: continue
        for arg in graph.successors(node):
            edge_label = graph.edges[node, arg]['label']
            edge_label = edge_label if edge_label[0] == ':' else ''
            if edge_label == ':pred-is-named': edge_label = ':word'
            #edge_label = ''
            g.edge(str(hash(node)), str(hash(arg)), label=edge_label)
    # view() fails on headless machines; deliberately best-effort
    try: g.view()
    except: pass
#################
# Language Utilities
#################
def parse_nl_stmt(stmt, spacy_map, amr_map):
    """Convert a sentence into (combined s-expression, token list).

    Uses pre-computed spaCy parses (spacy_map) and AMR graph strings (amr_map);
    the result conjoins AMR and dependency-derived s-expressions under '/\\'.
    """
    #doc = nlp(stmt)
    doc = spacy_map[stmt]
    graph, tok_map = convert_to_graph(doc)
    amr_s_exprs = get_amr_graph(stmt, amr_map)
    s_exprs = convert_graph_to_s_exprs(graph)
    # drop purely positional helper expressions
    s_exprs = [expr for expr in s_exprs if not ('pos_' in expr[0] or 'word_node' in expr[0])]
    s_exprs = [(expr[1] if expr[0] == 'end_sent' else expr) for expr in s_exprs ]
    s_expr = tuple(['/\\'] + amr_s_exprs + s_exprs)
    sent = [incl_pos(tok_map[ind_func(tok)]) for sent in doc.sents for tok in sent]
    return s_expr, sent
TOK_SP = '_ID_POS_PT_'
def incl_pos(arg):
    """Lower-cased node label (the positional-suffix variant is disabled)."""
    return arg.label.lower()
    # Fixed: the line below was unreachable dead code after the return;
    # kept as a comment since it documents the disabled variant.
    # return arg.label.lower() + TOK_SP + '_'.join([str(x) for x in arg.position])
def convert_graph_to_s_exprs(graph):
    """Convert every root ParseNode in `graph` to an s-expression."""
    return [convert_graph_to_s_expr(root) for root in graph]
def convert_graph_to_s_expr(gr_expr):
    """Recursively convert a ParseNode tree to a lower-cased s-expression.

    Leaves become plain strings; internal nodes become tuples headed by the
    node's label.
    """
    label = gr_expr.label.lower()
    if not gr_expr.arguments:
        return label
    return tuple([label] + [convert_graph_to_s_expr(arg) for arg in gr_expr.arguments])
def convert_to_graph(doc):
    """Build feature graphs (word chain, fine POS tags, dependencies) from a
    spaCy doc. Returns (list of root ParseNodes, token -> ParseNode map)."""
    graphs = []
    tok_map = {}
    for s_num, sentence in enumerate(doc.sents):
        for t_num, tok in enumerate(sentence):
            if not ind_func(tok) in tok_map:
                tok_map[ind_func(tok)] = ParseNode(canon_str(tok))
            tok_map[ind_func(tok)].position = (s_num, t_num)
    # chain word nodes left-to-right so linear order is recoverable;
    # a single end_sent node caps the whole chain
    prev_node = None
    for tok, tok_node in tok_map.items():
        s_num, t_num = tok_node.position
        pos_node = ParseNode('pos_' + str(t_num))
        #new_node = ParseNode('word_node', [tok_node, pos_node])
        #new_node = ParseNode('pos_' + str(t_num), [tok_node] + ([prev_node] if prev_node else []))
        new_node = ParseNode('word_node', [tok_node] + ([prev_node] if prev_node else []))
        prev_node = new_node
        graphs.append(new_node)
    graphs.append(ParseNode('end_sent', [prev_node]))
    # unary token features
    #graphs.extend(get_sp_graph(list(doc), tok_map))
    graphs.extend(get_fine_tag_graph(list(doc), tok_map))
    # dependency features
    graphs.extend(get_dep_graph(doc, constr_rels=set(['punct']), tok_nodes=tok_map))
    return graphs, tok_map
def ind_func(tok):
    """Key used to index tokens in tok_map (currently the token object itself;
    the text-keyed variant is disabled)."""
    #return tok.text
    return tok
def canon_str(tok):
    """Canonical token form: lower-cased surface text (lemma variant disabled)."""
    return tok.text.lower()
def lemma_str(tok):
    """Lower-cased lemma of a spaCy token."""
    return tok.lemma_.lower()
def skip_tok(tok):
    """True for tokens to ignore when building graphs: punctuation and stop-words."""
    return tok.is_punct or tok.is_stop
def is_ann_tok(tok):
    """True for tokens worth annotating: nouns, verbs, and '-er' adverbs."""
    # Fixed: `tok.pos_ in 'ADV'` was a substring test against the literal
    # string 'ADV' (so e.g. pos_ == 'A' would match); equality was intended.
    if tok.pos_ == 'ADV' and tok.text[-2:] == 'er':
        return True
    return tok.pos_ in ['NOUN', 'VERB']
def is_comp_tok(tok):
    """True for comparative-looking adverbs ('-er' ADV tokens)."""
    # Fixed: equality instead of substring membership in the literal 'ADV'.
    return tok.pos_ == 'ADV' and tok.text[-2:] == 'er'
def get_sp_graph(doc, tok_map=None):
    """Attach an entity-type feature node to each token carrying an entity label."""
    if tok_map is None:
        tok_map = {}
    nodes = []
    for tok in doc:
        t_node = tok_map[ind_func(tok)]
        # entity type
        if tok.ent_type_:
            et_node = ParseNode(tok.ent_type_ + '_Ent_Type', [t_node])
            t_node.parents.append(et_node)
            nodes.append(et_node)
    return nodes
def get_fine_tag_graph(doc, tok_map=None):
    """Attach a fine-grained POS-tag feature node to each annotatable token."""
    if tok_map is None:
        tok_map = {}
    nodes = []
    for tok in doc:
        t_node = tok_map[ind_func(tok)]
        if is_ann_tok(tok):
            tag_node = ParseNode(tok.tag_ + '_Fine_Pos', [t_node])
            t_node.parents.append(tag_node)
            nodes.append(tag_node)
    return nodes
def get_amr_graph(sent, amr_map):
    """Look up the pre-computed AMR string for `sent` and parse it to tuples."""
    return parse_amr_str(amr_map[sent])
def parse_amr_str(graph_str):
    """Parse a JAMR-style AMR block (with '# ::tok' / '# ::node' metadata)
    into the nested-tuple form used elsewhere in this module.

    NOTE(review): `toks` is unbound if a '::node' line precedes the '::tok'
    line (or none exists) — confirm the input format guarantees ordering.
    """
    lines = graph_str.split('\n')
    # map AMR concept names to the (lower-cased) surface tokens they cover
    node_map = {}
    for l in lines:
        if '::tok' in l: toks = l.split()[2:]
        elif '::node' in l:
            comp = l.split()
            try:
                node_info = comp[3]
                node_span = [int(x) for x in comp[4].split('-')]
                node_tok = '-'.join(toks[node_span[0] : node_span[1]]).lower()
                node_map[node_info.lower()] = node_tok
            except: pass
    # drop comment lines and split parentheses into their own tokens
    use_str = ' '.join([l for l in lines if l and l[0] != '#'])
    use_str = [el for el_str in use_str.split() for el in re.split('(\(|\))', el_str)]
    use_str_lst = [el for el in use_str if el]
    amr_tup = parse_amr_lst(use_str_lst, node_map)
    #print(graph_str)
    #print(amr_tup)
    #input()
    return amr_tup
def parse_amr_lst(toks, node_map):
    """Parse a flat AMR token stream (with '(' / ')' tokens) into nested tuples.

    Returns a single-element list containing the root expression; structurally
    identical sub-expressions are deduplicated to shared objects.
    """
    stack, add_lst, seen_dict = [], [], {}
    for tok in toks:
        if tok == '(':
            stack.append(add_lst)
            add_lst = []
        elif tok == ')':
            # Fixed: the assert messages referenced an undefined `sexpr_str`,
            # which raised NameError instead of the intended AssertionError.
            assert len(stack) > 0, 'Imbalanced parentheses:\n' + ' '.join(toks)
            assert add_lst, 'Empty list found:\n' + ' '.join(toks)
            old_expr = reformat_amr_expr(add_lst, node_map)
            if not old_expr in seen_dict: seen_dict[old_expr] = old_expr
            old_expr = seen_dict[old_expr]
            add_lst = stack.pop()
            add_lst.append(old_expr)
        else:
            add_lst.append(tok.lower())
    assert len(add_lst) == 1
    return add_lst
def reformat_amr_expr(lst, node_map):
    """Turn a parsed '(v / pred :role val ...)' token list into an 'amr_rel' tuple.

    Multi-token role values are joined with '_'; if the predicate has a surface
    word in node_map, an ':orig-word' entry is added.
    """
    assert lst[1] == '/'
    pred = lst[2]
    rel = ['amr_rel', (':amr-name', pred)]
    if pred in node_map:
        rel.append((':orig-word', node_map[pred]))
    i = 3
    while i < len(lst):
        # advance j to the next ':role' marker (nested tuples never start with ':')
        j = i + 1
        while j < len(lst) and lst[j][0] != ':':
            j += 1
        vals = lst[i + 1:j]
        rel.append((lst[i], vals[0] if len(vals) == 1 else '_'.join(vals)))
        i = j
    #return tuple([lst[0]] + new_lst)
    return tuple(rel)
def get_dep_graph(doc, tok_nodes=None, constr_rels=None):
    """Create dependency-relation ParseNodes linking head and child tokens.

    Relations listed in constr_rels (exact or substring match) are skipped.
    Returns the deduplicated list of created dependency nodes.
    """
    if constr_rels == None: constr_rels = set()
    if tok_nodes == None: tok_nodes = {}
    sentences = list(doc.sents)
    # depth-first walk from each sentence root
    unprocessed = [sentence.root for sentence in sentences]
    seen = set()
    fin_graph = []
    while unprocessed:
        tok = unprocessed.pop()
        tok_node = tok_nodes[ind_func(tok)]
        seen.add(tok)
        # dependency information
        for child in tok.children:
            dep_label = child.dep_ + '_dep_info'
            ch_node = tok_nodes[ind_func(child)]
            dep_node = ParseNode(dep_label, [tok_node, ch_node])
            # NOTE(review): l_dep_node / r_dep_node are created but never
            # attached or returned — confirm they are intentionally unused.
            l_dep_node = ParseNode(dep_label + '_1', [tok_node])
            r_dep_node = ParseNode(dep_label + '_2', [ch_node])
            if not (child.dep_ in constr_rels or \
                any(dep in child.dep_ for dep in constr_rels)):
                tok_node.parents.append(dep_node)
                ch_node.parents.append(dep_node)
                fin_graph.append(dep_node)
            if not child in seen:
                unprocessed.append(child)
    vals = list(set(fin_graph))
    return vals
class ParseNode:
    """A labeled parse-graph node with ordered arguments and parent back-links."""

    def __init__(self, label, arguments=None, parents=None, ordered=True, position=None):
        # avoid shared mutable defaults: fresh lists per instance
        self.position = position
        self.label = label
        self.arguments = [] if arguments is None else arguments
        self.parents = [] if parents is None else parents
        self.ordered = ordered

    def keyForm(self):
        """Hashable summary: (label, arity, has-parents, orderedness)."""
        return (self.label, len(self.arguments), len(self.parents) > 0, self.ordered)

    def __str__(self):
        if not self.arguments:
            return self.label
        inner = ', '.join(str(arg) for arg in self.arguments)
        return self.label + '(' + inner + ')'

    def __repr__(self):
        return str(self)
| StarcoderdataPython |
1738469 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Application configuration defaults.
config = {
    'LOCALE': 'en',                   # default UI language code
    'LOCALES_DIR': 'static/locales',  # directory holding translation files
    'ROOT_PATH': None,                # not set here; presumably filled in elsewhere — TODO confirm
    'GOOGLEMAPS_KEY': '<KEY>'         # Google Maps API key (redacted in this copy)
}
| StarcoderdataPython |
3396732 | <gh_stars>1-10
from nonebot import on_command, CommandSession
from nonebot.permission import PRIVATE
from .utils import make_dragon
ERROR_MSG = "输入不合规。请重新输入。"
@on_command('dragonmaker', aliases=('造龙', '生成龙图'), only_to_me=False, permission=PRIVATE)
async def dragonmaker(session: CommandSession):
    """Interactive command: composite a "dragon" image onto a user-supplied background."""
    # Get the background image from the session state; prompt the user if it is missing.
    base = session.get('base', prompt='请向机器人发送一张背景图片。')
    # Get the dragon's position within the background image.
    pos = session.get('pos', prompt='请输入龙在背景图中的位置x y,用空格分隔。以左上角为原点,' + \
        'x和y代表在背景图中长和宽的位置比,范围是[0, 1]。例如0 0代表左上, 0.5 0.5代表中心。')
    # Get the dragon image's size relative to the background.
    size = session.get('size', prompt='请输入龙的相对大小,即龙图长与背景图长的比值,范围是(0, 1]。')
    # Generate the composited image and send it back to the user.
    dragon = await make_dragon(base, pos, size)
    await session.send(dragon)
@dragonmaker.args_parser
async def _(session: CommandSession):
    """Validate and store each interactive argument for the dragonmaker command.

    NOTE(review): the code after each ``session.pause(...)`` assumes pause()
    aborts the rest of this handler (nonebot raises internally); otherwise
    ``x``/``y`` below could be unbound after a failed float() — confirm.
    """
    arg = session.current_arg
    if session.current_key == 'base':
        # Require a static (non-gif) image message.
        if 'CQ:image' in arg and 'gif' not in arg:
            session.state['base'] = session.current_arg_images[0]
        else:
            session.pause('请发送一张图片。(暂不支持gif)')
    if session.current_key == 'pos':
        # Expected form: "x y", both values in [0, 1].
        pos_list = session.current_arg_text.split(" ")
        if len(pos_list) != 2:
            session.pause(ERROR_MSG)
        try:
            x = float(pos_list[0])
            y = float(pos_list[1])
        except ValueError:
            session.pause(ERROR_MSG)
        if (x < 0 or x > 1 or y < 0 or y > 1):
            session.pause(ERROR_MSG)
        session.state['pos'] = {'width': x, 'height': y}
    if session.current_key == 'size':
        # Relative size must lie in (0, 1].
        size = session.current_arg_text.strip()
        try:
            x = float(size)
        except ValueError:
            session.pause(ERROR_MSG)
        if x <= 0 or x > 1:
            session.pause(ERROR_MSG)
        session.state['size'] = x
150459 | <gh_stars>0
# Host:port of the master service under test.
MASTER_NAME = 'localhost:9090'
# HTTP basic-auth (username, password) pair; the username is redacted in this copy.
MASTER_AUTH = ('<PASSWORD>', 'password')
# Route templates for the nitestmonitor REST API. '{0}'/'{1}' placeholders are
# filled via str.format by callers (e.g. host, result/step/report ids, skip/take).
TEST_MONITOR_SVC_URLS = dict(
    base='http://{0}/nitestmonitor',
    base_sans_protocol='{0}://{1}/nitestmonitor',
    can_write='/v2/can-write',
    query_results='/v1/query-results',
    query_results_skip_take='/v1/query-results?skip={0}&take={1}',
    create_results='/v2/results',
    update_results='/v2/results',
    delete_result='/v2/results/{0}',
    query_steps='/v1/query-steps',
    query_steps_skip_take='/v1/query-steps?skip={0}&take={1}',
    create_steps='/v2/steps',
    delete_step='/v2/steps/{0}',
    delete_steps='/v2/delete-steps',
    delete_results='/v2/delete-results',
    list_report_files='/v2/reports',
    upload_report_for_result='/v2/results/{0}/upload',
    attach_report_to_result='/v2/results/{0}/attach',
    download_report='/v2/reports/{0}',
    delete_report='/v2/reports/{0}'
)
| StarcoderdataPython |
1613872 | <gh_stars>0
from django.urls import path
from . import views
# URL routes for this app: /recommend/ -> views.get_similar_recommendation.
# (Trailing dataset-concatenation artifact removed from the closing bracket.)
urlpatterns = [
    path('recommend/', views.get_similar_recommendation, name='recommend'),
]
3388527 | #!/usr/bin/env python
'''This script is developed to define and load a mission in a fixed wing UAV using the
ual_backend_mavros_fw. Firstly, it has to be executed roslaunch ual_backend_mavros_fw simulations.launch'''
import rospy, std_msgs, std_srvs
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
from uav_abstraction_layer.srv import SetMission, SetMissionRequest, SetMissionResponse
from uav_abstraction_layer.msg import MissionElement, ParamFloat
class MissionLoader():
    """Defines a fixed-wing UAV mission and uploads it via the UAL /ual/set_mission service.

    Construction blocks until the service is available, then immediately
    defines and sends the mission.
    """

    def __init__(self):
        # Wait until service is available and create connection
        rospy.wait_for_service('/ual/set_mission')
        self._setmission_service = rospy.ServiceProxy('/ual/set_mission', SetMission)
        mission_wps = self.define_mission()
        self.send_mission(mission_wps)

    def define_mission(self):
        """Build the mission: a single LAND_POSE element.

        The element has an approach waypoint at (100, 0, 10) followed by a
        touchdown waypoint at the origin, both in the "map" frame, with all
        loiter/abort parameters set to their defaults.
        """
        wps = []
        header_map = std_msgs.msg.Header()
        header_map.frame_id = "map"
        # LAND POSE
        landing_phase = MissionElement()
        landing_phase.type = MissionElement.LAND_POSE
        landing_phase.waypoints = [PoseStamped(header_map, Pose(Point(100, 0, 10), Quaternion(0, 0, 0, 1)))]
        landing_phase.waypoints.append(PoseStamped(header_map, Pose(Point(0, 0, 0), Quaternion(0, 0, 0, 1))))
        landing_phase.params = self.dictToListOfParamFloat({"loit_heading": 0.0, "loit_radius": 0.0,
            "loit_forward_moving": 1.0, "abort_alt": 0.0, "precision_mode": 0.0})
        wps.append(landing_phase)
        return wps

    def send_mission(self, wps):
        """Send the mission elements to the UAL service (blocking) and return its response."""
        request = SetMissionRequest()
        request.mission_elements = wps
        request.blocking = True
        response = self._setmission_service(request)
        return response

    def dictToListOfParamFloat(self, dict):
        """Convert a {name: value} mapping into a list of ParamFloat messages.

        NOTE: the parameter shadows the builtin ``dict``; name kept for
        interface stability.
        """
        return [ParamFloat(key, value) for key, value in dict.items()]
''' The node and the MissionLoader class are initialized'''
if __name__=='__main__':
    # Register this process as a ROS node, then build and send the mission.
    rospy.init_node('mission_loader', anonymous=True)
    m = MissionLoader()
| StarcoderdataPython |
4830405 | # coding=utf8
from . import record_utils as ru
# Log-line prefixes used to tag records emitted by each hook family.
EDITABLE_INPUT_CONNECTION_TAG = '[EditableInputConnection]'
SPANNER_STRING_BUILDER_TAG = '[SpannerStringBuilder]'
TEXT_VIEW_KEY_TAG = '[TextViewKeyboard]'
# Keyboard Action
def instrument_EditableInputConnection():
    """Return frida JS that hooks BaseInputConnection/EditableInputConnection.

    The script logs BaseInputConnection construction and reports
    beginBatchEdit / performEditorAction events — together with the geometry
    and identity of the backing TextView — through frida's send().

    NOTE(review): in the performEditorAction payload the key ``msgType``
    appears twice ('type', then 'keyEvent'); in a JS object literal the later
    value wins, so the message goes out with msgType='keyEvent' — confirm
    that is intended.
    """
    hook_code = """
    Java.perform(function(){
        var BaseInputConnection = Java.use('android.view.inputmethod.BaseInputConnection');
        BaseInputConnection.$init.overload('android.view.View', 'boolean').implementation = function(view, fullEditor){
            console.log('[BaseInputConnection]...');
            console.log(this.$className);
            console.log(this.mBeginBatchEdit);
            return this.$init(view, fullEditor);
        };
        var EditableInputConnection = Java.use('com.android.internal.widget.EditableInputConnection');
        EditableInputConnection.beginBatchEdit.implementation = function(){
            var timestamp = +new Date()/1000;
            send({
                msgType: 'type',
                target: 'EditableInputConnection',
                event: 'beginBatchEdit',
                mBatchEditNesting: this.mBatchEditNesting.value,
                editableInputConnection: ''+this,
                TextView: ''+this.mTextView.value,
                TextViewHandle: this.mTextView.value.$handle,
                TextViewClassname: this.mTextView.value.$className,
                TextViewPositionInScreen: this.mTextView.value.getLocationOnScreen(),
                TextViewWidth: this.mTextView.value.getWidth(),
                TextViewHeight: this.mTextView.value.getHeight(),
                TextViewX: this.mTextView.value.getX(),
                TextViewY: this.mTextView.value.getY(),
                TextViewId: this.mTextView.value.getId(),
                timestamp: timestamp
            });
            return this.beginBatchEdit();
        };
        EditableInputConnection.performEditorAction.implementation = function(actionCode){
            var timestamp = +new Date()/1000;
            send({
                msgType: 'type',
                target: 'EditableInputConnection',
                event: 'performEditorAction',
                editableInputConnection: ''+this,
                msgType: 'keyEvent',
                mBatchEditNesting: this.mBatchEditNesting.value,
                actionCode: actionCode,
                TextView: ''+this.mTextView.value,
                TextViewHandle: this.mTextView.value.$handle,
                TextViewClassname: this.mTextView.value.$className,
                TextViewPositionInScreen: this.mTextView.value.getLocationOnScreen(),
                TextViewWidth: this.mTextView.value.getWidth(),
                TextViewHeight: this.mTextView.value.getHeight(),
                TextViewX: this.mTextView.value.getX(),
                TextViewY: this.mTextView.value.getY(),
                TextViewId: this.mTextView.value.getId(),
                timestamp: timestamp
            });
            return this.performEditorAction(actionCode);
        };
    });
    """
    return hook_code
def get_instrument_EditableInputConnection_message(plid, package, fd):
    """Build a frida on-message callback that logs EditableInputConnection events.

    The returned handler writes each message to ``fd`` tagged with
    EDITABLE_INPUT_CONNECTION_TAG plus the given pid and package name.
    """
    @ru.error_handler
    def _on_message(message, data):
        ru.log(EDITABLE_INPUT_CONNECTION_TAG, message, fd, plid, package)
    return _on_message
def instrument_onKeyPreIme():
    """Return frida JS hooking onKeyPreIme on TextView and AutoCompleteTextView.

    Each pre-IME key event is reported via send() with the key code, action,
    device id, a string dump of the KeyEvent and the receiving view's id.
    """
    hook_code = """
    Java.perform(function(){
        var TextView = Java.use('android.widget.TextView');
        TextView.onKeyPreIme.implementation = function(keyCode, keyEvent){
            var actionCode = keyEvent.getAction();
            var deviceId = keyEvent.getDeviceId();
            var detail = keyEvent.toString();
            var timestamp = +new Date()/1000;
            send({
                keyCode: keyCode,
                downTime: keyEvent.getDownTime(),
                actionCode: actionCode,
                detail: detail,
                deviceId: deviceId,
                target: 'TextView',
                msgType: 'keyEvent',
                viewId: this.getId(),
                timestamp: timestamp
            });
            return this.onKeyPreIme(keyCode, keyEvent);
        };
        var AutoCompleteTextView = Java.use('android.widget.AutoCompleteTextView');
        AutoCompleteTextView.onKeyPreIme.implementation = function(keyCode, keyEvent){
            var actionCode = keyEvent.getAction();
            var deviceId = keyEvent.getDeviceId();
            var detail = keyEvent.toString();
            var timestamp = +new Date()/1000;
            send({
                keyCode: keyCode,
                downTime: keyEvent.getDownTime(),
                actionCode: actionCode,
                detail: detail,
                deviceId: deviceId,
                target: 'AutoCompleteTextView',
                msgType: 'keyEvent',
                viewId: this.getId(),
                timestamp: timestamp
            });
            return this.onKeyPreIme(keyCode, keyEvent);
        };
    });
    """
    return hook_code
def get_instrument_onKeyPreIme_message(plid, package, fd):
    """Build a frida on-message callback that logs onKeyPreIme key events.

    The returned handler writes each message to ``fd`` tagged with
    TEXT_VIEW_KEY_TAG plus the given pid and package name.
    """
    @ru.error_handler
    def _on_message(message, data):
        ru.log(TEXT_VIEW_KEY_TAG, message, fd, plid, package)
    return _on_message
# Editable String
def instrument_SpannableStringBuilder():
    """Return frida JS hooking SpannableStringBuilder.toString.

    Every toString() call is reported via send() with the resulting text,
    the object's class name/handle and (when present) the hash code of its
    internal span index — this effectively captures editable text contents.
    """
    hook_code = """
    Java.perform(function(){
        var spannerString = Java.use('android.text.SpannableStringBuilder');
        spannerString.toString.implementation = function(){
            var timestamp = +new Date()/1000;
            var string = this.toString();
            var mIndexOfSpan = this.mIndexOfSpan.value;
            var address = null;
            if(mIndexOfSpan !== null){
                address = this.mIndexOfSpan.value.hashCode();
            }
            send({
                target: 'Editable',
                msgType: 'text',
                text: string,
                classname: this.$className,
                handle: this.$handle,
                address: address,
                timestamp: timestamp
            });
            return string;
        };
    });
    """
    return hook_code
def get_instrument_SpannableStringBuilder_message(plid, package, fd):
    """Build a frida on-message callback that logs SpannableStringBuilder events.

    The returned handler writes each message to ``fd`` tagged with
    SPANNER_STRING_BUILDER_TAG plus the given pid and package name.
    (Trailing dataset-concatenation artifact removed from the return line.)
    """
    @ru.error_handler
    def wrapper(message, data):
        ru.log(SPANNER_STRING_BUILDER_TAG, message, fd, plid, package)
    return wrapper
93731 | <reponame>lucasmello/Driloader
# pylint: disable=too-few-public-methods
"""
driloader.factories.browser_factory
Module which abstracts the browser instantiations.
"""
from driloader.browser.chrome import Chrome
from driloader.browser.drivers import Driver
from driloader.browser.exceptions import BrowserNotSupportedError
from driloader.browser.firefox import Firefox
from driloader.browser.internet_explorer import IE
class BrowserFactory:
    """
    Provides the right instance based on browser name.
    """

    # Supported (upper-cased) browser names mapped to (driver key, wrapper class).
    _SUPPORTED = {
        'CHROME': ('chrome', Chrome),
        'FIREFOX': ('firefox', Firefox),
        'IE': ('ie', IE),
    }

    def __init__(self, browser_name):
        self._browser_name = browser_name
        self.browser = self._get_browser()

    def _get_browser(self):
        """
        Get browser's instance according to browser's name.
        :return:
        """
        entry = self._SUPPORTED.get(self._browser_name.upper())
        if entry is None:
            raise BrowserNotSupportedError('Sorry, but we currently not'
                                           ' support your Browser.',
                                           'Browser is not supported.')
        driver_key, browser_cls = entry
        driver = Driver()
        driver.browser = driver_key
        return browser_cls(driver)
| StarcoderdataPython |
4833129 | <filename>lib/matplotlib/table.py
"""
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : <NAME> <<EMAIL>>
Copyright : 2004 <NAME> and <NAME>
License : matplotlib license
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
from . import artist
from .artist import Artist, allow_rasterization
from .patches import Rectangle
from .cbook import is_string_like
from matplotlib import docstring
from .text import Text
from .transforms import Bbox
from matplotlib.path import Path
class Cell(Rectangle):
    """
    A cell is a Rectangle with some associated text.
    """
    PAD = 0.1  # padding between text and rectangle

    def __init__(self, xy, width, height,
                 edgecolor='k', facecolor='w',
                 fill=True,
                 text='',
                 loc=None,
                 fontproperties=None
                 ):
        # NOTE(review): `fill` is accepted but never forwarded to Rectangle
        # below — confirm whether it should be passed on.
        # Call base
        Rectangle.__init__(self, xy, width=width, height=height,
                           edgecolor=edgecolor, facecolor=facecolor)
        self.set_clip_on(False)
        # Create text object
        if loc is None:
            loc = 'right'
        self._loc = loc
        self._text = Text(x=xy[0], y=xy[1], text=text,
                          fontproperties=fontproperties)
        self._text.set_clip_on(False)

    def set_transform(self, trans):
        Rectangle.set_transform(self, trans)
        # the text does not get the transform!
        # (it is positioned in display space by _set_text_position at draw time)

    def set_figure(self, fig):
        # Keep the child Text artist attached to the same figure as the cell.
        Rectangle.set_figure(self, fig)
        self._text.set_figure(fig)

    def get_text(self):
        'Return the cell Text instance'
        return self._text

    def set_fontsize(self, size):
        # Font size lives on the child Text artist.
        self._text.set_fontsize(size)

    def get_fontsize(self):
        'Return the cell fontsize'
        return self._text.get_fontsize()

    def auto_set_font_size(self, renderer):
        """ Shrink font size until text fits. """
        fontsize = self.get_fontsize()
        required = self.get_required_width(renderer)
        # Decrease one point at a time; never go below 1pt.
        while fontsize > 1 and required > self.get_width():
            fontsize -= 1
            self.set_fontsize(fontsize)
            required = self.get_required_width(renderer)
        return fontsize

    @allow_rasterization
    def draw(self, renderer):
        if not self.get_visible():
            return
        # draw the rectangle
        Rectangle.draw(self, renderer)
        # position the text
        self._set_text_position(renderer)
        self._text.draw(renderer)

    def _set_text_position(self, renderer):
        """ Set text up so it draws in the right place.
        Currently support 'left', 'center' and 'right'
        """
        bbox = self.get_window_extent(renderer)
        l, b, w, h = bbox.bounds
        # draw in center vertically
        self._text.set_verticalalignment('center')
        y = b + (h / 2.0)
        # now position horizontally
        if self._loc == 'center':
            self._text.set_horizontalalignment('center')
            x = l + (w / 2.0)
        elif self._loc == 'left':
            self._text.set_horizontalalignment('left')
            x = l + (w * self.PAD)
        else:
            # default: right-aligned, inset by PAD from the right edge
            self._text.set_horizontalalignment('right')
            x = l + (w * (1.0 - self.PAD))
        self._text.set_position((x, y))

    def get_text_bounds(self, renderer):
        """ Get text bounds in axes co-ordinates. """
        bbox = self._text.get_window_extent(renderer)
        bboxa = bbox.inverse_transformed(self.get_data_transform())
        return bboxa.bounds

    def get_required_width(self, renderer):
        """ Get width required for this cell: text width plus PAD on each side. """
        l, b, w, h = self.get_text_bounds(renderer)
        return w * (1.0 + (2.0 * self.PAD))

    def set_text_props(self, **kwargs):
        'update the text properties with kwargs'
        self._text.update(kwargs)
class CustomCell(Cell):
    """
    A subclass of Cell where the sides may be visibly toggled.
    """
    # One letter per side: Bottom, Right, Top, Left.
    _edges = 'BRTL'
    _edge_aliases = {'open': '',
                     'closed': _edges,  # default
                     'horizontal': 'BT',
                     'vertical': 'RL'
                     }

    def __init__(self, *args, **kwargs):
        visible_edges = kwargs.pop('visible_edges')
        Cell.__init__(self, *args, **kwargs)
        self.visible_edges = visible_edges

    @property
    def visible_edges(self):
        return self._visible_edges

    @visible_edges.setter
    def visible_edges(self, value):
        # Accept None (all edges), an alias name, or any string of B/R/T/L letters.
        if value is None:
            self._visible_edges = self._edges
        elif value in self._edge_aliases:
            self._visible_edges = self._edge_aliases[value]
        else:
            for edge in value:
                if edge not in self._edges:
                    msg = ('Invalid edge param {0}, must only be one of'
                           ' {1} or string of {2}.').format(
                               value,
                               ", ".join(self._edge_aliases.keys()),
                               ", ".join(self._edges),
                               )
                    raise ValueError(msg)
            self._visible_edges = value

    def get_path(self):
        'Return a path where the edges specified by _visible_edges are drawn'
        # Walk the unit square; draw (LINETO) visible sides, skip (MOVETO) hidden ones.
        codes = [Path.MOVETO]
        for edge in self._edges:
            if edge in self._visible_edges:
                codes.append(Path.LINETO)
            else:
                codes.append(Path.MOVETO)
        if Path.MOVETO not in codes[1:]:  # All sides are visible
            codes[-1] = Path.CLOSEPOLY
        return Path(
            [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]],
            codes,
            readonly=True
            )
class Table(Artist):
    """
    Create a table of cells.
    Table can have (optional) row and column headers.
    Each entry in the table can be either text or patches.
    Column widths and row heights for the table can be specified.
    Return value is a sequence of text, line and patch instances that make
    up the table
    """
    # Numeric location codes; string names are translated through this map.
    codes = {'best': 0,
             'upper right': 1,  # default
             'upper left': 2,
             'lower left': 3,
             'lower right': 4,
             'center left': 5,
             'center right': 6,
             'lower center': 7,
             'upper center': 8,
             'center': 9,
             'top right': 10,
             'top left': 11,
             'bottom left': 12,
             'bottom right': 13,
             'right': 14,
             'left': 15,
             'top': 16,
             'bottom': 17,
             }
    FONTSIZE = 10  # default font size in points
    AXESPAD = 0.02  # the border between the axes and table edge

    def __init__(self, ax, loc=None, bbox=None, **kwargs):
        """Create a Table attached to *ax* at location *loc* (name or code) or inside *bbox*."""
        Artist.__init__(self)
        if is_string_like(loc) and loc not in self.codes:
            warnings.warn('Unrecognized location %s. Falling back on '
                          'bottom; valid locations are\n%s\t' %
                          (loc, '\n\t'.join(six.iterkeys(self.codes))))
            loc = 'bottom'
        if is_string_like(loc):
            loc = self.codes.get(loc, 1)
        self.set_figure(ax.figure)
        self._axes = ax
        self._loc = loc
        self._bbox = bbox
        # use axes coords
        self.set_transform(ax.transAxes)
        self._texts = []
        self._cells = {}  # (row, col) -> CustomCell
        self._edges = None
        self._autoRows = []
        self._autoColumns = []  # columns queued for automatic width sizing
        self._autoFontsize = True
        self.update(kwargs)
        self.set_clip_on(False)
        self._cachedRenderer = None

    def add_cell(self, row, col, *args, **kwargs):
        """ Add a cell to the table. """
        # Cells start at (0, 0); their real positions are computed at draw time
        # by _do_cell_alignment.
        xy = (0, 0)
        cell = CustomCell(xy, visible_edges=self.edges, *args, **kwargs)
        cell.set_figure(self.figure)
        cell.set_transform(self.get_transform())
        cell.set_clip_on(False)
        self._cells[(row, col)] = cell

    @property
    def edges(self):
        # Edge-visibility spec forwarded to each CustomCell at creation time.
        return self._edges

    @edges.setter
    def edges(self, value):
        self._edges = value

    def _approx_text_height(self):
        # Approximate one line of FONTSIZE text as a fraction of the axes height.
        return (self.FONTSIZE / 72.0 * self.figure.dpi /
                self._axes.bbox.height * 1.2)

    @allow_rasterization
    def draw(self, renderer):
        """Draw every cell, after resolving auto widths/fontsize and positions."""
        # Need a renderer to do hit tests on mouseevent; assume the last one
        # will do
        if renderer is None:
            renderer = self._cachedRenderer
        if renderer is None:
            raise RuntimeError('No renderer defined')
        self._cachedRenderer = renderer
        if not self.get_visible():
            return
        renderer.open_group('table')
        self._update_positions(renderer)
        # Draw cells in sorted (row, col) order for a deterministic z-order.
        keys = list(six.iterkeys(self._cells))
        keys.sort()
        for key in keys:
            self._cells[key].draw(renderer)
        # for c in self._cells.itervalues():
        #     c.draw(renderer)
        renderer.close_group('table')

    def _get_grid_bbox(self, renderer):
        """Get a bbox, in axes co-ordinates for the cells.
        Only include those in the range (0,0) to (maxRow, maxCol)"""
        boxes = [self._cells[pos].get_window_extent(renderer)
                 for pos in six.iterkeys(self._cells)
                 if pos[0] >= 0 and pos[1] >= 0]
        bbox = Bbox.union(boxes)
        return bbox.inverse_transformed(self.get_transform())

    def contains(self, mouseevent):
        """Test whether the mouse event occurred in the table.
        Returns T/F, {}
        """
        if six.callable(self._contains):
            return self._contains(self, mouseevent)
        # TODO: Return index of the cell containing the cursor so that the user
        # doesn't have to bind to each one individually.
        if self._cachedRenderer is not None:
            boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
                     for pos in six.iterkeys(self._cells)
                     if pos[0] >= 0 and pos[1] >= 0]
            bbox = Bbox.union(boxes)
            return bbox.contains(mouseevent.x, mouseevent.y), {}
        else:
            # Cannot hit-test before the first draw supplied a renderer.
            return False, {}

    def get_children(self):
        'Return the Artists contained by the table'
        return list(six.itervalues(self._cells))
    get_child_artists = get_children  # backward compatibility

    def get_window_extent(self, renderer):
        'Return the bounding box of the table in window coords'
        boxes = [cell.get_window_extent(renderer)
                 for cell in six.itervalues(self._cells)]
        return Bbox.union(boxes)

    def _do_cell_alignment(self):
        """ Calculate row heights and column widths.
        Position cells accordingly.
        """
        # Calculate row/column widths
        widths = {}
        heights = {}
        for (row, col), cell in six.iteritems(self._cells):
            height = heights.setdefault(row, 0.0)
            heights[row] = max(height, cell.get_height())
            width = widths.setdefault(col, 0.0)
            widths[col] = max(width, cell.get_width())
        # work out left position for each column
        xpos = 0
        lefts = {}
        cols = list(six.iterkeys(widths))
        cols.sort()
        for col in cols:
            lefts[col] = xpos
            xpos += widths[col]
        # work out bottom position for each row (rows stack top-down, so
        # iterate in reverse row order while accumulating upwards)
        ypos = 0
        bottoms = {}
        rows = list(six.iterkeys(heights))
        rows.sort()
        rows.reverse()
        for row in rows:
            bottoms[row] = ypos
            ypos += heights[row]
        # set cell positions
        for (row, col), cell in six.iteritems(self._cells):
            cell.set_x(lefts[col])
            cell.set_y(bottoms[row])

    def auto_set_column_width(self, col):
        """Queue column *col* for automatic width sizing at draw time."""
        self._autoColumns.append(col)

    def _auto_set_column_width(self, col, renderer):
        """ Automagically set width for column.
        """
        cells = [key for key in self._cells if key[1] == col]
        # find max width
        width = 0
        for cell in cells:
            c = self._cells[cell]
            width = max(c.get_required_width(renderer), width)
        # Now set the widths
        for cell in cells:
            self._cells[cell].set_width(width)

    def auto_set_font_size(self, value=True):
        """ Automatically set font size. """
        self._autoFontsize = value

    def _auto_set_font_size(self, renderer):
        # Shrink to the smallest size any cell needs, then apply it uniformly.
        if len(self._cells) == 0:
            return
        fontsize = list(six.itervalues(self._cells))[0].get_fontsize()
        cells = []
        for key, cell in six.iteritems(self._cells):
            # ignore auto-sized columns
            if key[1] in self._autoColumns:
                continue
            size = cell.auto_set_font_size(renderer)
            fontsize = min(fontsize, size)
            cells.append(cell)
        # now set all fontsizes equal
        for cell in six.itervalues(self._cells):
            cell.set_fontsize(fontsize)

    def scale(self, xscale, yscale):
        """ Scale column widths by xscale and row heights by yscale. """
        for c in six.itervalues(self._cells):
            c.set_width(c.get_width() * xscale)
            c.set_height(c.get_height() * yscale)

    def set_fontsize(self, size):
        """
        Set the fontsize of the cell text

        ACCEPTS: a float in points
        """
        for cell in six.itervalues(self._cells):
            cell.set_fontsize(size)

    def _offset(self, ox, oy):
        'Move all the artists by ox,oy (axes coords)'
        for c in six.itervalues(self._cells):
            x, y = c.get_x(), c.get_y()
            c.set_x(x + ox)
            c.set_y(y + oy)

    def _update_positions(self, renderer):
        # called from renderer to allow more precise estimates of
        # widths and heights with get_window_extent
        # Do any auto width setting
        for col in self._autoColumns:
            self._auto_set_column_width(col, renderer)
        if self._autoFontsize:
            self._auto_set_font_size(renderer)
        # Align all the cells
        self._do_cell_alignment()
        bbox = self._get_grid_bbox(renderer)
        l, b, w, h = bbox.bounds
        if self._bbox is not None:
            # Position according to bbox
            rl, rb, rw, rh = self._bbox
            self.scale(rw / w, rh / h)
            ox = rl - l
            oy = rb - b
            self._do_cell_alignment()
        else:
            # Position using loc
            (BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
             TR, TL, BL, BR, R, L, T, B) = list(xrange(len(self.codes)))
            # defaults for center
            ox = (0.5 - w / 2) - l
            oy = (0.5 - h / 2) - b
            if self._loc in (UL, LL, CL):   # left
                ox = self.AXESPAD - l
            if self._loc in (BEST, UR, LR, R, CR):  # right
                ox = 1 - (l + w + self.AXESPAD)
            if self._loc in (BEST, UR, UL, UC):     # upper
                oy = 1 - (b + h + self.AXESPAD)
            if self._loc in (LL, LR, LC):           # lower
                oy = self.AXESPAD - b
            if self._loc in (LC, UC, C):            # center x
                ox = (0.5 - w / 2) - l
            if self._loc in (CL, CR, C):            # center y
                oy = (0.5 - h / 2) - b
            if self._loc in (TL, BL, L):            # out left
                ox = - (l + w)
            if self._loc in (TR, BR, R):            # out right
                ox = 1.0 - l
            if self._loc in (TR, TL, T):            # out top
                oy = 1.0 - b
            if self._loc in (BL, BR, B):            # out bottom
                oy = - (b + h)
        self._offset(ox, oy)

    def get_celld(self):
        'return a dict of cells in the table'
        return self._cells
def table(ax,
          cellText=None, cellColours=None,
          cellLoc='right', colWidths=None,
          rowLabels=None, rowColours=None, rowLoc='left',
          colLabels=None, colColours=None, colLoc='center',
          loc='bottom', bbox=None, edges='closed',
          **kwargs):
    """
    TABLE(cellText=None, cellColours=None,
          cellLoc='right', colWidths=None,
          rowLabels=None, rowColours=None, rowLoc='left',
          colLabels=None, colColours=None, colLoc='center',
          loc='bottom', bbox=None, edges='closed')

    Factory function to generate a Table instance.

    Thanks to <NAME> for providing the class and table.
    """
    # Check we have some cellText
    if cellText is None:
        # assume just colours are needed; derive the grid shape from cellColours
        rows = len(cellColours)
        cols = len(cellColours[0])
        # BUG FIX: build `rows` rows of `cols` empty strings each. The previous
        # code built the transposed grid ([[''] * rows] * cols), which raised a
        # spurious ValueError below for any non-square colour grid.
        cellText = [[''] * cols] * rows
    rows = len(cellText)
    cols = len(cellText[0])
    for row in cellText:
        if len(row) != cols:
            msg = "Each row in 'cellText' must have {0} columns"
            raise ValueError(msg.format(cols))
    if cellColours is not None:
        if len(cellColours) != rows:
            raise ValueError("'cellColours' must have {0} rows".format(rows))
        for row in cellColours:
            if len(row) != cols:
                msg = "Each row in 'cellColours' must have {0} columns"
                raise ValueError(msg.format(cols))
    else:
        cellColours = ['w' * cols] * rows
    # Set colwidths if not given
    if colWidths is None:
        colWidths = [1.0 / cols] * cols
    # Fill in missing information for column
    # and row labels
    rowLabelWidth = 0
    if rowLabels is None:
        if rowColours is not None:
            rowLabels = [''] * rows
            rowLabelWidth = colWidths[0]
    elif rowColours is None:
        rowColours = 'w' * rows
    if rowLabels is not None:
        if len(rowLabels) != rows:
            raise ValueError("'rowLabels' must be of length {0}".format(rows))
    # If we have column labels, need to shift
    # the text and colour arrays down 1 row
    offset = 1
    if colLabels is None:
        if colColours is not None:
            colLabels = [''] * cols
        else:
            offset = 0
    elif colColours is None:
        colColours = 'w' * cols
    # (Dead branch removed: cellColours is always assigned above and can no
    # longer be None at this point.)
    # Now create the table
    table = Table(ax, loc, bbox, **kwargs)
    table.edges = edges
    height = table._approx_text_height()
    # Add the cells
    for row in xrange(rows):
        for col in xrange(cols):
            table.add_cell(row + offset, col,
                           width=colWidths[col], height=height,
                           text=cellText[row][col],
                           facecolor=cellColours[row][col],
                           loc=cellLoc)
    # Do column labels
    if colLabels is not None:
        for col in xrange(cols):
            table.add_cell(0, col,
                           width=colWidths[col], height=height,
                           text=colLabels[col], facecolor=colColours[col],
                           loc=colLoc)
    # Do row labels
    if rowLabels is not None:
        for row in xrange(rows):
            table.add_cell(row + offset, -1,
                           width=rowLabelWidth or 1e-15, height=height,
                           text=rowLabels[row], facecolor=rowColours[row],
                           loc=rowLoc)
        if rowLabelWidth == 0:
            table.auto_set_column_width(-1)
    ax.add_table(table)
    return table
docstring.interpd.update(Table=artist.kwdoc(Table))
| StarcoderdataPython |
107315 | import json
# Demo: serialize a dict with custom formatting options.
person = {'name':'John','age':28,'city':'New York','hasChildren':False}
personJson = json.dumps(person,indent=4,separators=(':','='),sort_keys=True)
print(personJson)
# Demo: write the dict to a JSON file.
with open('res/person.json', 'w') as f:
    json.dump(person,f,indent=4)
# Demo: round-trip through a JSON string.
person = {'name':'John','age':28,'city':'New York','hasChildren':False}
personJson = json.dumps(person)
person_new = json.loads(personJson)
print(person_new)
# Demo: read the dict back from the JSON file.
with open('res/person.json', 'r') as f:
    person = json.load(f)
# BUG FIX: was `print(json)`, which printed the json module object rather
# than the data just loaded from the file.
print(person)
class User:
    """Minimal value object holding a user's name and age."""

    def __init__(self, name, age):
        self.name, self.age = name, age
user = User('albert',29)
def encode_user(o):
    # Custom `default` hook for json.dumps: serialize a User as a dict tagged
    # with its class name; reject any other non-serializable object.
    if isinstance(o,User):
        return {'name':o.name,'age':o.age,o.__class__.__name__:True}
    else:
        raise TypeError('is not JSON serializable')
# Demo: serialize a User via the `default` hook.
userJSON = json.dumps(user,default=encode_user)
print(userJSON)
from json import JSONEncoder
class UserEncoder(JSONEncoder):
    # Encoder subclass equivalent to the `default=` hook above.
    # NOTE(review): non-User values fall through and return None here instead
    # of delegating to super().default(o), so unknown types serialize as null
    # rather than raising — confirm this is intentional.
    def default(self,o):
        if isinstance(o,User):
            return {'name':o.name,'age':o.age,o.__class__.__name__:True}
# Demo: serialize via the encoder class, both through dumps and directly.
userJSON = json.dumps(user,cls=UserEncoder)
print(userJSON)
userJSON2 = UserEncoder().encode(user)
print(userJSON2)
def decode_user(dct):
    """object_hook for json.loads: rebuild a User from a dict tagged with its class name.

    Dicts without the 'User' tag are returned unchanged.
    """
    if User.__name__ in dct:
        return User(name=dct['name'], age=dct['age'])
    return dct
# Demo: deserialize the tagged JSON back into a User instance.
# (Trailing dataset-concatenation artifact removed from the last line.)
user = json.loads(userJSON, object_hook=decode_user)
print(type(user))
print(user.name)
class NSGA2:
    """Generic NSGA-II-style evolutionary search driver.

    All genetic operators are injected, so this class only orchestrates the
    generation loop. Expected collaborator interfaces:
        initializer.initialize() -> {individual_id: individual}
        evaluator.evaluate(population) -> None (assigns fitness)
        selector.select(population) -> ordered list of parent individuals
        crossover.crossover(a, b) -> (child_a, child_b)
        mutator.mutate(individual) -> None (mutates in place)
        stopper.stop(population) -> bool (True terminates the search)
    Individuals must expose ``individual_id`` and ``evaluate()``.
    (Dataset-concatenation artifact removed from the class header; unused
    ``interrupt`` local removed.)
    """

    def __init__(self, initializer, evaluator, selector, crossover, mutator, stopper):
        self.initializer = initializer
        self.evaluator = evaluator
        self.selector = selector
        self.crossover = crossover
        self.mutator = mutator
        self.stopper = stopper
        self.population = None     # final population, set by search()
        self.population_log = []   # one population dict per generation (incl. initial)

    def make_phenotype(self, population):
        """Materialize each individual's phenotype by calling its evaluate()."""
        for individual in population.values():
            individual.evaluate()

    def make_next_population(self, champions):
        """Create the next generation: cross consecutive champion pairs, then mutate all children."""
        next_population = {}
        for parent_a, parent_b in zip(champions[::2], champions[1::2]):
            child_a, child_b = self.crossover.crossover(parent_a, parent_b)
            next_population[child_a.individual_id] = child_a
            next_population[child_b.individual_id] = child_b
        for individual in next_population.values():
            self.mutator.mutate(individual)
        return next_population

    def search(self, verbose=False):
        """Run the generational loop until the stopper fires or the user hits Ctrl-C.

        Returns the final population and stores it on ``self.population``;
        every generation is also appended to ``self.population_log``.
        """
        if verbose:
            print("Initialize population...")
        population = self.initializer.initialize()
        self.make_phenotype(population)
        self.evaluator.evaluate(population)
        self.population_log.append(population)
        if verbose:
            print("Run search...")
        while not self.stopper.stop(population):
            try:
                champions = self.selector.select(population)
                next_population = self.make_next_population(champions)
                self.make_phenotype(next_population)
                self.evaluator.evaluate(next_population)
                population = next_population
                self.population_log.append(population)
            except KeyboardInterrupt:
                # Allow a clean manual stop: keep the last completed generation.
                if verbose:
                    print("Search interrupted...")
                break
        if verbose:
            print("Terminating search...")
        self.population = population
        return population
| StarcoderdataPython |
4802878 | <gh_stars>1-10
from pl_bolts.models.vision.pixel_cnn import PixelCNN # noqa: F401
from pl_bolts.models.vision.segmentation import SemSegment # noqa: F401
from pl_bolts.models.vision.unet import UNet # noqa: F401
| StarcoderdataPython |
3263451 | import re
from django import forms
from django.http import Http404
from django.urls import reverse
from django.db.models import Q, Case, When, IntegerField, F
from django.shortcuts import render, get_object_or_404, redirect
from django.db.models.functions import Lower
from texts.search_fields import get_search_fields
from coptic.settings.base import DEPRECATED_URNS
from collections import OrderedDict
import texts.urn as urnlib
import texts.models as models
import texts.urn
import base64
from django.template.defaulttags import register
@register.filter(name='keyvalue')
def keyvalue(dict, key):
    # Template filter: look up `key` in `dict`, returning None when absent.
    # NOTE: the parameter shadows the builtin `dict`; kept for API stability.
    return dict.get(key)
def home_view(request):
    """Render the landing page using the shared base context."""
    return render(request, 'home.html', _base_context())
def corpus_view(request, corpus=None):
    """Corpus landing page: list the corpus's texts in their canonical order.

    Texts carrying an ``order`` metadatum sort by it; texts without one are
    pushed to the end (order 999999), then everything is sorted by (order, id).
    """
    corpus_object = get_object_or_404(models.Corpus, slug=corpus)
    # This is almost what we need, but because of some ORM quirks (LEFT OUTER JOINs where we needed INNER JOINs)
    # every text with a valid `order` metadatum will appear twice in these results: once with an "order" annotation,
    # and once without.
    texts = (
        models.Text.objects
            .filter(corpus=corpus_object)
            .annotate(order=Case(
                When(text_meta__name="order", then="text_meta__value"),
                output_field=IntegerField()
            ))
            .distinct()
            .order_by("order", "id")
    )
    # to handle this, for every id, take the one with an "order" if it has one, else fall back to the one without order
    ids = set([t.id for t in texts])
    results = []
    for tid in ids:
        no_order_match = [t for t in texts if t.id == tid and t.order is None]
        order_match = [t for t in texts if t.id == tid and t.order is not None]
        if len(order_match) == 0:
            # Some corpora, like urn:cts:copticLit:shenoute.those, have only partial orderings--in this case, put the unordered ones last
            no_order_match[0].order = 999999
            results += no_order_match
        else:
            results += order_match
    results = sorted(results, key=lambda t: (t.order, t.id))
    texts = results
    context = _base_context()
    context.update({
        'corpus': corpus_object,
        'texts': texts
    })
    return render(request, 'corpus.html', context)
def text_view(request, corpus=None, text=None, format=None):
    """Render a single text in the given visualization format.

    With no format, re-enters this view with the text's first available HTML
    visualization format. Before rendering, the text object is decorated with
    its CTS URN hierarchy, a site-relative URL, prev/next navigation slugs,
    and an optional endnote.
    """
    text_object = get_object_or_404(models.Text, slug=text)
    if not format:
        # Default to the first visualization's format so the template always
        # receives an explicit format.
        visualization = text_object.html_visualizations.all()[0]
        format = visualization.visualization_format.slug
        return text_view(request, corpus=corpus, text=text, format=format)
    visualization = text_object.html_visualizations.get(visualization_format__slug=format)
    # Derive the URN hierarchy (work / textgroup / corpus) from the document's
    # own URN metadata.
    doc_urn = text_object.text_meta.get(name="document_cts_urn").value
    text_object.edition_urn = doc_urn
    text_object.urn_cts_work = texts.urn.cts_work(doc_urn)
    text_object.textgroup_urn = texts.urn.textgroup_urn(doc_urn)
    text_object.corpus_urn = texts.urn.corpus_urn(doc_urn)
    text_object.text_url = "texts/" + text_object.corpus.slug + "/" + text_object.slug
    # Resolve "next"/"previous" URN metadata to slugs for navigation links;
    # silently skip when the metadata or the referenced text is missing.
    try:
        next_text_urn = text_object.text_meta.get(name="next").value.strip()
        slug = models.Text.objects.get(text_meta__name="document_cts_urn", text_meta__value=next_text_urn).slug
        text_object.next = slug
    except (models.TextMeta.DoesNotExist, models.Text.DoesNotExist):
        pass
    try:
        previous_text_urn = text_object.text_meta.get(name="previous").value.strip()
        slug = models.Text.objects.get(text_meta__name="document_cts_urn", text_meta__value=previous_text_urn).slug
        text_object.previous = slug
    except (models.TextMeta.DoesNotExist, models.Text.DoesNotExist):
        pass
    try:
        text_object.endnote = text_object.text_meta.get(name="endnote").value
    except (models.TextMeta.DoesNotExist, models.Text.DoesNotExist):
        pass
    context = _base_context()
    context.update({
        'text': text_object,
        'visualization': visualization,
        'format': format
    })
    return render(request, 'text.html', context)
def not_found(request):
    """Render the site-wide 404 page with an empty context."""
    return render(request, '404.html', {})
def _resolve_urn(urn):
    """Resolve a CTS URN to a Text, else a Corpus, else None."""
    try:
        return models.Text.objects.get(text_meta__name="document_cts_urn",
                                       text_meta__value=urn)
    except models.Text.DoesNotExist:
        pass
    try:
        return models.Corpus.objects.get(urn_code=urn)
    except models.Corpus.DoesNotExist:
        return None
def urn(request, urn=None):
    """Redirect a CTS URN to its text or corpus page, or to search if unresolved.

    Uses isinstance checks instead of comparing class names by string, which
    also accepts model subclasses/proxies and handles a None result cleanly.
    """
    # Crosswire OT corpora were removed; point at the release that still has them.
    # https://github.com/CopticScriptorium/cts/issues/112
    if re.match(r'urn:cts:copticLit:ot.*.crosswire', urn):
        return redirect('https://github.com/CopticScriptorium/corpora/releases/tag/v2.5.0')
    # check to see if the URN is deprecated and redirect if so
    urn = DEPRECATED_URNS.get(urn, urn)
    obj = _resolve_urn(urn)
    if isinstance(obj, models.Text):
        return redirect('text', corpus=obj.corpus.slug, text=obj.slug)
    if isinstance(obj, models.Corpus):
        return redirect('corpus', corpus=obj.slug)
    # Unresolved: fall back to a freetext search for the raw URN.
    return redirect(reverse('search') + f"?text={urn}")
def get_meta_values(meta):
    """Return the distinct, HTML-stripped values of a SpecialMeta field.

    For splittable (multi-valued) fields, values are split on ", " — or "; "
    for places/people — deduplicated, stripped, and sorted. Non-splittable
    values are returned in database order, unstripped.
    """
    # Distinct raw values for this metadata name (case-insensitive match).
    unsplit_values = map(lambda x: x['value'], models.TextMeta.objects.filter(name__iexact=meta.name).values("value").distinct())
    if not meta.splittable:
        meta_values = unsplit_values
    else:
        # places/people use "; " as their list separator; everything else ", ".
        sep = "; " if str(meta.name) in ["places","people"] else ", "
        split_meta_values = [v.split(sep) for v in unsplit_values]
        for i, vals in enumerate(split_meta_values):
            # Heuristic: a very long fragment means the comma was punctuation,
            # not a separator — undo the split for that value.
            if any(len(v) > 50 for v in vals) and sep == ", ": # e.g. long translation value with comma somewhere
                split_meta_values[i] = [", ".join(vals)]
        meta_values = set()
        for vals in split_meta_values:
            meta_values = meta_values.union(set(vals))
        meta_values = sorted(list(set(v.strip() for v in meta_values)))
    meta_values = [re.sub(HTML_TAG_REGEX, '', meta_value) for meta_value in meta_values]
    return meta_values
def index_view(request, special_meta=None):
    """List every value of a SpecialMeta field with the corpora containing it.

    Also precomputes base64-encoded ANNIS query fragments for each value and
    each corpus so the template can build external ANNIS links.
    """
    context = _base_context()
    value_corpus_pairs = OrderedDict()
    meta = get_object_or_404(models.SpecialMeta, name=special_meta)
    meta_values = get_meta_values(meta)
    b64_meta_values = {}
    b64_corpora = {}
    all_corpora = set([])
    for meta_value in meta_values:
        # ANNIS expects a base64-encoded query of the form: identity="<value>"
        b64_meta_values[meta_value] = str(base64.b64encode(('identity="'+meta_value+'"').encode("ascii")).decode("ascii"))
        if meta.splittable:
            # Splittable values are substrings of a joined list, so match with
            # icontains rather than exact equality.
            corpora = (models.Text.objects.filter(text_meta__name__iexact=meta.name,
                                                  text_meta__value__icontains=meta_value)
                       .values("corpus__slug", "corpus__title", "corpus__id", "corpus__urn_code", "corpus__annis_corpus_name")
                       .distinct())
        else:
            corpora = (models.Text.objects.filter(text_meta__name__iexact=meta.name,
                                                  text_meta__value__iexact=meta_value)
                       .values("corpus__slug", "corpus__title", "corpus__id", "corpus__urn_code", "corpus__annis_corpus_name")
                       .distinct())
        value_corpus_pairs[meta_value] = []
        for c in sorted(corpora,key=lambda x: x['corpus__title']):
            # Summarize the corpus's authors: None, the single author, a short
            # joined list, or "multiple" for three or more.
            try:
                authors = map(lambda x: x.text_meta.get(name__iexact="author").value,
                              models.Text.objects.filter(corpus__id=c["corpus__id"]))
                authors = list(set(authors))
                if len(authors) == 0:
                    author = None
                elif len(authors) == 1:
                    author = authors[0]
                elif len(authors) < 3:
                    author = ", ".join(authors)
                else:
                    author = "multiple"
            except models.TextMeta.DoesNotExist:
                author = None
            value_corpus_pairs[meta_value].append({
                "slug": c['corpus__slug'],
                "title": c['corpus__title'],
                "urn_code": c['corpus__urn_code'],
                "author": author,
                "annis_corpus_name": c["corpus__annis_corpus_name"]
            })
            b64_corpora[c["corpus__annis_corpus_name"]] = str(base64.b64encode(c["corpus__annis_corpus_name"].encode("ascii")).decode("ascii"))
            all_corpora.add(c["corpus__annis_corpus_name"])
        value_corpus_pairs[meta_value].sort(key=lambda x:x["title"])
    # Base64-encode the comma-joined list of all corpus names for ANNIS.
    annis_corpora = ",".join(list(all_corpora))
    annis_corpora = str(base64.b64encode(annis_corpora.encode("ascii")).decode("ascii"))
    context.update({
        'special_meta': meta.name,
        'value_corpus_pairs': sorted(value_corpus_pairs.items(), key=lambda x: x[1][0]["title"]),
        'is_corpus': meta.name == "corpus",
        'b64_meta_values': b64_meta_values,
        'b64_corpora': b64_corpora,
        'annis_corpora': annis_corpora # base64 of the corpus list, e.g. """YXBvcGh0aGV<KEY>"""
    })
    return render(request, 'index.html', context)
# search --------------------------------------------------------------------------------
def _get_meta_names_for_query_text(text):
    """Metadata field names to search for a freetext query.

    All SpecialMeta names, always including "title" and "author", plus
    "document_cts_urn" when the query looks like a URN.
    """
    names = [sm.name for sm in models.SpecialMeta.objects.all()]
    for extra in ("title", "author"):
        if extra not in names:
            names.append(extra)
    if text.lower().startswith('urn:'):
        names.append("document_cts_urn")
    return names
# Non-greedy match for a single HTML tag; used to strip markup from metadata
# values before they are shown or offered as search choices.
HTML_TAG_REGEX = re.compile(r'<[^>]*?>')
class SearchForm(forms.Form):
    """Search sidebar form: one multiple-choice field per SpecialMeta plus freetext.

    The choice fields are built from the database at instantiation time, so
    the available filter values always reflect the current corpora.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for sm in models.SpecialMeta.objects.all().order_by(Lower("name")):
            meta_values = get_meta_values(sm)
            choices = []
            for v in meta_values:
                if sm.name == "corpus":
                    # Corpus values are ANNIS corpus names; display the human
                    # title when a matching Corpus row exists.
                    try:
                        human_name = models.Corpus.objects.get(annis_corpus_name=v).title
                    except models.Corpus.DoesNotExist:
                        human_name = v
                else:
                    human_name = v
                human_name = re.sub(HTML_TAG_REGEX, '', human_name)
                choices.append((v, human_name))
            self.fields[sm.name] = forms.MultipleChoiceField(
                label=sm.name,
                required=False,
                choices=choices,
                widget=forms.SelectMultiple(attrs={'class': 'search-choice-field'})
            )
    # Freetext query box; URN-like input gets special handling in search().
    text = forms.CharField(
        label="query",
        required=False,
        widget=forms.TextInput(attrs={'class': 'search-text-field'})
    )
def _build_queries_for_special_metadata(params):
    """Build one OR'ed Q object per non-freetext search parameter.

    Values of the same field are OR'ed together; the returned queries are
    later AND'ed by applying them as successive filters. document_cts_urn is
    matched by prefix; splittable fields by icontains; others by iexact.

    The SpecialMeta splittable lookup is fetched at most once per field
    (previously it hit the database once per value).
    """
    queries = []
    for meta_name, meta_values in params.items():
        if meta_name == 'text':
            continue
        meta_values = sorted([s.strip() for s in meta_values])
        meta_name_query = Q()
        # Lazily fetched so fields with no truthy values never hit the DB,
        # matching the previous behavior.
        splittable = None
        for meta_value in meta_values:
            if not meta_value:
                continue
            if meta_name == 'document_cts_urn':
                meta_name_query = meta_name_query | Q(text_meta__name__iexact=meta_name, text_meta__value__startswith=meta_value)
                continue
            if splittable is None:
                splittable = models.SpecialMeta.objects.get(name=meta_name).splittable
            if splittable:
                meta_name_query = meta_name_query | Q(text_meta__name__iexact=meta_name, text_meta__value__icontains=meta_value)
            else:
                meta_name_query = meta_name_query | Q(text_meta__name__iexact=meta_name, text_meta__value__iexact=meta_value)
        queries.append(meta_name_query)
    return queries
def _filter_by_document_cts_urn(texts, document_cts_urn):
    """Exclude texts whose urn_code does not partially match the queried URN."""
    excluded_ids = [
        text.id for text in texts
        if not urnlib.partial_parts_match(document_cts_urn[0], text.urn_code)
    ]
    return texts.exclude(id__in=excluded_ids)
def _fetch_and_filter_texts_for_special_metadata_query(queries, params):
    """Apply each metadata Q filter in turn, then the special URN prefix filter."""
    filtered = models.Text.objects.all().order_by(Lower("title"))
    for query in queries:
        filtered = filtered.filter(query)
    add_author_and_urn(filtered)
    # document_cts_urn needs URN parsing that SQL can't express. It would be
    # nice to implement as a Django `Lookup`, but that would require parsing
    # in SQL — not pretty — so it is applied in Python here.
    if 'document_cts_urn' in params:
        return _filter_by_document_cts_urn(filtered, params['document_cts_urn'])
    return filtered
def _build_explanation(params):
meta_explanations = []
for meta_name, meta_values in params.items():
if meta_name == "text":
continue
if meta_name == "corpus":
new_meta_values = []
for meta_value in meta_values:
try:
meta_value = models.Corpus.objects.get(annis_corpus_name=meta_value).title
except models.Corpus.DoesNotExist:
pass
new_meta_values.append(meta_value)
meta_values = new_meta_values
# indicate the special logic used for document_cts_urn
sep = '=' if meta_name != 'document_cts_urn' else 'matching'
meta_name_explanations = (
[
f'<span class="meta_pair">{meta_name}</span> {sep} <span class="meta_pair">{meta_value}</span>'
for meta_value in meta_values
]
)
meta_explanations.append("(" + " OR ".join(meta_name_explanations) + ")")
return " AND ".join(meta_explanations)
def _build_result_for_query_text(query_text, texts, explanation):
    """One result group per searchable field that may contain *query_text*.

    Returns (results, all_empty_explanation), where each result holds the
    matching texts for one field plus an HTML explanation string.
    """
    suffix = (' with ' + explanation) if explanation else ''
    results = []
    for field in _get_meta_names_for_query_text(query_text):
        matches = texts.filter(text_meta__name__iexact=field,
                               text_meta__value__icontains=query_text)
        add_author_and_urn(matches)
        results.append({
            'texts': matches,
            'explanation': f'<span class="meta_pair">{query_text}</span> in "{field}"' + suffix,
        })
    all_empty_explanation = f'<span class="meta_pair">{query_text}</span> in any field' + suffix
    return results, all_empty_explanation
def _base_context():
    """Context shared by every page: the first five search fields are primary,
    the rest secondary."""
    fields = get_search_fields()
    return {
        'search_fields': fields[:5],
        'secondary_search_fields': fields[5:],
    }
def search(request):
    """Search texts by freetext and/or SpecialMeta sidebar filters.

    URN-like freetext is special-cased: removed/deprecated URNs redirect,
    resolvable URNs go straight to their text or corpus page, and anything
    else becomes a prefix filter on document_cts_urn.
    """
    context = _base_context()
    # possible keys are "text", which is the freetext that a user entered,
    # and slugs corresponding to SpecialMetas (e.g. "author", "translation", ...)
    # which the user can select in the sidebar on right-hand side of the screen
    params = dict(request.GET.lists())
    # (1) unwrap the list of length 1 in params['text'] if it exists
    # (2) if params['text'] starts with "urn:", treat it as a special case, first checking for redirects, then
    #     copying it to params['document_cts_urn'] (it is in a list to remain symmetric with all other non-'text' fields)
    if "text" in params:
        assert len(params['text']) == 1
        params['text'] = params["text"][0].strip()
        if params['text'].startswith('urn:'):
            urn = params['text']
            # check for redirects
            if re.match(r'urn:cts:copticLit:ot.*.crosswire', urn):
                return redirect('https://github.com/CopticScriptorium/corpora/releases/tag/v2.5.0')
            urn = DEPRECATED_URNS.get(urn, urn)
            obj = _resolve_urn(urn)
            if obj.__class__.__name__ == "Text":
                return redirect('text', corpus=obj.corpus.slug, text=obj.slug)
            elif obj.__class__.__name__ == "Corpus":
                return redirect('corpus', corpus=obj.slug)
            # no redirect, proceed with search
            params['document_cts_urn'] = [urn]
    # returns a list of queries built with Django's Q operator using non-freetext parameters
    queries = _build_queries_for_special_metadata(params)
    # preliminary results--might need to filter more if freetext query is present
    texts = _fetch_and_filter_texts_for_special_metadata_query(queries, params)
    # build base explanation, a string that will be displayed to the user summarizing their search parameters
    explanation = _build_explanation(params)
    if 'text' in params:
        results, all_empty_explanation = _build_result_for_query_text(params['text'], texts, explanation)
    else:
        results = [{
            'texts': texts,
            'explanation': explanation
        }]
        all_empty_explanation = explanation
    context.update({
        'results': results,
        'form': SearchForm(request.GET),
        'no_query': not any(len(v) for v in request.GET.dict().values()),
        'all_empty': not any(len(r['texts']) for r in results),
        'all_empty_explanation': all_empty_explanation,
    })
    return render(request, 'search.html', context)
def add_author_and_urn(texts):
    """Annotate each text with .author and .urn_code from its metadata, when present."""
    for text in texts:
        for attr, meta_name in (("author", "author"),
                                ("urn_code", "document_cts_urn")):
            try:
                setattr(text, attr, text.text_meta.get(name=meta_name).value)
            except models.TextMeta.DoesNotExist:
                pass
| StarcoderdataPython |
from app import db
from datetime import datetime
from logging import log
from time import time
class Organisation(db.Model):
    """An organisation profile and its related content collections."""
    __tablename__ = 'organisations'
    id = db.Column(db.Integer, primary_key=True)
    #user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete="CASCADE"), nullable=False)
    #image_filename = db.Column(db.String, default=None, nullable=True)
    #image_url = db.Column(db.String, default=None, nullable=True)
    # Basic profile fields.
    org_name = db.Column(db.String(255))
    org_city = db.Column(db.String(255))
    org_state = db.Column(db.String(255))
    org_country = db.Column(db.String(255))
    org_website = db.Column(db.String(255))
    org_short_description = db.Column(db.String(255))
    org_industry = db.Column(db.String(255))
    org_description = db.Column(db.Text)
    # One-to-many collections owned by the organisation.
    logos = db.relationship('Logo', backref='organisation', lazy='dynamic')
    teams = db.relationship('Team', backref='organisation', lazy='dynamic')
    services = db.relationship('Service', backref='organisation', lazy='dynamic')
    testimonials = db.relationship('Testimonial', backref='organisation', lazy='dynamic')
    portfolios = db.relationship('Portfolio', backref='organisation', lazy='dynamic')
    users = db.relationship('User', backref='organisation', lazy='dynamic')
    #users = db.relationship('User', backref='organisation', foreign_keys='Organisation.user_id')
    #user = db.relationship('User', backref='organisations', cascade='all, delete')
    #users = db.relationship('User', backref='organisation', lazy='dynamic',
    #primaryjoin="Organisation.id == User.organisation_id")
    created_at = db.Column(db.DateTime, default=datetime.now)
    updated_at = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
    def __repr__(self):
        """Short debugging representation, e.g. <Organisation: 1>."""
        return u'<{self.__class__.__name__}: {self.id}>'.format(self=self)
    def get_staff(self):
        # NOTE(review): relies on a `self.staff` relationship and a module-level
        # `User` import, neither of which is visible in this file — verify.
        ids = [user.user_id for user in self.staff]
        return User.query.filter(User.id.in_(ids)).all()
    def get_photo(self):
        # NOTE(review): `url_for` is not in this file's visible imports, and
        # `image_filename` is commented out above — confirm both before use.
        if self.image_filename:
            return url_for('_uploads.uploaded_file', setname='images', filename=self.image_filename, _external=True)
        else:
            return url_for('static', filename="images/medium_logo_default.png")
| StarcoderdataPython |
# It must be here to retrieve this information from the dummy
# Identity reported by this WebLab consumer instance.
core_universal_identifier = 'd9d94986-ea14-11e0-bd1d-00216a5807c8'
core_universal_identifier_human = 'Consumer'
# Core database settings.
# NOTE(review): '<PASSWORD>' values below are placeholders scrubbed upstream.
db_database = "WebLabTests"
weblab_db_username = 'weblab'
weblab_db_password = '<PASSWORD>'
debug_mode = True
#########################
# General configuration #
#########################
server_hostaddress = 'weblab.deusto.es'
server_admin = '<EMAIL>'
################################
# Admin Notifier configuration #
################################
mail_notification_enabled = False
##########################
# Sessions configuration #
##########################
session_mysql_username = 'weblab'
session_mysql_password = '<PASSWORD>'
# The session locker reuses the session store credentials.
session_locker_mysql_username = session_mysql_username
session_locker_mysql_password = session_mysql_password
| StarcoderdataPython |
# File: micropython/tests/umqtt/robust.py
"""Fake mqtt interface - this simulates the api provided
by micropython. We use paho.mqtt to talk to the broker.
"""
import paho.mqtt.client
class MQTTClient:
    """Drop-in stand-in for micropython's umqtt client, backed by paho-mqtt.

    Mirrors the umqtt API (connect/disconnect/publish) so code written for
    micropython can run unmodified against a desktop broker.
    """
    def __init__(self, name, host, port):
        self.client = paho.mqtt.client.Client(name)
        self.host = host
        self.port = port
    def connect(self):
        # Start paho's background network loop so publishes are delivered
        # without the caller having to pump the loop explicitly.
        self.client.connect(self.host, self.port)
        self.client.loop_start()
    def disconnect(self):
        self.client.disconnect()
        self.client.loop_stop(force=False)
    def publish(self, topic, data):
        # umqtt passes topics as bytes; decode for paho.
        topic = str(topic, encoding='utf-8') # paho wants a string
        print("publishing %s on %s" % (repr(data), repr(topic)))
        self.client.publish(topic, data)
| StarcoderdataPython |
import os
import ffmpeg
import numpy as np
# from spleeter import *
# from spleeter.audio.adapter import get_default_audio_adapter
# from spleeter.separator import Separator
# from spleeter.utils import *
from django.conf import settings
from .models import ProcessedTrack
class SpleeterSeparator:
    """Performs source separation using Spleeter API.

    NOTE(review): the spleeter imports above are commented out, so Separator
    and get_default_audio_adapter are unresolved in this file as shown —
    confirm how they are brought into scope before using this class.
    """
    def __init__(self, config=None):
        """Default constructor.

        :param config: Separator config dict with keys audio_bitrate,
            audio_format, sample_rate and spleeter_stem; defaults to None,
            which selects the 4-stem 16 kHz model with 256k WAV output.
        """
        if config is None:
            self.audio_bitrate = '256k'
            self.audio_format = 'wav'
            self.sample_rate = 44100
            self.spleeter_stem = 'config/4stems-16kHz.json'
        else:
            self.audio_bitrate = config['audio_bitrate']
            self.audio_format = config['audio_format']
            self.sample_rate = config['sample_rate']
            self.spleeter_stem = config['spleeter_stem']
        # Use librosa backend as it is less memory intensive
        self.separator = Separator(self.spleeter_stem, stft_backend='librosa', multiprocess=False)
        self.audio_adapter = get_default_audio_adapter()
    def separate(self, parts, input_path, dir_name, file_name):
        """Performs source separation by adding together the parts to be kept.

        Every individual stem is saved as <dir_name>/<stem>.mp3 under
        MEDIA_ROOT, and the averaged mix of the requested parts as
        <dir_name>/<file_name>.

        :param parts: Dict mapping part name ('vocals', 'drums', 'bass',
            'other') to a bool indicating whether to keep it
        :param input_path: Path to source file
        :param dir_name: Output directory, relative to MEDIA_ROOT
        :param file_name: File name for the combined (requested) mix
        :return: dict of stem name -> saved path, plus "req" -> mix path
        :raises e: FFMPEG error
        """
        waveform, _ = self.audio_adapter.load(input_path, sample_rate=self.sample_rate)
        prediction = self.separator.separate(waveform)
        out = np.zeros_like(prediction['vocals'])
        part_count = 0
        ret = {}
        # Add up parts that were requested
        for key in prediction:
            dir = os.path.join(dir_name, key+".mp3")
            self.audio_adapter.save(os.path.join(settings.MEDIA_ROOT, dir), prediction[key], self.separator._sample_rate, self.audio_format, self.audio_bitrate)
            ret[key] = dir
            if parts[key]:
                out += prediction[key]
                part_count += 1
        # Average the kept stems so the mix level stays comparable.
        out /= part_count
        req_path = os.path.join(dir_name, file_name)
        self.audio_adapter.save(os.path.join(settings.MEDIA_ROOT, req_path), out, self.separator._sample_rate, self.audio_format, self.audio_bitrate)
        ret["req"] = req_path
        return ret
    def cached(self, parts, source_track, dir_name, file_name):
        """Rebuild the requested mix from previously separated stems.

        Looks up the four single-stem ProcessedTrack rows for *source_track*,
        averages the requested ones, and saves the mix like separate() does.
        """
        bass_path = ProcessedTrack.objects.filter(source_track=source_track, bass=True, vocals=False, other=False, drums=False).first().file.name
        vocals_path = ProcessedTrack.objects.filter(source_track=source_track, bass=False, vocals=True, other=False, drums=False).first().file.name
        other_path = ProcessedTrack.objects.filter(source_track=source_track, bass=False, vocals=False, other=True, drums=False).first().file.name
        drums_path = ProcessedTrack.objects.filter(source_track=source_track, bass=False, vocals=False, other=False, drums=True).first().file.name
        ret = {
            "bass": bass_path,
            "vocals": vocals_path,
            "other": other_path,
            "drums": drums_path,
        }
        # Load only the requested stems and average them.
        load_paths = {k:ret[k] for k in parts if parts[k]}
        arrs = [self.audio_adapter.load(os.path.join(settings.MEDIA_ROOT, p))[0] \
                for p in load_paths.values()]
        out = sum(arrs) / len(arrs)
        req_path = os.path.join(dir_name, file_name)
        self.audio_adapter.save(os.path.join(settings.MEDIA_ROOT, req_path), out, self.separator._sample_rate, self.audio_format, self.audio_bitrate)
        ret["req"] = req_path
        return ret
| StarcoderdataPython |
# -----------------------------------------------------------------------------
# System Imports
# -----------------------------------------------------------------------------
from operator import itemgetter
# -----------------------------------------------------------------------------
# Public Imports
# -----------------------------------------------------------------------------
from netcad.design_services import Design
from netcad.topology import TopologyDesignService
from netcad.device import DeviceCatalog
# -----------------------------------------------------------------------------
# Private Imports
# -----------------------------------------------------------------------------
from .device_roles import MS220p8, MR52, MX65, MR42
from .profiles import AccessVlan1
def create_design(design: Design) -> Design:
    """Populate *design* with the branch devices, cable them, and refresh it."""
    device_map = {
        "sw01": MS220p8(name="ms01-dl1"),
        "sw02": MS220p8(name="ms01-dl2"),
        "sw03": MS220p8(name="ms01-dl3"),
        "ap01": MR52("ap01-dl1"),
        "ap02": MR42("ap01-dl2"),
        "ap03": MR52("ap01-dl3"),
        "mx01": MX65(name="mx01-dl1"),
        "mx02": MX65(name="mx01-dl2"),
    }
    design.config["alias"] = device_map

    devices = list(device_map.values())
    design.add_devices(*devices)
    design.add_services(
        TopologyDesignService(topology_name=design.name, devices=devices)
    )
    cable_devices(design)
    design.update()
    return design
def cable_devices(design: Design):
    """Assign access-VLAN profiles and sequential cable ids to every fixed link.

    Replaces five near-identical `with` blocks with one data-driven loop;
    cable ids are still assigned sequentially starting at "cable_1" in the
    same order as before.
    """
    aliasses: DeviceCatalog = design.config["alias"]
    sw01, sw02, sw03 = itemgetter("sw01", "sw02", "sw03")(aliasses)
    ap01, ap02, ap03 = itemgetter("ap01", "ap02", "ap03")(aliasses)
    mx01, mx02 = itemgetter("mx01", "mx02")(aliasses)

    # (device A, interface A, device B, interface B):
    # access-point uplinks first, then switch-to-appliance links.
    # Note: sw01.1 -- mx-office is not in the design yet.
    links = [
        (ap01, "wired0", sw03, "1"),
        (ap02, "wired0", sw01, "2"),
        (ap03, "wired0", sw02, "2"),
        (sw02, "1", mx02, "3"),
        (sw03, "2", mx01, "3"),
    ]

    for cable_id, (dev_a, name_a, dev_b, name_b) in enumerate(links, start=1):
        with dev_a.interfaces[name_a] as if_a, dev_b.interfaces[name_b] as if_b:
            if_a.profile = AccessVlan1()
            if_b.profile = AccessVlan1()
            if_a.cable_id = if_b.cable_id = f"cable_{cable_id}"
| StarcoderdataPython |
# File: buildchatbot.py
#
# buildchatbot - Monitors Jenkins builds and sends notifications to a Skype chat
#
# Copyright (c) 2012 <NAME> - All rights reserved.
# Licensed under the BSD 2-clause license; see LICENSE.txt
#
import platform
from time import sleep
from urllib import urlopen
from Skype4Py import Skype
from xml.etree import ElementTree
JENKINS_URL = 'http://127.0.0.1:8080'
SKYPE_CHAT = '#user/$abc123'
UPDATE_INTERVAL = 15 # seconds
MESSAGE_PREFIX = '[Jenkins] '
class Build:
    """One Jenkins project's latest build, parsed from a cc.xml Project element."""
    def __init__(self, attrs):
        # cc.xml exposes these as string attributes on each <Project>.
        self.name, self.number, self.status = (
            attrs['name'], attrs['lastBuildLabel'], attrs['lastBuildStatus'])
class BuildMonitor:
    """Polls Jenkins' cc.xml feed and notifies a listener of build transitions.

    NOTE: Python 2 code (print statements, dict.has_key).
    """
    def __init__(self, listener):
        # builds is None until the first successful fetch; transitions are
        # only reported from the second fetch onward.
        self.builds = None
        self.listener = listener
    def loop(self):
        """Poll forever, logging and retrying after network failures."""
        while True:
            try:
                self.check_for_new_builds()
            except IOError as e:
                print 'WARNING! update failed:', e.strerror
            sleep(UPDATE_INTERVAL)
    def check_for_new_builds(self):
        """Fetch current builds and report any project with a new build number."""
        builds = self.fetch_builds()
        if self.builds is not None:
            for build in builds.values():
                name = build.name
                if not self.builds.has_key(name):
                    # Project seen for the first time: no previous status.
                    self.handle_new_build(build, None)
                elif build.number != self.builds[name].number:
                    self.handle_new_build(build, self.builds[name].status)
        self.builds = builds
    def handle_new_build(self, build, old_status):
        """Notify on failures and on failure->success recoveries only."""
        transition = (old_status, build.status)
        if transition == ('Failure', 'Failure'):
            self.listener.notify(build, '(rain) Still failing')
        elif transition == ('Failure', 'Success'):
            self.listener.notify(build, '(sun) Fixed')
        elif build.status == 'Failure':
            self.listener.notify(build, '(rain) Failed')
    def fetch_builds(self):
        """Return {project name: Build} parsed from the cc.xml feed."""
        builds = {}
        response = urlopen(JENKINS_URL +'/cc.xml')
        projects = ElementTree.parse(response).getroot()
        for project in projects.iter('Project'):
            build = Build(project.attrib)
            builds[build.name] = build
        return builds
class BuildNotifier:
    """Sends build notifications to a Skype group chat via Skype4Py."""
    def __init__(self):
        # Skype4Py needs the X11 transport on non-Windows platforms.
        if platform.system() == 'Windows':
            skype = Skype()
        else:
            skype = Skype(Transport='x11')
        skype.Attach()
        self.chat = skype.Chat(SKYPE_CHAT)
    def notify(self, build, event):
        """Post one message linking to the build's page on Jenkins."""
        message = event +': '+ build.name +' - '+ JENKINS_URL +'/job/'+ build.name +'/'+ build.number +'/'
        print message
        self.chat.SendMessage(MESSAGE_PREFIX + message)
if __name__ == '__main__':
    try:
        BuildMonitor(BuildNotifier()).loop()
    except KeyboardInterrupt:
        # Ctrl-C exits cleanly without a traceback.
        pass
| StarcoderdataPython |
from future.utils import with_metaclass as with_metaclass_future
from six import with_metaclass as with_metaclass_six
__all__ = ["interoperable_with_metaclass_future", "interoperable_with_metaclass_six"]
def interoperable_with_metaclass(with_metaclass, metaclass):
    """Build a concrete "<Meta>Wrapper" base class using a with_metaclass helper.

    Subclassing the helper's temporary class yields a real class whose
    metaclass is *metaclass*, usable as an ordinary base.
    """
    wrapper_name = "{0}Wrapper".format(metaclass.__name__)
    temporary_base = with_metaclass(metaclass)
    return type(wrapper_name, (temporary_base,), {})
def interoperable_with_metaclass_future(metaclass):
    """Wrap *metaclass* using the `future` package's with_metaclass helper."""
    return interoperable_with_metaclass(with_metaclass_future, metaclass)
def interoperable_with_metaclass_six(metaclass):
    """Wrap *metaclass* using the `six` package's with_metaclass helper."""
    return interoperable_with_metaclass(with_metaclass_six, metaclass)
| StarcoderdataPython |
# Repo: jsheperd/rotate_backup
#!/usr/bin/env python
import sys
import os
import glob
import time
class archive:
    # The archive class represents an archive file with its age related
    # parameters. (Python 2 code: note the print statement in rm().)
    def __init__(self, path):
        # Derive bucketing keys at several granularities from the file's
        # mtime so archives can be clustered by year/month/week/day/etc.
        self.path = path
        self.time = time.gmtime(os.path.getmtime(path))
        self.year = time.strftime("%Y", self.time)
        self.month = time.strftime("%Y%m", self.time)
        # %W is the week of the year, so "%Y%W" groups by calendar week.
        self.week = time.strftime("%Y%W", self.time)
        self.day = time.strftime("%Y%m%d", self.time)
        self.hour = time.strftime("%Y%m%d%H", self.time)
        self.min = time.strftime("%Y%m%d%H%M", self.time)
        self.sec = time.strftime("%Y%m%d%H%M%S", self.time)
    def rm(self):
        # remove the archive from the filesystem
        print "rm %s" % self.path
        os.remove(self.path)
class binStoreNewest:
    # Keeps, for each of the binNum newest groups, the newest archive seen.
    # Group ids are sortable strings (e.g. "%Y%m%d"), so sorting keys puts
    # groups in chronological order.
    # (Python 2: relies on dict.keys() returning a sortable list.)
    def __init__(self, binNum):
        self.bins = {}        # group id -> newest archive of that group
        self.binNum = binNum  # how many of the newest groups to retain
    def add(self, id, item):
        # add a new archive to the clustering
        if id in self.bins: # there is an archive from this group already
            storedItem = self.bins[id]
            if storedItem.time < item.time: # act item is newer then the stored one,
                self.bins[id] = item # replace that
        else:
            self.bins[id] = item # there wasn't archive for this group till now
        # Drop the oldest groups so only the binNum newest remain.
        keys = self.bins.keys()
        keys.sort()
        for id in keys[:-self.binNum]: # keep the binNum newest ones
            del self.bins[id]
    def getPaths(self):
        # Paths of the retained archives (one per surviving group).
        return [item.path for item in self.bins.values()]
def getBinTops(sourceArray, binNum, clusterFunction):
    # Cluster the archives by clusterFunction, then return the paths of the
    # newest archive in each of the binNum newest groups.
    store = binStoreNewest(binNum)
    for entry in sourceArray:
        store.add(clusterFunction(entry), entry)
    return store.getPaths()
if __name__ == '__main__':
    # Example usage: keep the newest archive from each of the last 7 days,
    # 4 weeks, 12 months and 10 years; delete everything else.
    if len(sys.argv) >= 2:
        files = sys.argv[1:]
    else:
        files = glob.glob("./data/*")
    archives = [archive(filename) for filename in files]
    daily = getBinTops(archives, 7, lambda item: item.day)
    weekly = getBinTops(archives, 4, lambda item: item.week)
    monthly = getBinTops(archives, 12, lambda item: item.month)
    yearly = getBinTops(archives, 10, lambda item: item.year)
    keepPaths = daily + weekly + monthly + yearly
    for item in archives:
        if item.path not in keepPaths:
            item.rm()
| StarcoderdataPython |
# Repo: damicoedoardo/NNMF
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 16/09/2017
@author: XXX
"""
from RecSysFramework.Recommender import Recommender
from RecSysFramework.Recommender.KNN import ItemKNNCustomSimilarity
from RecSysFramework.Utils import check_matrix
from RecSysFramework.Utils import EarlyStoppingModel
from RecSysFramework.Recommender.DataIO import DataIO
import seaborn as sns
from sklearn.preprocessing import normalize
import numpy as np
import os
import scipy.sparse as sps
from RecSysFramework.Utils.compute_popularity import compute_popularity_item, compute_popularity_user
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import math
def compute_W_sparse_from_item_latent_factors(ITEM_factors, topK=100):
    """Build a sparse item-item similarity matrix from latent factors.

    Similarity is the dot product of item factor vectors. For each target
    item (one column of the result), only the topK most similar items are
    kept and exact zeros are dropped.

    Fix: topK is clamped to n_items, so requesting more neighbours than
    items no longer makes argpartition fail with an out-of-range kth.

    :param ITEM_factors: (n_items, n_factors) array of item latent factors
    :param topK: neighbours to keep per item (clamped to n_items)
    :return: csr_matrix of shape (n_items, n_items), dtype float32
    """
    n_items, n_factors = ITEM_factors.shape
    # Clamp so argpartition(topK - 1) below is always valid.
    topK = min(topK, n_items)

    block_size = 100
    values = []
    rows = []
    cols = []

    # Process items in blocks to bound the memory used by the dense
    # (block_size x n_items) similarity slice.
    for start_item in range(0, n_items, block_size):
        end_item = min(n_items, start_item + block_size)
        this_block_weight = np.dot(ITEM_factors[start_item:end_item, :], ITEM_factors.T)

        for col_index_in_block in range(this_block_weight.shape[0]):
            this_column_weights = this_block_weight[col_index_in_block, :]
            item_original_index = start_item + col_index_in_block

            # Sorting is done in two steps, faster than a full np.argsort:
            # partition out the topK relevant items, then sort only those.
            relevant_items_partition = (-this_column_weights).argpartition(topK - 1)[0:topK]
            relevant_items_partition_sorting = np.argsort(-this_column_weights[relevant_items_partition])
            top_k_idx = relevant_items_partition[relevant_items_partition_sorting]

            # Incrementally build the sparse matrix; do not store exact zeros.
            notZerosMask = this_column_weights[top_k_idx] != 0.0
            numNotZeros = np.sum(notZerosMask)

            values.extend(this_column_weights[top_k_idx][notZerosMask])
            rows.extend(top_k_idx[notZerosMask])
            cols.extend(np.ones(numNotZeros) * item_original_index)

    W_sparse = sps.csr_matrix((values, (rows, cols)),
                              shape=(n_items, n_items),
                              dtype=np.float32)
    return W_sparse
class BaseMatrixFactorizationRecommender(Recommender):
"""
This class refers to a BaseRecommender KNN which uses matrix factorization,
it provides functions to compute item's score as well as a function to save the W_matrix
The prediction for cold users will always be -inf for ALL items
"""
def __init__(self, URM_train):
super(BaseMatrixFactorizationRecommender, self).__init__(URM_train)
self.use_bias = False
self.user_update_count = None
self.item_update_count = None
self._cold_user_KNN_model_flag = False
self._cold_user_KNN_estimated_factors_flag = False
self._warm_user_KNN_mask = np.zeros(len(self._get_cold_user_mask()), dtype=np.bool)
    def set_URM_train(self, URM_train_new, estimate_model_for_cold_users = False, topK = 100, **kwargs):
        """
        Replace the train URM and optionally estimate a model for cold users.

        :param URM_train_new: new train URM; must have the same shape as the current one
        :param estimate_model_for_cold_users: False, or "itemKNN" to build an
            item-item KNN model from the ITEM latent factors, or
            "mean_item_factors" to estimate USER factors for previously cold
            users from their items' factors
        :param topK: neighbourhood size for the estimated itemKNN model
        :param kwargs: not supported; reported and ignored
        :return:
        """
        assert self.URM_train.shape == URM_train_new.shape, "{}: set_URM_train old and new URM train have different shapes".format(self.RECOMMENDER_NAME)
        if len(kwargs)>0:
            self._print("set_URM_train keyword arguments not supported for this recommender class. Received: {}".format(kwargs))
        URM_train_new = check_matrix(URM_train_new, 'csr', dtype=np.float32)
        # Interactions per user in the new URM (CSR row lengths).
        profile_length_new = np.ediff1d(URM_train_new.indptr)
        if estimate_model_for_cold_users == "itemKNN":
            # Derive an item-item similarity from the factorization and wrap it
            # in a KNN recommender to serve users without latent factors.
            self._print("Estimating ItemKNN model from ITEM latent factors...")
            W_sparse = compute_W_sparse_from_item_latent_factors(self.ITEM_factors, topK=topK)
            self._ItemKNNRecommender = ItemKNNCustomSimilarity(URM_train_new)
            self._ItemKNNRecommender.fit(W_sparse, topK=topK)
            self._ItemKNNRecommender_topK = topK
            self._cold_user_KNN_model_flag = True
            self._warm_user_KNN_mask = profile_length_new > 0
            self._print("Estimating ItemKNN model from ITEM latent factors... done!")
        elif estimate_model_for_cold_users == "mean_item_factors":
            self._print("Estimating USER latent factors from ITEM latent factors...")
            cold_user_mask_previous = self._get_cold_user_mask()
            profile_length_sqrt = np.sqrt(profile_length_new)
            # Sum the factors of each cold user's items, ...
            self.USER_factors[cold_user_mask_previous,:] = URM_train_new.dot(self.ITEM_factors)[cold_user_mask_previous,:]
            self._cold_user_KNN_estimated_factors_flag = True
            # ... then divide every row by the sqrt of the profile length
            # (skipping users with empty profiles to avoid division by zero).
            for user_index in range(self.n_users):
                if cold_user_mask_previous[user_index] and profile_length_sqrt[user_index] > 0:
                    self.USER_factors[user_index, :] /= profile_length_sqrt[user_index]
            self._print("Estimating USER latent factors from ITEM latent factors... done!")
        self.URM_train = check_matrix(URM_train_new.copy(), 'csr', dtype=np.float32)
        self.URM_train.eliminate_zeros()
#########################################################################################################
########## ##########
########## COMPUTE ITEM SCORES ##########
########## ##########
#########################################################################################################
    def _compute_item_score(self, user_id_array, items_to_compute = None):
        """Compute the dot-product scores USER_factors x ITEM_factors for the given users.

        USER_factors is n_users x n_factors
        ITEM_factors is n_items x n_factors

        The prediction for cold users will always be -inf for ALL items

        :param user_id_array: ids of the users to score
        :param items_to_compute: optional item ids; every other item keeps a -inf score
        :return: dense |user_id_array| x n_items score matrix
        """
        assert self.USER_factors.shape[1] == self.ITEM_factors.shape[1], \
            "{}: User and Item factors have inconsistent shape".format(self.RECOMMENDER_NAME)

        assert self.USER_factors.shape[0] > user_id_array.max(),\
            "{}: Cold users not allowed. Users in trained model are {}, requested prediction for users up to {}".format(
                self.RECOMMENDER_NAME, self.USER_factors.shape[0], user_id_array.max())

        if items_to_compute is not None:
            # Initialize everything to -inf, then fill only the requested items
            item_scores = - np.ones((len(user_id_array), self.ITEM_factors.shape[0]), dtype=np.float32)*np.inf
            item_scores[:, items_to_compute] = np.dot(self.USER_factors[user_id_array], self.ITEM_factors[items_to_compute,:].T)
        else:
            item_scores = np.dot(self.USER_factors[user_id_array], self.ITEM_factors.T)

        # No need to select only the specific negative items or warm users because the -inf score will not change
        if self.use_bias:
            item_scores += self.ITEM_bias + self.GLOBAL_bias
            item_scores = (item_scores.T + self.USER_bias[user_id_array]).T

        item_scores = self._compute_item_score_postprocess_for_cold_users(user_id_array, item_scores, items_to_compute = items_to_compute)
        item_scores = self._compute_item_score_postprocess_for_cold_items(item_scores)

        return item_scores
    def _compute_item_score_postprocess_for_cold_users(self, user_id_array, item_scores, items_to_compute = None):
        """
        Remove cold users from the computed item scores, setting them to -inf,
        or serve them through the fallback KNN model when one was estimated.

        :param user_id_array: users the rows of ``item_scores`` refer to
        :param item_scores: dense score matrix, patched in place
        :param items_to_compute: forwarded to the fallback KNN model, if any
        :return: the patched score matrix
        """
        cold_users_batch_mask = self._get_cold_user_mask()[user_id_array]

        # Set as -inf all cold user scores
        # (skipped entirely when cold users already received estimated latent factors)
        if cold_users_batch_mask.any() and not self._cold_user_KNN_estimated_factors_flag:
            if self._cold_user_KNN_model_flag:
                # Add KNN scores for users cold for MF but warm in KNN model
                cold_users_in_MF_warm_in_KNN_mask = np.logical_and(cold_users_batch_mask, self._warm_user_KNN_mask[user_id_array])

                item_scores[cold_users_in_MF_warm_in_KNN_mask, :] = self._ItemKNNRecommender._compute_item_score(user_id_array[cold_users_in_MF_warm_in_KNN_mask], items_to_compute=items_to_compute)

                # Set cold users as those neither in MF nor in KNN
                cold_users_batch_mask = np.logical_and(cold_users_batch_mask, np.logical_not(cold_users_in_MF_warm_in_KNN_mask))

            # Set as -inf all remaining cold user scores
            item_scores[cold_users_batch_mask, :] = - np.ones_like(item_scores[cold_users_batch_mask, :]) * np.inf

        return item_scores
#########################################################################################################
########## ##########
########## LOAD AND SAVE ##########
########## ##########
#########################################################################################################
    def _get_dict_to_save(self):
        """Collect the attributes to serialize when saving the model.

        :return: dict of attribute name -> value; bias terms are included only
            when ``use_bias`` is set, and the KNN similarity pieces only when a
            cold-user KNN model was estimated
        """
        data_dict_to_save = {"USER_factors": self.USER_factors,
                             "ITEM_factors": self.ITEM_factors,
                             "use_bias": self.use_bias,
                             "_cold_user_mask": self._cold_user_mask,
                             "_cold_user_KNN_model_flag": self._cold_user_KNN_model_flag,
                             "_cold_user_KNN_estimated_factors_flag": self._cold_user_KNN_estimated_factors_flag}

        if self.use_bias:
            data_dict_to_save["ITEM_bias"] = self.ITEM_bias
            data_dict_to_save["USER_bias"] = self.USER_bias
            data_dict_to_save["GLOBAL_bias"] = self.GLOBAL_bias

        if self._cold_user_KNN_model_flag:
            # Only the similarity matrix and topK are saved; load_model rebuilds
            # the ItemKNN recommender from these two pieces
            data_dict_to_save["_ItemKNNRecommender_W_sparse"] = self._ItemKNNRecommender.W_sparse
            data_dict_to_save["_ItemKNNRecommender_topK"] = self._ItemKNNRecommender_topK

        return data_dict_to_save
    def load_model(self, folder_path, file_name=None):
        """Load the saved attributes and rebuild the cold-user KNN model, if any.

        :param folder_path: folder the model was saved in
        :param file_name: optional file name; defaults to the superclass convention
        """
        super(BaseMatrixFactorizationRecommender, self).load_model(folder_path, file_name=file_name)

        if self._cold_user_KNN_model_flag:
            # Rebuild the ItemKNN recommender from the saved similarity and topK
            self._ItemKNNRecommender = ItemKNNCustomSimilarity(self.URM_train)
            self._ItemKNNRecommender.fit(self._ItemKNNRecommender_W_sparse, topK=self._ItemKNNRecommender_topK)

            # The temporary pieces are no longer needed once the model is rebuilt
            del self._ItemKNNRecommender_W_sparse
            del self._ItemKNNRecommender_topK
def plot_items_sampled_stats(self, items_to_plot=5000, plot_complete_graphic=True, normalized=False):
"""
it makes and display how many times a given item has been updated, associating to each of them a color based on
the popularity
:param items_to_plot: how many items per plot
:param plot_complete_graphic: where to plot the complete graphic all at once
:return:
"""
assert self.item_update_count is not None, 'the model has not implemented this function yet or have not been trained'
popularity_list = compute_popularity_item(self.URM_train)
item, interaction = zip(* popularity_list)
colors = cm.coolwarm(np.array(interaction))
color_mapping_dict = dict(zip(item, colors))
#item, num_sampled = zip(*self.item_update_count)
unsorted_list = list(self.item_update_count.items())
sorted_list = sorted(unsorted_list, key=lambda x: x[1])
item_id, sampled_count = zip(*sorted_list)
if normalized:
sampled_count = sampled_count/max(sampled_count)
# map the popularity of the item to its color
plot_colors = []
for id in item_id:
plot_colors.append(color_mapping_dict[id])
x_pos = np.arange(len(item_id))
for i in range(math.ceil(self.n_items/items_to_plot)):
if (i+1)*items_to_plot < len(item_id):
x_pos_slice = x_pos[items_to_plot*i:items_to_plot*(i+1)]
sampled_count_slice = sampled_count[items_to_plot*i:items_to_plot*(i+1)]
plot_colors_slice = plot_colors[items_to_plot*i:items_to_plot*(i+1)]
else:
x_pos_slice = x_pos[items_to_plot*i:-1]
sampled_count_slice = sampled_count[items_to_plot*i:-1]
plot_colors_slice = plot_colors[items_to_plot*i:-1]
plt.bar(x_pos_slice, sampled_count_slice, align='center', color=np.array(plot_colors_slice))
plt.show()
if plot_complete_graphic:
plt.bar(x_pos, sampled_count, align='center', color=np.array(plot_colors))
plt.show()
def plot_users_sampled_stats(self):
#TODO MERGE THIS IN THE METHOD ABOVE!!!
assert self.user_update_count is not None, 'the model has not implemented this function yet or have not been trained'
popularity_list = compute_popularity_user(self.URM_train)
user, interaction = zip(* popularity_list)
colors = cm.PiYG(np.array(interaction))
color_mapping_dict = dict(zip(user, colors))
#item, num_sampled = zip(*self.item_update_count)
unsorted_list = list(self.user_update_count.items())
sorted_list = sorted(unsorted_list, key=lambda x: x[1])
user_id, sampled_count = zip(*sorted_list)
sampled_count = sampled_count/max(sampled_count)
# map the popularity of the item to its color
plot_colors = []
for id in user_id:
plot_colors.append(color_mapping_dict[id])
x_pos = np.arange(len(user_id))
plt.bar(x_pos, sampled_count, align='center', color=np.array(plot_colors))
plt.show()
def plot_latent_representations_heatmap(self):
"""
plot the latent representation of the items and user using an heatmap, the value of the representation
are normalized
"""
# retrieve the latent factors of the users and the items
print('Normalizing...')
items_factors_normalized = normalize(self.ITEM_factors, axis=1, norm='l1')
users_factors_normalized = normalize(self.USER_factors, axis=1, norm ='l1')
print('Done!')
ax1 = plt.subplot(211)
ax2 = plt.subplot(212)
heat_map_items = sns.heatmap(items_factors_normalized, xticklabels=False, yticklabels=False, annot=False,
cmap='Reds', ax=ax1)
heat_map_users = sns.heatmap(users_factors_normalized, xticklabels=False, yticklabels=False, annot=False,
cmap='Greens', ax=ax2)
plt.show()
| StarcoderdataPython |
3288965 | <filename>gwrapper/string_filter.py
import requests
class String_Filter(object):
    """Filter pull requests by matching strings against commit messages or file names.

    The match can require ``any`` or ``all`` of the configured strings and runs
    either against commit-message words or the names of the changed files.
    """

    def __init__(self, list, text_list, filter, entity, auth):
        # NOTE: the parameter names (including the builtin-shadowing ``list``
        # and ``filter``) are part of the public signature and are kept as-is.
        self.pr_list = list
        self.text_list = text_list
        self.filter = filter
        self.entity = entity
        self.auth = auth
        self.response_list = []

    def filter_by_string(self):
        """Scan every pull request and return the de-duplicated matches."""
        for pull_request in self.pr_list:
            if self.entity == 'commits':
                commit_list = requests.get(
                    pull_request['commits_url'], auth=self.auth
                ).json()
                for commit in commit_list:
                    message_words = commit["commit"]["message"].split()
                    self.filter_check(self.text_list, message_words, pull_request)
            else:
                changed_files = requests.get(
                    pull_request['url'] + '/files', auth=self.auth
                ).json()
                filename_list = [changed["filename"] for changed in changed_files]
                print(filename_list)
                self.filter_check(self.text_list, filename_list, pull_request)
        # Drop duplicates: an entry survives only when it does not appear
        # again later, i.e. the LAST occurrence of each match is kept.
        deduplicated = []
        for position, entry in enumerate(self.response_list):
            if entry not in self.response_list[position + 1:]:
                deduplicated.append(entry)
        return deduplicated

    def filter_check(self, string_list, split_string, pr):
        """Record ``pr`` when the configured any/all string match succeeds."""
        matched = False
        if self.filter == 'any':
            matched = any(x in split_string for x in string_list)
        elif self.filter == 'all':
            matched = all(x in split_string for x in string_list)
        if matched:
            self.response_list.append({pr['title']: pr['url']})
| StarcoderdataPython |
169115 | import sys
import h5py
import numpy as np
from pydata.increment import __next_index__
if 'pyslave' in sys.modules :
from pyslave import __slave_disp__ as disp
else:
disp = print
class createh5(h5py.File):
    """H5 file wrapper that saves data into auto-incremented dataset names.

    Use :meth:`append` to add data to the file.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Last counter used for each dataset base name
        self.__data_counter__ = dict()
        self.fname = args[0]

    def __next_dataset__(self, dataset, ndigits):
        """Return (counter, name) for the next dataset with base name ``dataset``."""
        if not dataset in self.__data_counter__ :
            # First use in this session: scan existing keys for the next index
            counter = __next_index__(dataset,'',self.keys())
        else:
            counter = self.__data_counter__[dataset] + 1
        return counter, dataset + str(counter).zfill(ndigits)

    def append(self, data, dataset='data', ndigits=3, attrs=None, **kwargs):
        """Create a new dataset with automatic increment of the name and save data to it.

        FIX: the original body referenced ``dataset``, ``ndigits`` and ``attrs``
        without defining them (guaranteed NameError); they are now keyword
        parameters with defaults.  The stray ``data.save_h5(self,)`` call, which
        would have written the data a second time, was removed.

        :param data: instance of the pyslave ``datadict.Data`` class
        :param dataset: base name of the dataset (a zero-padded counter is appended)
        :param ndigits: number of digits used to zero-pad the counter
        :param attrs: optional dict of extra attributes stored on the dataset
        :param kwargs: forwarded to ``h5py.File.create_dataset``
        """
        counter, dataset_name = self.__next_dataset__(dataset, ndigits)
        ds = super().create_dataset(dataset_name, data=data.__data__, **kwargs)
        attributes = data.__attributes__.copy()
        if attrs : attributes.update(attrs)
        self.__data_counter__[dataset] = counter
        for k,v in attributes.items() :
            ds.attrs[k] = v
        self.flush()
        msg = 'Data saved to {0} in dataset {1}.'.format(self.fname, dataset_name)
        disp(msg)
class loadh5:
    """Load all datasets of a H5 file into numpy arrays.

    Every dataset is assumed to share the dtype, attribute keys and per-field
    lengths of the first dataset in the file.

    Example :
        d = loadh5('Sij_vs_Temperature.h5')
        print(d)
            Loaded from Sij_vs_Temperature.h5 with 70 datasets
            Data fields : freq,Sij
            Attributes : T
        plot(T, abs(Sij).max(1))
    """
    def __init__(self, filename,print_file = True):
        with h5py.File(filename,'r') as f:
            keys = list(f.keys())
            length = len(keys)
            # The first dataset defines the expected structure of all the others
            dataset = f[keys[0]]
            attr_keys = list(dataset.attrs.keys())
            data_keys = dataset.dtype.names
            # Build attribute array
            all_attrs = { k:np.empty(length, dtype=type(dataset.attrs[k]) ) for k in attr_keys}
            all_data = { k:np.empty((length, len(dataset[k])), dtype=dataset[k].dtype) for k in data_keys}
            for i,d in enumerate(f.values()):
                for k in attr_keys : all_attrs[k][i] = d.attrs[k]
                for k in data_keys : all_data[k][i] = d[k]
        # Expose each attribute and data field as an instance attribute
        for k in attr_keys:
            setattr(self, k, all_attrs[k])
        for k in data_keys:
            setattr(self, k, all_data[k])
        self.attr_keys = attr_keys
        self.data_keys = data_keys
        self.length = length
        self.filename = filename
        if print_file:
            print(self)

    def __repr__(self):
        """Return a human-readable summary of the loaded file."""
        s = "Loaded from {0} with {1} datasets\n".format(self.filename, self.length)
        s += "Data fields : " + ', '.join(self.data_keys) + '\n'
        s += "Attributes : " + ', '.join(self.attr_keys)
        return s
| StarcoderdataPython |
3326833 | # Generated by Django 2.0.5 on 2019-03-26 06:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (2019-03-26) for the CRYE app.

    Reworks ``tablaamortizacion``: drops the old balance/status/payment fields
    and adds the amortization-table fields (IVA, rentas, saldo insoluto, ...);
    also loosens two ``walletcredit`` field definitions.
    """

    dependencies = [
        ('CRYE', '0018_auto_20190326_0006'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='tablaamortizacion',
            name='balanceInsoluto',
        ),
        migrations.RemoveField(
            model_name='tablaamortizacion',
            name='estatus',
        ),
        migrations.RemoveField(
            model_name='tablaamortizacion',
            name='pagado',
        ),
        migrations.RemoveField(
            model_name='tablaamortizacion',
            name='renta',
        ),
        migrations.AddField(
            model_name='tablaamortizacion',
            name='activo',
            field=models.CharField(blank=True, max_length=15, null=True, verbose_name='Activo'),
        ),
        migrations.AddField(
            model_name='tablaamortizacion',
            name='dias_periodo',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tablaamortizacion',
            name='id_amortizacion',
            field=models.CharField(blank=True, max_length=250, null=True, verbose_name='ID amortizacion'),
        ),
        migrations.AddField(
            model_name='tablaamortizacion',
            name='iva_capital',
            field=models.FloatField(default=0, verbose_name='Iva Capital'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tablaamortizacion',
            name='iva_interes',
            field=models.FloatField(default=0, verbose_name='Iva Interes'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tablaamortizacion',
            name='pago',
            field=models.CharField(blank=True, max_length=15, null=True, verbose_name='Pago'),
        ),
        migrations.AddField(
            model_name='tablaamortizacion',
            name='renta_mensual',
            field=models.FloatField(default=0, verbose_name='Renta Mensual'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tablaamortizacion',
            name='renta_total',
            field=models.FloatField(default=0, verbose_name='Renta Total'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tablaamortizacion',
            name='saldo_insoluto',
            field=models.FloatField(default=0, verbose_name='Saldo Insoluto'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='walletcredit',
            name='id_tabla',
            field=models.CharField(blank=True, max_length=250, null=True, verbose_name='id_tabla'),
        ),
        migrations.AlterField(
            model_name='walletcredit',
            name='walletcredit_tipo',
            field=models.CharField(choices=[('CREDITO', 'CREDITO'), ('ARRENDAMIENTO', 'ARRENDAMIENTO')], default='CREDITO', max_length=250, verbose_name='Obligación'),
        ),
    ]
| StarcoderdataPython |
3348206 | import asyncio
import dataclasses
from enum import IntEnum
from typing import Any, List
from littlelambocoin.protocols.wallet_protocol import CoinStateUpdate, NewPeakWallet
from littlelambocoin.server.ws_connection import WSLittlelambocoinConnection
from littlelambocoin.types.blockchain_format.sized_bytes import bytes32
class NewPeakQueueTypes(IntEnum):
# Lower number means higher priority in the queue
COIN_ID_SUBSCRIPTION = 1
PUZZLE_HASH_SUBSCRIPTION = 2
FULL_NODE_STATE_UPDATED = 3
NEW_PEAK_WALLET = 4
@dataclasses.dataclass
class NewPeakItem:
    """Priority-queue entry ordered first by item type, then by peak height.

    Subscription items (coin id / puzzle hash) compare as mutually equal;
    state-update and new-peak items are ordered by ``data[0].height``, where
    ``data`` is a (message, peer) tuple.
    """
    item_type: NewPeakQueueTypes
    data: Any

    def __lt__(self, other):
        # Different types: the lower (higher-priority) type sorts first
        if self.item_type != other.item_type:
            return self.item_type < other.item_type
        if self.item_type in {NewPeakQueueTypes.COIN_ID_SUBSCRIPTION, NewPeakQueueTypes.PUZZLE_HASH_SUBSCRIPTION}:
            return False  # All subscriptions are equal
        return self.data[0].height < other.data[0].height

    def __le__(self, other):
        if self.item_type != other.item_type:
            return self.item_type < other.item_type
        if self.item_type in {NewPeakQueueTypes.COIN_ID_SUBSCRIPTION, NewPeakQueueTypes.PUZZLE_HASH_SUBSCRIPTION}:
            return True  # All subscriptions are equal
        return self.data[0].height <= other.data[0].height

    def __gt__(self, other):
        if self.item_type != other.item_type:
            return self.item_type > other.item_type
        if self.item_type in {NewPeakQueueTypes.COIN_ID_SUBSCRIPTION, NewPeakQueueTypes.PUZZLE_HASH_SUBSCRIPTION}:
            return False  # All subscriptions are equal
        return self.data[0].height > other.data[0].height

    def __ge__(self, other):
        if self.item_type != other.item_type:
            return self.item_type > other.item_type
        if self.item_type in {NewPeakQueueTypes.COIN_ID_SUBSCRIPTION, NewPeakQueueTypes.PUZZLE_HASH_SUBSCRIPTION}:
            return True  # All subscriptions are equal
        return self.data[0].height >= other.data[0].height
class NewPeakQueue:
    """Async priority queue of wallet work items, ordered by NewPeakQueueTypes priority."""

    def __init__(self, inner_queue: asyncio.PriorityQueue):
        self._inner_queue: asyncio.PriorityQueue = inner_queue

    async def subscribe_to_coin_ids(self, coin_ids: List[bytes32]):
        """Enqueue a coin-id subscription (highest priority)."""
        await self._inner_queue.put(NewPeakItem(NewPeakQueueTypes.COIN_ID_SUBSCRIPTION, coin_ids))

    async def subscribe_to_puzzle_hashes(self, puzzle_hashes: List[bytes32]):
        """Enqueue a puzzle-hash subscription."""
        await self._inner_queue.put(NewPeakItem(NewPeakQueueTypes.PUZZLE_HASH_SUBSCRIPTION, puzzle_hashes))

    async def full_node_state_updated(self, coin_state_update: CoinStateUpdate, peer: WSLittlelambocoinConnection):
        """Enqueue a coin-state update received from ``peer``."""
        await self._inner_queue.put(NewPeakItem(NewPeakQueueTypes.FULL_NODE_STATE_UPDATED, (coin_state_update, peer)))

    async def new_peak_wallet(self, new_peak: NewPeakWallet, peer: WSLittlelambocoinConnection):
        """Enqueue a new-peak notification received from ``peer`` (lowest priority)."""
        await self._inner_queue.put(NewPeakItem(NewPeakQueueTypes.NEW_PEAK_WALLET, (new_peak, peer)))

    async def get(self) -> NewPeakItem:
        """Pop and return the highest-priority item, waiting if the queue is empty."""
        return await self._inner_queue.get()
| StarcoderdataPython |
3378782 | <reponame>osoco/better-ways-of-thinking-about-software<filename>Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/djangoapps/pipeline_js/utils.py
"""
Utilities for returning XModule JS (used by requirejs)
"""
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
def get_xmodule_urls():
    """
    Returns a list of the URLs to hit to grab all the XModule JS
    """
    module_js_settings = settings.PIPELINE['JAVASCRIPT']["module-js"]
    if settings.DEBUG:
        # In debug mode serve the individual sources (with coffee compiled to js)
        paths = [source.replace(".coffee", ".js")
                 for source in module_js_settings["source_filenames"]]
    else:
        # In production everything is bundled into a single output file
        paths = [module_js_settings["output_filename"]]
    return [staticfiles_storage.url(path) for path in paths]
| StarcoderdataPython |
4828154 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""System tests for Google Cloud Build operators"""
import os
from itertools import product
import pytest
from airflow.providers.google.cloud.example_dags.example_gcs_to_sftp import (
BUCKET_SRC,
OBJECT_SRC_1,
OBJECT_SRC_2,
)
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
@pytest.mark.credential_file(GCP_GCS_KEY)
class GcsToSftpExampleDagsSystemTest(GoogleSystemTest):
    """System test running the ``example_gcs_to_sftp`` DAG against real GCP resources."""

    @provide_gcp_context(GCP_GCS_KEY)
    def setUp(self):
        super().setUp()

        # 1. Create buckets
        self.create_gcs_bucket(BUCKET_SRC)

        # 2. Prepare files: one 1 MiB random blob per (bucket/subdir, object) pair
        for bucket_src, object_source in product(
            (BUCKET_SRC, "{}/subdir-1".format(BUCKET_SRC), "{}/subdir-2".format(BUCKET_SRC),),
            (OBJECT_SRC_1, OBJECT_SRC_2),
        ):
            source_path = "gs://{}/{}".format(bucket_src, object_source)
            self.upload_content_to_gcs(
                lines=f"{os.urandom(1 * 1024 * 1024)}", bucket=source_path, filename=object_source
            )

    @provide_gcp_context(GCP_GCS_KEY)
    def test_run_example_dag(self):
        # Runs the example DAG end to end
        self.run_dag("example_gcs_to_sftp", CLOUD_DAG_FOLDER)

    @provide_gcp_context(GCP_GCS_KEY)
    def tearDown(self):
        # Clean up the source bucket created in setUp
        self.delete_gcs_bucket(BUCKET_SRC)
        super().tearDown()
| StarcoderdataPython |
3269052 | import pygame
import stale
from sprajtszit import SpriteSheet
class Bullet(pygame.sprite.Sprite):
    """Animated fireball projectile travelling left ("L") or right ("R").

    FIX: the animation frame lists used to be *class-level* mutable lists that
    every ``__init__`` appended to, so each new Bullet duplicated all six
    frames for every existing bullet.  The frames are now per-instance.
    """

    # (x, y) offsets of the six 72x58 animation frames in the sprite sheet
    _FRAME_OFFSETS = [(0, 0), (72, 0), (0, 58), (72, 58), (0, 116), (72, 116)]

    def __init__(self, kierunek):
        super().__init__()
        self.direction = kierunek

        sprite_sheet = SpriteSheet("Fiyah.png")
        self.fire_frames_r = []
        self.fire_frames_l = []
        for frame_x, frame_y in self._FRAME_OFFSETS:
            frame = sprite_sheet.get_image(frame_x, frame_y, 72, 58)
            self.fire_frames_r.append(frame)
            # The left-facing frames are the mirrored right-facing ones
            self.fire_frames_l.append(pygame.transform.flip(frame, True, False))

        if self.direction == "R":
            self.image = self.fire_frames_r[0]
        else:
            self.image = self.fire_frames_l[0]
        self.rect = self.image.get_rect()

    def update(self):
        """Advance the bullet 6 px and pick the animation frame from its x position."""
        if self.direction == "R":
            self.rect.x += 6
            frames = self.fire_frames_r
        else:
            self.rect.x -= 6
            frames = self.fire_frames_l
        self.image = frames[(self.rect.x // 30) % 6]
| StarcoderdataPython |
def regular_function(s):
    """Return *s* with its first character title-cased and the rest lower-cased."""
    return s.capitalize()
def user_of_function(words, f):
    """Apply *f* to every element of *words* and print each result on its own line."""
    for transformed in map(f, words):
        print(transformed)
# Sample data used to demonstrate the interchangeable call styles below.
data = ['a', 'b', 'c']

# equivalent behavior: a named function, an inline lambda, and a lambda
# that simply delegates to the named function all print the same output
user_of_function(data, regular_function)
user_of_function(data, lambda w: w.capitalize())
user_of_function(data, lambda w: regular_function(w))
| StarcoderdataPython |
3226489 | <reponame>ambitiouscat/KBE_Ball<filename>Kbe_Svr/server_assets/scripts/common/GameConfigs.py
# -*- coding: utf-8 -*-
"""
Game configuration constants: entity states, room limits and map/food tuning.
"""
# ------------------------------------------------------------------------------
# entity state
# ------------------------------------------------------------------------------
ENTITY_STATE_UNKNOW = -1
ENTITY_STATE_SAFE = 0
ENTITY_STATE_FREE = 1
ENTITY_STATE_MAX = 4

# Maximum number of players in one room
ROOM_MAX_PLAYER = 35

# Maximum number of pieces a player may split into
PLAYER_LIMIT_SPLIT = 16

# Duration of one game round (seconds)
GAME_ROUND_TIME = 720 #60 * 12

# Interval (seconds) at which food and smash-ball mass is rebalanced in game
GAME_BALANCE_MASS_TIME = 1

# Bot AI update frequency (seconds)
BOTS_UPDATE_TIME = 0.3

# Map size (meters)
GAME_MAP_SIZE = 200

# Test randomness constant -- purpose not documented in this file
TEST_RAND = 20

# Maximum amount of food on the map
MAP_FOOD_MAX = 3000

# Maximum amount of food a player can spit out
FIRE_FOOD_MAX = 20

# Maximum number of smashers (viruses) on the map
SMASH_MAX = 100
| StarcoderdataPython |
93306 | import tensorflow as tf
import numpy as np
ds = tf.contrib.distributions
def decode(z, observable_space_dims):
    """Decoder p(x|z): map latent codes to a Bernoulli over flattened observations.

    :param z: latent code tensor; assumed shape (batch, latent_dim) -- TODO confirm
    :param observable_space_dims: shape of one observation (flattened via prod)
    :return: a ``tf.contrib.distributions.Bernoulli`` over the flattened observation
    """
    with tf.variable_scope('Decoder', [z]):
        logits = tf.layers.dense(z, 200, activation=tf.nn.tanh)
        logits = tf.layers.dense(logits, np.prod(observable_space_dims))
        p_x_given_z = ds.Bernoulli(logits=logits)
    return p_x_given_z
def encoder(x, observable_space_dim, latent_dim):
    """Encoder q(z|x): map observations to a diagonal Gaussian over latents.

    :param x: observation tensor; reshaped to (batch, prod(observable_space_dim))
    :param observable_space_dim: shape of one observation
    :param latent_dim: dimensionality of the latent space
    :return: a ``tf.contrib.distributions.MultivariateNormalDiag`` over z
    """
    with tf.variable_scope('Encoder', [x]):
        x = tf.reshape(x, [-1, np.prod(observable_space_dim)])
        h = tf.layers.dense(x, 10, activation=tf.nn.tanh)
        mu = tf.layers.dense(h, latent_dim)
        # NOTE(review): this head feeds MultivariateNormalDiag's scale argument,
        # which expects a standard deviation, but the name sigma_sq suggests a
        # variance and no positivity transform is applied -- confirm intent.
        sigma_sq = tf.layers.dense(h, latent_dim)
        q_z_given_x = ds.MultivariateNormalDiag(mu, sigma_sq)
    return q_z_given_x
| StarcoderdataPython |
58387 | <reponame>alliance-genome/agr_literature_service<filename>backend/app/literature/models/note_model.py<gh_stars>0
from datetime import datetime
from typing import Dict
import pytz
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import DateTime
from sqlalchemy.orm import relationship
from literature.database.base import Base
class NoteModel(Base):
    """SQLAlchemy model for a note attached to either a reference or a resource."""

    __tablename__ = 'notes'
    __versioned__: Dict = {}

    note_id = Column(
        Integer,
        primary_key=True,
        autoincrement=True
    )

    reference_id = Column(
        Integer,
        ForeignKey('references.reference_id'),
        index=True
    )

    reference = relationship(
        'ReferenceModel',
        back_populates="notes"
    )

    resource_id = Column(
        Integer,
        ForeignKey('resources.resource_id'),
        index=True,
    )

    resource = relationship(
        'ResourceModel',
        back_populates="notes"
    )

    # Free-text body of the note (required)
    note = Column(
        String(),
        unique=False,
        nullable=False
    )

    # Optional display name for the note
    name = Column(
        String(),
        unique=False,
        nullable=True
    )

    date_created = Column(
        DateTime,
        nullable=False,
        # FIX: the default must be a callable.  The original passed
        # datetime.now(...) evaluated once at import time, stamping every row
        # with the process start-up time instead of its actual insertion time.
        default=lambda: datetime.now(tz=pytz.timezone('UTC'))
    )
| StarcoderdataPython |
181624 | #Hacked Path to find package. Would no be needed when package is installed via pip
import sys
import os
sys.path.append(os.path.abspath('../pypedream'))
from pypedream import MaterialStream, Flowsheet, ThermodynamicSystem
import pypedream.database.purecomponents as pcdb
# ---------------------------------------------------------------------------
# Demo: build an NRTL thermodynamic system with common solvents, create a
# material stream and run several flash calculations (fixed T, bubble point,
# dew point, temperature sweep).
# FIX: the original bound the ThermodynamicSystem instance to the name ``sys``,
# shadowing the stdlib ``sys`` module imported above; renamed to ``thermo_sys``.
# ---------------------------------------------------------------------------
thermo_sys = ThermodynamicSystem("Test", "NRTL")
thermo_sys.addComponent(pcdb.Water())
thermo_sys.addComponent(pcdb.Ethanol())
thermo_sys.addComponent(pcdb.Methanol())
thermo_sys.addComponent(pcdb.Acetone())
thermo_sys.addComponent(pcdb.Isopropanol())
thermo_sys.addComponent(pcdb.Benzene())
thermo_sys.addComponent(pcdb.Toluene())
thermo_sys.fill()

f = Flowsheet("Test", thermo_sys)
S001 = f.mstr("S001")
#f.mstr("S001").fvpz(100, 0.5,1000,[ ("Water",0.5), ("Methanol",0.5) ])

print("\nFix Temperature\n\n")
f.mstr("S001").ftpz(100, 82.5,1000,[ ("Water",0.5), ("Methanol",0.5) ])
f.solve()
for v in S001.variables.values():
    print(f"{v.fullName()} = {v.displayValue()} {v.displayUnit}")

print("\nBubble Point\n\n")
f.mstr("S001").fpx(100, 1000,[ ("Water",0.5), ("Methanol",0.5) ])
f.solve()
for v in S001.variables.values():
    print(f"{v.fullName()} = {v.displayValue()} {v.displayUnit}")

print("\nDew Point\n\n")
f.mstr("S001").fpy(100, 1000,[ ("Water",0.5), ("Methanol",0.5) ])
f.solve()
for v in S001.variables.values():
    print(f"{v.fullName()} = {v.displayValue()} {v.displayUnit}")

print("\nFix Temperature 25°C\n\n")
f.mstr("S001").ftpz(100,25,1000,[ ("Water",0.5), ("Methanol",0.5) ])
f.solve()
for v in S001.variables.values():
    print(f"{v.fullName()} = {v.displayValue()} {v.displayUnit}")

# Temperature sweep: report the vapour fraction from 25 to 119 degC
for T in range(25,120):
    f.mstr("S001").ftpz(100,T,1000,[ ("Water",0.5), ("Methanol",0.5) ])
    f.solve(silent=True)
    print(f"T = {S001.getVar('T').quantity()}, VF = {S001.getVar('VF').value}")

#print(S001)
'''
f= Flowsheet("Test",sys)
F01= f.unit("F01", "Flash", [f.mstr("S001")],[f.mstr("S002"),f.mstr("S003")])
f.mstr("S001").ftpz(100, 25,1,[ ("Water",0.5), ("Methanol",0.5) ])
f.unit("F01").spec([ ("VF",0.5,SI.none), ("P",1,METRIC.bar) ])
f.init()
f.solve()
print(f.report())
print(f.streamtable())
'''
53597 | #!/usr/bin/env python
# Copyright (c) 2020 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# <https://github.com/boschresearch/amira-blender-rendering>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The scenes module contains scene managers for various setups."""
# base classes
from .basescenemanager import BaseSceneManager # noqa
from .baseconfiguration import BaseConfiguration # noqa
from .threepointlighting import ThreePointLighting # noqa
# composition classes, if inheritance should or cannot be used
from .rendermanager import RenderManager # noqa
# concrete scenes are autoimported later at the end of the file
import os
from functools import partial
from amira_blender_rendering.cli import _auto_import
# Registry mapping scene name -> {'scene': cls, 'config': cls}
_available_scenes = {}


def register(name: str, type: str = None):
    """Register a class/function to the specified available type.

    This function should be used as a class decorator; the name should be
    unique for the scene type that is being registered.

    ..code::

        @register(name='awesome_sauce', type)
        class AnotherClass(MyClass):
            def __init__(self, ...)
            ...

    Args:
        name(str): Name for the scene to register
        type(str): Either 'scene' or 'config' depending on whether the actual
            scene class or the corresponding configuration class is registered

    Returns:
        The class that was passed as argument.

    Raises:
        ValueError: if invalid name/type given.
    """
    scene_name, obj_type = name, type

    def _decorator(obj):
        # Validate the requested registry slot before touching the registry
        if obj_type not in ['scene', 'config']:
            raise ValueError(f'Requested type {obj_type} is not available')
        if scene_name is None:
            raise ValueError(f'Provide an appropriate name for the current scene of type {obj.__name__.lower()}')
        _available_scenes.setdefault(scene_name, dict())[obj_type] = obj
        return obj

    return _decorator
def get_registered(name: str = None):
    """
    Return the registry of classes/functions registered via register(name, type).

    Args:
        name(str): name of a registered scene to query; when None, the whole
            registry dict is returned

    Returns:
        The full registry dict, or the {'scene': ..., 'config': ...} entry
        for ``name``.

    Raises:
        ValueError: if ``name`` is not among the registered scenes.
    """
    if name is None:
        return _available_scenes
    if name not in _available_scenes:
        raise ValueError(f'Queried type "{name}" not among availables: {list(_available_scenes.keys())}')
    return _available_scenes[name]
_auto_import(pkgname=__name__, dirname=os.path.dirname(__file__), subdirs=[''])
| StarcoderdataPython |
1620175 | <gh_stars>10-100
import logging
import os
import sys
from enum import Enum, IntEnum, unique
from typing import Tuple
@unique
class LogFormat(IntEnum):
    """Supported log output formats."""
    stream = 0
    color = 1
    json = 2
    syslog = 3
    plain = 4
    journald = 5
    rich = 6
    rich_tb = 7

    @classmethod
    def choices(cls) -> Tuple[str, ...]:
        """Return all format names, in declaration order."""
        return tuple(cls._member_names_)

    @classmethod
    def default(cls) -> str:
        """Pick the best format for the current environment.

        Preference: ``journald`` when stderr is connected to the systemd
        journal, ``plain`` when stderr is not a TTY, ``rich`` when the rich
        package is importable, otherwise ``color``.
        """
        journal_stream = os.getenv("JOURNAL_STREAM", "")
        if journal_stream:
            # JOURNAL_STREAM holds "<st_dev>:<st_ino>" of the journal socket;
            # compare it against stderr's device and inode
            st_dev, st_ino = map(int, journal_stream.split(":", 1))
            stat = os.stat(sys.stderr.fileno())

            if stat.st_ino == st_ino and stat.st_dev == st_dev:
                return cls.journald.name

        if not os.isatty(sys.stderr.fileno()):
            return cls.plain.name

        try:
            import rich  # noqa
            return cls.rich.name
        except ImportError:
            return cls.color.name
class LogLevel(IntEnum):
    """Symbolic names for the stdlib ``logging`` severity levels."""
    critical = logging.CRITICAL
    error = logging.ERROR
    warning = logging.WARNING
    info = logging.INFO
    debug = logging.DEBUG
    notset = logging.NOTSET

    @classmethod
    def choices(cls) -> Tuple[str, ...]:
        """Return every level name, in declaration order."""
        return tuple(cls.__members__)

    @classmethod
    def default(cls) -> str:
        """Return the name of the default level (``info``)."""
        return cls.info.name
class DateFormat(Enum):
    """Date format string used by each log format (None = handler default)."""
    color = "%Y-%m-%d %H:%M:%S"
    stream = "[%Y-%m-%d %H:%M:%S]"
    # Optimization: special value ``%s`` date will
    # not formatted just returns record created time
    json = "%s"
    syslog = None
    rich = "[%X]"
138532 | <reponame>TomasFisica/Redes_Prac_4<filename>tensor.py
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 10:35:58 2020
@author: tomas
"""
import numpy as np
import keras
from keras.layers import Dense
from keras.models import Sequential
import numpy as np
import copy
from matplotlib import pyplot as plt
# =============================================================================
# Funciones a utilizar
# =============================================================================
# =============================================================================
# Convertir los -1 en 1
# =============================================================================
# Convertir -1 en 0
"""
Function=lambda arg : 0 if arg==-1 else arg
Function2=lambda arg2: list(map(Function,arg2))
# Volver a convertir en array
Function3=lambda arg3: np.array(list(map(list, map(Function2,arg3))))
de=Function3(y_train)
de=np.array(de)
"""
class Xor():
    """Train a small Keras MLP on the parity of N binary inputs.

    The target is 1 when the input contains an even number of ones and 0
    otherwise (the complement of XOR parity).
    """

    def __init__(self, num_neuronas, num_entrada):
        """
        :param num_neuronas: number of hidden units
        :param num_entrada: number of binary inputs (the dataset enumerates all
            2**num_entrada bit patterns)
        """
        self.Num_Neuronas = num_neuronas
        self.Num_Entrada = num_entrada
        self.X_Train = 0
        self.Y_Train = 0
        self.Modelo = 0
        self.Historia = 0
        # Helpers that extend every partial input with a trailing 1 / 0 in place
        self.copiado1 = lambda arg: [i.append(1) for i in arg]
        self.copiado2 = lambda arg: [i.append(0) for i in arg]
        # Target: 1 when the number of ones is even, else 0
        self.xor = lambda arg: 1 if np.count_nonzero(arg) % 2 == 0 else 0
        self.xor_2 = lambda arg1: list(map(self.xor, arg1))

    def Generar_x_train(self, lista):
        """Recursively enumerate every bit pattern of length ``Num_Entrada``."""
        lis1 = copy.deepcopy(lista)
        lis2 = copy.deepcopy(lista)
        self.copiado2(lis2)
        self.copiado1(lis1)
        if len(lis1 + lis2) == 2 ** self.Num_Entrada:
            return lis1 + lis2
        return self.Generar_x_train(lis1 + lis2)

    def Generar_datos(self):
        """Generate the training data: all inputs and their parity targets."""
        self.X_Train = np.array(self.Generar_x_train([[1], [0]]))
        self.Y_Train = np.array(self.xor_2(self.X_Train)).reshape([2 ** self.Num_Entrada, 1])

    def Modelar(self):
        """Build the Keras model: one tanh hidden layer and a sigmoid output."""
        self.Modelo = Sequential()
        self.Modelo.add(Dense(units=self.Num_Neuronas, activation='tanh', input_dim=self.Num_Entrada, use_bias=True))
        self.Modelo.add(Dense(units=1, activation='sigmoid'))
        sgd = keras.optimizers.SGD(lr=0.01)
        self.Modelo.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['binary_accuracy'])

    def Entrenar(self, num_epocas):
        """Train for ``num_epocas`` epochs (batch size 1, shuffled).

        FIX: the original bare ``except`` swallowed *every* error (including
        genuine Keras failures) while printing a misleading message; only the
        errors plausibly caused by a non-integer epoch count are handled now.
        """
        try:
            self.Historia = self.Modelo.fit(self.X_Train, self.Y_Train, epochs=num_epocas, batch_size=1, shuffle=True)
        except (TypeError, ValueError):
            print("Debe ingresar un entero")

    def Graficar(self):
        """Plot the loss (top) and binary accuracy (bottom) over the epochs."""
        Dicc_history = self.Historia.history
        Epohs = len(Dicc_history["loss"])
        plt.subplot(211)
        p1, = plt.plot(range(Epohs), Dicc_history["loss"], "r")
        plt.legend(["Loss"])
        plt.title("Numero de Neuronas {}. Numero de entradas {}".format(self.Num_Neuronas, self.Num_Entrada))
        plt.subplot(212)
        p2, = plt.plot(range(Epohs), Dicc_history["binary_accuracy"], "r")
        plt.legend(["Acc"])
def Programa(neu,inp,epo):
    """Build, train, and plot an XOR network.

    neu -- hidden units, inp -- number of binary inputs, epo -- epochs.
    """
    modelo = Xor(neu, inp)
    modelo.Generar_datos()
    modelo.Modelar()
    modelo.Entrenar(epo)
    modelo.Graficar()
if __name__=="__main__":
    print("Para salir ingrese 0 neuronas y 0 entradas")
    # Interactive loop: run one experiment per iteration until the user
    # enters 0 neurons and 0 inputs.
    while True:
        # NOTE(review): int() raises ValueError on non-numeric input and
        # aborts the loop — confirm whether that is acceptable.
        Neurona=int(input("Neurona: "))
        Entrada=int(input("Entrada: "))
        Epocas=int(input("Epocas: "))
        if Neurona==Entrada and Neurona==0:
            break
        Programa(Neurona,Entrada,Epocas)
| StarcoderdataPython |
3395744 | """Collection of utilities to detect properties of the underlying architecture."""
from subprocess import PIPE, Popen
import numpy as np
import cpuinfo
import psutil
from devito.logger import warning
from devito.tools.memoization import memoized_func
__all__ = ['platform_registry',
'INTEL64', 'SNB', 'IVB', 'HSW', 'BDW', 'SKX', 'KNL', 'KNL7210',
'ARM',
'POWER8', 'POWER9']
@memoized_func
def get_cpu_info():
    """Return a dict describing the host CPU.

    Keys: 'flags' (list of CPU feature flags), 'brand' (model name),
    'logical' and 'physical' (core counts). Sources are tried in order:
    /proc/cpuinfo, the ``cpuinfo`` package, ``psutil``, and ``lscpu``.
    """
    # Obtain textual cpu info
    try:
        with open('/proc/cpuinfo', 'r') as f:
            lines = f.readlines()
    except FileNotFoundError:
        # Non-Linux hosts: fall through to the cpuinfo-package fallback.
        lines = []
    cpu_info = {}
    # Extract CPU flags and branch
    if lines:
        try:
            # First line starting with the key, value after the colon.
            get = lambda k: [i for i in lines if i.startswith(k)][0].split(':')[1].strip()
            cpu_info['flags'] = get('flags').split()
            cpu_info['brand'] = get('model name')
        except IndexError:
            # The /proc/cpuinfo format doesn't follow a standard, and on some
            # more or less exotic combinations of OS and platform it might not
            # be what we expect, hence ending up here
            pass
    if not all(i in cpu_info for i in ('flags', 'brand')):
        # Fallback: the third-party `cpuinfo` package.
        ci = cpuinfo.get_cpu_info()
        cpu_info['flags'] = ci.get('flags')
        cpu_info['brand'] = ci.get('brand')
    # Detect number of logical cores
    logical = psutil.cpu_count(logical=True)
    if not logical:
        # Never bumped into a platform that make us end up here, yet
        # But we try to cover this case anyway, with `lscpu`
        try:
            logical = lscpu()['CPU(s)']
        except KeyError:
            warning("Logical core count autodetection failed")
            logical = 1
    cpu_info['logical'] = logical
    # Detect number of physical cores
    # TODO: on multi-socket systems + unix, can't use psutil due to
    # `https://github.com/giampaolo/psutil/issues/1558`
    mapper = {}
    if lines:
        # Copied and readapted from psutil: map each physical socket id
        # to its core count, using the blank-line-separated sections of
        # /proc/cpuinfo.
        current_info = {}
        for i in lines:
            line = i.strip().lower()
            if not line:
                # New section
                if ('physical id' in current_info and 'cpu cores' in current_info):
                    mapper[current_info['physical id']] = current_info['cpu cores']
                current_info = {}
            else:
                # Ongoing section
                if (line.startswith('physical id') or line.startswith('cpu cores')):
                    key, value = line.split('\t:', 1)
                    current_info[key] = int(value)
    physical = sum(mapper.values())
    if not physical:
        # Fallback 1: it should now be fine to use psutil
        physical = psutil.cpu_count(logical=False)
        if not physical:
            # Fallback 2: we might end up here on more exotic platforms such a Power8
            # Hopefully we can rely on `lscpu`
            try:
                physical = lscpu()['Core(s) per socket'] * lscpu()['Socket(s)']
            except KeyError:
                warning("Physical core count autodetection failed")
                physical = 1
    cpu_info['physical'] = physical
    return cpu_info
@memoized_func
def lscpu():
    """Parse ``lscpu`` output into a ``{field: value}`` dict.

    Values are converted to int where possible, otherwise kept as stripped
    strings. Returns an empty dict when the ``lscpu`` binary is missing or
    produced no output.
    """
    try:
        p1 = Popen(['lscpu'], stdout=PIPE, stderr=PIPE)
    except OSError:
        return {}
    output, _ = p1.communicate()
    if output:
        lines = output.decode("utf-8").strip().split('\n')
        mapper = {}
        # Split on the *first* colon only: modern util-linux emits rows such
        # as "Vulnerability Spectre v1: Mitigation: ..." whose values contain
        # colons themselves, which would make a plain split() unpack fail.
        for k, v in [tuple(i.split(':', 1)) for i in lines]:
            try:
                mapper[k] = int(v)
            except ValueError:
                mapper[k] = v.strip()
        return mapper
    else:
        return {}
@memoized_func
def get_platform():
    """Attempt Platform autodetection.

    Strategies, in order: parse ``gcc -march=native`` output, then the CPU
    brand string; fall back to the generic ``CPU64`` platform.
    """
    # TODO: cannot autodetect the following platforms yet:
    # ['arm', 'power8', 'power9']
    try:
        # First, try leveraging `gcc`
        p1 = Popen(['gcc', '-march=native', '-Q', '--help=target'],
                   stdout=PIPE, stderr=PIPE)
        p2 = Popen(['grep', 'march'], stdin=p1.stdout, stdout=PIPE)
        p1.stdout.close()  # Allow p1 to receive a SIGPIPE if p2 exits.
        output, _ = p2.communicate()
        platform = output.decode("utf-8").split()[1]
        # Full list of possible `platform` values at this point at:
        # https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
        platform = {'sandybridge': 'snb', 'ivybridge': 'ivb', 'haswell': 'hsw',
                    'broadwell': 'bdw', 'skylake': 'skx', 'knl': 'knl'}[platform]
        return platform_registry[platform]
    except Exception:
        # `except Exception` rather than a bare `except:` so SystemExit and
        # KeyboardInterrupt still propagate; any detection failure (missing
        # gcc/grep, unexpected output, unknown march) falls through.
        pass
    # No luck so far; try instead from the brand name
    try:
        cpu_info = get_cpu_info()
        platform = cpu_info['brand'].split()[4]
        platform = {'v2': 'ivb', 'v3': 'hsw', 'v4': 'bdw', 'v5': 'skx'}[platform]
        return platform_registry[platform]
    except Exception:
        pass
    # Stick to default
    return CPU64
class Platform(object):
    """Description of a target compute platform: name, core counts, ISA."""

    def __init__(self, name, **kwargs):
        self.name = name
        # Explicit keyword arguments win over host autodetection.
        info = get_cpu_info()
        self.cores_logical = kwargs.get('cores_logical', info['logical'])
        self.cores_physical = kwargs.get('cores_physical', info['physical'])
        self.isa = kwargs.get('isa', self._detect_isa())

    def __call__(self):
        # A Platform instance acts as its own factory.
        return self

    def __str__(self):
        return self.name

    def __repr__(self):
        return "TargetPlatform[%s]" % self.name

    def _detect_isa(self):
        # The generic base class cannot tell; subclasses refine this.
        return 'unknown'

    @property
    def threads_per_core(self):
        """Hardware threads (SMT) per physical core."""
        return self.cores_logical // self.cores_physical

    @property
    def simd_reg_size(self):
        """Size in bytes of a SIMD register."""
        return isa_registry.get(self.isa, 0)

    def simd_items_per_reg(self, dtype):
        """Number of items of type ``dtype`` that can fit in a SIMD register."""
        itemsize = np.dtype(dtype).itemsize
        assert self.simd_reg_size % itemsize == 0
        return int(self.simd_reg_size / itemsize)
class Cpu64(Platform):
    """A generic 64-bit CPU; concrete families declare their ISA ladder."""

    # The known isas will be overwritten in the specialized classes
    known_isas = tuple()

    def _detect_isa(self):
        """Return the widest ISA advertised by the host CPU flags, or 'cpp'."""
        flags = get_cpu_info()['flags']
        # Scan from widest to narrowest. `startswith` rather than `==`
        # because a flag such as 'avx512' appears as 'avx512f, avx512cd, ...'.
        for isa in reversed(self.known_isas):
            if any(flag.startswith(isa) for flag in flags):
                return isa
        return 'cpp'
class Intel64(Cpu64):
    """Intel x86-64 CPUs; ISA ladder from plain C++ up to AVX-512."""
    known_isas = ('cpp', 'sse', 'avx', 'avx2', 'avx512')
class Arm(Cpu64):
    """ARM CPUs; ISA ladder over the NEON/Advanced-SIMD flag names."""
    known_isas = ('fp', 'asimd', 'asimdrdm')
class Power(Cpu64):
    """IBM POWER CPUs; always report the AltiVec vector ISA."""
    def _detect_isa(self):
        return 'altivec'
class Device(Platform):
    """An accelerator device (e.g. a GPU).

    Sets all attributes directly instead of calling ``Platform.__init__``,
    which would trigger host-CPU autodetection via ``get_cpu_info``.
    """
    def __init__(self, name, cores_logical=1, cores_physical=1, isa='cpp'):
        self.name = name
        self.cores_logical = cores_logical
        self.cores_physical = cores_physical
        self.isa = isa
# CPUs: pre-built singleton Platform instances for every supported family.
CPU64 = Cpu64('cpu64')
INTEL64 = Intel64('intel64')
SNB = Intel64('snb')
IVB = Intel64('ivb')
HSW = Intel64('hsw')
BDW = Intel64('bdw')
SKX = Intel64('skx')
KNL = Intel64('knl')
# Knights Landing 7210: fixed core counts and ISA (autodetection skipped).
KNL7210 = Intel64('knl', cores_logical=256, cores_physical=64, isa='avx512')
ARM = Arm('arm')
POWER8 = Power('power8')
POWER9 = Power('power9')
# Devices
NVIDIAX = Device('nvidiax')
platform_registry = {
    'intel64': INTEL64,
    'snb': SNB,
    'ivb': IVB,
    'hsw': HSW,
    'bdw': BDW,
    'skx': SKX,
    'knl': KNL,
    'knl7210': KNL7210,
    'arm': ARM,
    'power8': POWER8,
    'power9': POWER9,
    # NOTE(review): key is 'nvidiaX' (capital X) while the device's own name
    # is 'nvidiax' — confirm lookups use this exact spelling.
    'nvidiaX': NVIDIAX
}
"""
Registry dict for deriving Platform classes according to the environment variable
DEVITO_PLATFORM. Developers should add new platform classes here.
"""
platform_registry['cpu64'] = get_platform  # Autodetection (callable, not instance)
isa_registry = {
    'cpp': 16,
    'sse': 16,
    'avx': 32,
    'avx2': 32,
    'avx512': 64,
    'altivec': 16,
    'fp': 8,
    'asimd': 16,
    'asimdrdm': 16
}
"""Size in bytes of a SIMD register in known ISAs."""
| StarcoderdataPython |
35588 | <reponame>wujingda/Human-in-the-loop-Deep-Reinforcement-Learning-Hug-DRL-
'''
This is an IA-RL implementation built on the off-policy TD3 algorithm. For the
original IA-RL algorithm, refer to https://arxiv.org/abs/1811.06187.
Since this is a baseline algorithm, most descriptions are omitted; please see
HUGTD3.py for more implementation details.
'''
import pickle
import numpy as np
import torch
import torch.nn as nn
from TD3_based_DRL.priority_replay import Memory
from TD3_based_DRL.network_model import Actor,Critic
from TD3_based_DRL.util import hard_update, soft_update
# Fix all RNG seeds and force deterministic cuDNN for reproducibility.
seed = 2
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)  # NOTE(review): duplicate of the call above; harmless but redundant
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
MEMORY_CAPACITY = 38400  # max transitions held by the prioritized replay buffer
BATCH_SIZE = 128         # minibatch size used in learn()
GAMMA = 0.95             # discount factor (used as br + gamma * target_q)
LR_C = 0.0005            # critic learning rate
LR_A = 0.0002            # actor learning rate
LR_I = 0.01              # NOTE(review): not referenced in this file — confirm if dead
TAU = 0.001              # soft-update mixing rate for the target networks
POLICY_NOSIE = 0.2       # target-policy smoothing noise scale (name typo kept: callers may reference it)
POLICY_FREQ = 1          # delayed policy update period (TD3)
NOISE_CLIP = 0.5         # NOTE(review): stored on the agent but not applied in learn() — see note there
class DRL:
    """IA-RL agent: TD3 actor-critic with prioritized replay and
    intervention-aware actor updates.

    Each stored transition carries the expert (human) action and an
    intervention flag; when the flag is set, the actor is trained to
    imitate the expert action (weighted regression) instead of maximizing
    the critic's Q-value.
    """
    def __init__(self, action_dim, state_dim, LR_C = LR_C, LR_A = LR_A):
        # state_dim is (width, height); the networks consume the flattened size.
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        self.state_dim = state_dim[0] * state_dim[1]
        self.state_dim_width = state_dim[0]
        self.state_dim_height = state_dim[1]
        self.action_dim = action_dim
        self.batch_size = BATCH_SIZE
        self.gamma = GAMMA
        self.tau = TAU
        self.policy_noise = POLICY_NOSIE
        self.noise_clip = NOISE_CLIP
        self.policy_freq = POLICY_FREQ
        self.itera = 0    # learn() step counter, gates the delayed actor update
        self.pointer = 0  # number of transitions stored so far
        self.memory = Memory(MEMORY_CAPACITY)
        # Twin critics + target networks, per the TD3 recipe.
        self.actor = Actor(self.state_dim,self.action_dim).to(self.device)
        self.actor_target = Actor(self.state_dim,self.action_dim).to(self.device)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),LR_A)
        self.critic = Critic(self.state_dim,self.action_dim).to(self.device)
        self.critic_target = Critic(self.state_dim,self.action_dim).to(self.device)
        self.critic_optimizers = torch.optim.Adam(self.critic.parameters(),LR_C)
        # Start the targets as exact copies of the online networks.
        hard_update(self.actor_target,self.actor)
        hard_update(self.critic_target,self.critic)
    def learn(self, batch_size = BATCH_SIZE, epoch=0):
        """Run one TD3 update step; returns (critic_loss, actor_loss)."""
        ## batched state, batched action, batched action from expert, batched intervention signal, batched reward, batched next state
        bs, ba, ba_e, bi, br, bs_, tree_idx, ISweight = self.retrive(batch_size)
        # NOTE(review): ISweight (importance-sampling weights) is retrieved
        # but never applied to the losses — confirm this is intended.
        bs = torch.tensor(bs, dtype=torch.float).reshape(batch_size, self.state_dim_height, self.state_dim_width).to(self.device)
        # NOTE(review): the double .to(self.device) below is redundant.
        ba = torch.tensor(ba, dtype=torch.float).to(self.device).to(self.device)
        ba_e = torch.tensor(ba_e, dtype=torch.float).to(self.device).to(self.device)
        br = torch.tensor(br, dtype=torch.float).to(self.device).to(self.device)
        bs_ = torch.tensor(bs_, dtype=torch.float).reshape(batch_size, self.state_dim_height, self.state_dim_width).to(self.device)
        # initialize the loss variables
        loss_c, loss_a = 0, 0
        ## calculate the predicted values of the critic
        with torch.no_grad():
            # NOTE(review): standard TD3 clamps the smoothing noise to
            # (-noise_clip, +noise_clip); here it is clamped to [0, 1] and
            # self.noise_clip is unused — confirm intended.
            noise1 = (torch.randn_like(ba) * self.policy_noise).clamp(0, 1)
            a_ = (self.actor_target(bs_).detach() + noise1).clamp(0, 1)
            target_q1, target_q2 = self.critic_target([bs_,a_])
            target_q1 = target_q1.detach()
            target_q2 = target_q2.detach()
            # Clipped double-Q: pessimistic minimum of the twin targets.
            target_q = torch.min(target_q1,target_q2)
            y_expected = br + self.gamma * target_q
        y_predicted1, y_predicted2 = self.critic.forward([bs,ba])
        # TD errors (first critic) later drive the priority refresh.
        errors = y_expected - y_predicted1
        ## update the critic
        critic_loss = nn.MSELoss()
        loss_critic = critic_loss(y_predicted1,y_expected)+critic_loss(y_predicted2,y_expected)
        self.critic_optimizers.zero_grad()
        loss_critic.backward()
        self.critic_optimizers.step()
        ## update the actor (delayed by policy_freq steps)
        if self.itera % self.policy_freq == 0:
            # Split the batch by the intervention flag: index1 = autonomous
            # steps, index2 = human-guided steps.
            index1,_ = np.where(bi==0)
            index2,_ = np.where(bi==1)
            bs1,_,_,_=bs[index1],ba[index1],br[index1],bs_[index1]
            bs2,ba2,_,_=bs[index2],ba[index2],br[index2],bs_[index2]
            if bs2.size(0) != 0:
                if bs1.size(0) != 0:
                    # Mixed batch: deterministic policy gradient on the
                    # autonomous part, weighted imitation on the guided part.
                    bs1 = torch.reshape(bs1,(len(bs1), self.state_dim_height, self.state_dim_width))
                    bs2 = torch.reshape(bs2,(len(bs2), self.state_dim_height, self.state_dim_width))
                    pred_a1 = self.actor.forward(bs1)
                    pred_a2 = self.actor.forward(bs2)
                    loss_actor1 = (-self.critic.forward([bs1,pred_a1])[0])
                    ## fixed weight for human guidance actions
                    loss_actor2 = 3 * ((pred_a2 - ba2)**2)
                    loss_actor = torch.cat((loss_actor1,loss_actor2),0).mean()
                else:
                    # All samples are human-guided: pure weighted imitation.
                    pred_a = self.actor.forward(bs)
                    loss_actor = 3*((pred_a - ba)**2)
                    loss_actor = loss_actor.mean()
            else:
                # No interventions in the batch: plain TD3 actor loss.
                pred_a = self.actor.forward(bs)
                loss_actor = (-self.critic.forward([bs,pred_a])[0]).mean()
            self.actor_optimizer.zero_grad()
            loss_actor.backward()
            self.actor_optimizer.step()
            # Polyak-average the target networks toward the online ones.
            soft_update(self.actor_target,self.actor,self.tau)
            soft_update(self.critic_target,self.critic,self.tau)
            loss_a = loss_actor.mean().item()
        loss_c = loss_critic.mean().item()
        self.itera += 1
        # Refresh replay priorities with the absolute TD errors.
        self.memory.batch_update(tree_idx, abs(errors.detach().cpu().numpy()) )
        return loss_c, loss_a
    def choose_action(self,state):
        """Return the deterministic policy action for a single state.

        NOTE(review): clips to [-1, 1] while learn() clamps target actions
        to [0, 1] — confirm the intended action range.
        """
        state = torch.tensor(state,dtype=torch.float).reshape(self.state_dim_height, self.state_dim_width).to(self.device)
        state = state.unsqueeze(0)
        action = self.actor.forward(state).detach()
        action = action.squeeze(0).cpu().numpy()
        action = np.clip(action,-1, 1)
        return action
    def store_transition(self, s, a, a_e, i, r, s_):
        """Store (state, action, expert action, intervention flag, reward,
        next state) flattened into one row of the prioritized buffer."""
        transition = np.hstack((s, a, a_e, i, r, s_))
        self.memory.store(transition)
        self.pointer += 1
    def retrive(self, batch_size):
        """Sample a batch and slice the flat rows back into their fields."""
        tree_index, bt, ISWeight = self.memory.sample(batch_size)
        bs = bt[:, :self.state_dim]
        ba = bt[:, self.state_dim: self.state_dim + self.action_dim]
        ba_e = bt[:, self.state_dim + self.action_dim: self.state_dim + self.action_dim + self.action_dim]
        bi = bt[:, -self.state_dim - 2: -self.state_dim - 1]
        br = bt[:, -self.state_dim - 1: -self.state_dim]
        bs_ = bt[:, -self.state_dim:]
        return bs, ba, ba_e, bi, br, bs_, tree_index, ISWeight
    def memory_save(self):
        """Pickle the replay buffer to memory_IARL.pkl."""
        per = open("memory_IARL.pkl", 'wb')
        # NOTE(review): the local name shadows the builtin `str`.
        str = pickle.dumps(self.memory)
        per.write(str)
        per.close()
    def memory_load(self):
        """Restore the replay buffer from memory_IARL.pkl."""
        with open("memory_IARL.pkl",'rb') as file:
            self.memory = pickle.loads(file.read())
    def load_model(self, output):
        """Load actor/critic weights from *output* directory (no-op if None)."""
        if output is None: return
        self.actor.load_state_dict(torch.load('{}/actor.pkl'.format(output)))
        self.critic.load_state_dict(torch.load('{}/critic.pkl'.format(output)))
    def save_model(self, output):
        """Save actor/critic weights into *output* directory."""
        torch.save(self.actor.state_dict(), '{}/actor.pkl'.format(output))
        torch.save(self.critic.state_dict(), '{}/critic.pkl'.format(output))
    def save(self, log_dir, epoch):
        """Save a full training checkpoint (networks, targets, optimizers)."""
        state = {'actor':self.actor.state_dict(), 'actor_target':self.actor_target.state_dict(),
                 'actor_optimizer':self.actor_optimizer.state_dict(),
                 'critic':self.critic.state_dict(), 'critic_target':self.critic_target.state_dict(),
                 'critic_optimizers':self.critic_optimizers.state_dict(),
                 'epoch':epoch}
        torch.save(state, log_dir)
    def load(self, log_dir):
        """Restore a full training checkpoint saved by save()."""
        checkpoint = torch.load(log_dir)
        self.actor.load_state_dict(checkpoint['actor'])
        self.actor_target.load_state_dict(checkpoint['actor_target'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
        self.critic.load_state_dict(checkpoint['critic'])
        self.critic_target.load_state_dict(checkpoint['critic_target'])
        self.critic_optimizers.load_state_dict(checkpoint['critic_optimizers'])
| StarcoderdataPython |
1604870 | import matplotlib
import scipy
__version__="01.00.00"
__author__ ="<NAME>"
# Feature coordinates on the promoter construct (start, end) — presumably
# base-pair positions; TODO confirm the coordinate origin.
ZRS =(0 , 78)
UAS1 =(281,303)
UAS2 =(389,411)
TATA =(551,557)
TSS = 607
ORF =(652,2055)
LEXA =(2140,2246)
LENGTH=2246
# Nucleosome footprint (bp) and the minimum overlap used to call one.
NUCLEOSOME_SIZE =147
NUCLEOSOME_CUTOFF=90
class Configuration:
    """A promoter nucleosome-occupancy configuration.

    Holds references to the bubble assigned to the N-1, N-2 and N-3
    positions and maps the occupancy pattern (truthiness of each position)
    to a configuration number 0-7 via ``IMAP``.
    """
    # --Pho5--0->
    # 5'--------3'
    # N-3,N-2,N-1
    MAP={ 0:(1,1,1),
          1:(1,1,0),
          2:(1,0,1),
          3:(0,1,1),
          4:(0,0,1),
          5:(0,1,0),
          6:(1,0,0),
          7:(0,0,0)
        }
    # Inverse map: occupancy tuple -> configuration number.
    IMAP={value:key for key,value in MAP.items()}
    def __init__(self,N_3=None,N_2=None,N_1=None):
        self.n3 = N_3
        self.n2 = N_2
        self.n1 = N_1
        # Truthiness of each position (bubble present or not) selects the
        # configuration number through the inverse map.
        self.configuration = self.IMAP[( int(bool(self.n3)),
                                         int(bool(self.n2)),
                                         int(bool(self.n1)) )]
        # Number of occupied positions in this configuration.
        self.size = sum(self.MAP[self.configuration])
    def __str__(self):
        return(str(self.configuration))
    def __len__(self):
        """Number of occupied positions (same as ``size``)."""
        return(sum(self.MAP[self.configuration]))
    def __int__(self):
        return(self.configuration)
    def __eq__(self, other):
        # Comparable both to other Configurations and to raw numbers.
        if isinstance(other, Configuration):
            return self.configuration == other.configuration
        elif isinstance(other, (int, float)):
            return self.configuration == other
        else:
            return NotImplemented
    def __hash__(self):
        # Fix: defining __eq__ alone sets __hash__ to None, which made
        # instances unusable in sets and as dict keys. Hash on the same
        # field __eq__ compares so equal objects hash equally.
        return hash(self.configuration)
# Robert's Regions: [start, end] windows used to assign bubbles to the
# N-1/N-2/N-3 nucleosome positions, derived from the feature coordinates.
N1REGION=[UAS2[1]+24,TSS+NUCLEOSOME_SIZE//2+5]
N3REGION=[ZRS[0],UAS2[0]-35]
N2REGION=[UAS2[0]-83,UAS2[1]+NUCLEOSOME_CUTOFF+10]
# Saketh's Regions: alternative window definitions (s-prefixed).
sN1REGION=[490,580]
sN2REGION=[UAS2[0]-83,UAS2[1]+NUCLEOSOME_CUTOFF+10]
sN3REGION=[ZRS[0],UAS2[0]-35]
def _caller_(molecule):
    """
    Takes a molecule object and returns its estimation of the
    configuration of the molecule's promoter as defined by
    'Nucleosomal promoter variation generates gene expression noise'
    (<NAME> and <NAME>).

    Returns a Configuration built from the bubbles found in the N-1, N-2
    and N-3 regions. NOTE(review): relies on molecule.getOverlap /
    molecule.getExc and bubble.isbubble / bubble.size semantics defined
    elsewhere — confirm against the molecule class.
    """
    #Definitions for molecule Regions
    n1 = [bub for bub in molecule.getOverlap(N1REGION[0],N1REGION[1],NUCLEOSOME_CUTOFF) if bub.isbubble]
    n2 = [bub for bub in molecule.getExc(UAS2[0],UAS2[1]) if (bub.isbubble and bub.size >NUCLEOSOME_CUTOFF) ]
    n3 = [bub for bub in molecule.getOverlap(N3REGION[0],N3REGION[1],NUCLEOSOME_CUTOFF) if bub.isbubble]
    return(Configuration(n3,n2,n1))
def bincount_configuration(molecules, config_caller = _caller_):
    """Count how many molecules fall into each of the 8 configurations.

    Parameters
    ----------
    molecules : iterable
        Molecule objects accepted by *config_caller*.
    config_caller : callable
        Maps a molecule to something int()-convertible (a Configuration).

    Returns an array of length ``len(Configuration.MAP)`` with the
    per-configuration counts.
    """
    # Local import: the module header only imports matplotlib/scipy, and the
    # legacy top-level aliases scipy.bincount/scipy.array have been removed
    # from SciPy — use NumPy directly.
    import numpy
    counts = numpy.array(list(map(config_caller, molecules))).astype(int)
    return(numpy.bincount(counts, minlength=len(Configuration.MAP)))
def build_gene_plot(plt):
    """Build the gene-track figure: a main axes (ax2) over a thin feature
    track (ax1) showing the promoter elements as colored rectangles.

    *plt* is the matplotlib.pyplot module. Returns (figure, main_axes).
    X coordinates are TSS-relative (feature positions shifted by -TSS).
    """
    fig = plt.figure(facecolor='0.8')
    ax2 = plt.axes([0.05,0.1 ,0.9 ,0.85])
    ax1 = plt.axes([0.05,0.075,0.9 ,0.025])
    # Fix: Axes.set_axis_bgcolor was removed in matplotlib 2.2;
    # set_facecolor is the supported replacement.
    ax1.set_facecolor('white')
    ax2.set_facecolor('white')
    ax1.tick_params(axis='y',which='both',top='off',bottom='off',labelleft='off',labelright='off',labelbottom='off')
    ax1.tick_params(axis='x',which='both',top='off',bottom='off')
    ax2.tick_params(axis='x',which='both',labelbottom='off')
    ax2.grid(color='grey',linestyle='--',alpha=0.5)
    ax1.set_xticks(range(-500,2250,250))
    ax2.set_xticks(range(-500,2250,250))
    # Make all spines visible, black, width 1 on both axes.
    [i.set_visible(True) for i in ax2.spines.values()]
    [i.set_color('black') for i in ax2.spines.values()]
    [i.set_linewidth(1) for i in ax2.spines.values()]
    [i.set_visible(True) for i in ax1.spines.values()]
    [i.set_color('black') for i in ax1.spines.values()]
    [i.set_linewidth(1) for i in ax1.spines.values()]
    ax2.get_xgridlines()[2].set_linestyle('-')
    # Feature rectangles on the thin track, TSS-relative coordinates.
    patches = [
        matplotlib.patches.Rectangle((0 -TSS , 0), 30 , 1, facecolor='black' ), #lexa
        matplotlib.patches.Rectangle((48 -TSS , 0), 30 , 1, facecolor='dimgrey'), #zrux
        matplotlib.patches.Rectangle((281 -TSS , 0), 21 , 1, facecolor='red' ), #uas1
        matplotlib.patches.Rectangle((391 -TSS , 0), 21 , 1, facecolor='red' ), #uass2
        matplotlib.patches.Rectangle((551 -TSS , 0), 6 , 1, facecolor='green' ), #TATA
        matplotlib.patches.Rectangle((607 -TSS , 0), 1484, 1, facecolor='cyan' ), #tss
        matplotlib.patches.Rectangle((652 -TSS , 0), 1404, 1, facecolor='blue' ), #orf
        matplotlib.patches.Rectangle((2140-TSS , 0), 106 , 1, facecolor='black' ), #lexa
    ]
    [ax1.add_patch(p) for p in patches]
    #ax1.grid(False)
    ax2.set_ylim([0,1])
    ax2.set_xlim([0-607,2246-607])
    ax1.set_xlim([0-607,2246-607])
    ##arrow
    #ax2.plot((0,0) ,(0, 0.075) ,linewidth=4, color='black')
    #ax2.plot((0,120),(0.075,0.075) ,linewidth=4, color='black')
    #ax2.arrow(80,0.075,0.01,0, color='black',head_starts_at_zero=True,head_length=35)
    return(fig,ax2)
def build_config_plot(plt):
    """Build the 8-configuration figure: main axes (ax1) over a legend strip
    (ax2) drawing each configuration as a box with filled circles for the
    occupied N-3/N-2/N-1 positions. Returns (figure, main_axes)."""
    fig = plt.figure()
    ax1 = plt.subplot2grid((10,4), (0,0), colspan=4,rowspan=8)
    ax2 = plt.subplot2grid((10,4), (8 ,0),colspan=4,rowspan=2,sharex=ax1)
    ax1.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    ax1.tick_params(axis='y', which='both',right='off')
    ax2.tick_params(axis='y', which='both',left='off',right='off',labelright='on', labelleft='off')
    ax2.tick_params(axis='x',which='both',top='off',bottom='off')
    # One circle per occupied position of each configuration (x = config
    # number, y = position index + 1), then flatten the nested list.
    circles=[[matplotlib.patches.Circle((i,n+1),0.20,color='black') for n,c in enumerate(config) if c] for i,config in Configuration.MAP.items()]
    circles=[c for l in circles for c in l]
    boxes =[matplotlib.patches.Rectangle((i -0.75/2 ,0), 0.75, 4, lw=4,edgecolor='black',fill=False) for i in range(8)]
    [ax2.add_artist(artist) for artist in boxes]
    [ax2.add_artist(artist) for artist in circles]
    ax2.set_yticklabels(["5'",'','N -3','','N -2','','N -1','',"3'"])
    ax2.set_ylim([0,4])
    ax2.set_xlim([-0.5,7.5])
    return(fig,ax1)
def build_nuc_config_plot(plt):
    """Like build_config_plot, but for the three single-nucleosome patterns:
    occupied positions drawn black, unoccupied grey. Returns (figure,
    main_axes)."""
    fig = plt.figure()
    ax1 = plt.subplot2grid((10,4), (0,0), colspan=4,rowspan=8)
    ax2 = plt.subplot2grid((10,4), (8 ,0),colspan=4,rowspan=2,sharex=ax1)
    ax1.tick_params(axis='y', which='both',right='off')
    ax1.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    ax2.tick_params(axis='y', which='both',left='off',right='off',labelright='on', labelleft='off')
    ax2.tick_params(axis='x',which='both',top='off',bottom='off')
    # Black circles for occupied positions, grey for the rest, one column
    # per single-nucleosome pattern; flatten afterwards.
    b_circles=[[matplotlib.patches.Circle((i,n+1),0.20,color='black') for n,c in enumerate(config) if c] for i,config in enumerate([(1,0,0),(0,1,0),(0,0,1)])]
    b_circles=[c for l in b_circles for c in l]
    g_circles=[[matplotlib.patches.Circle((i,n+1),0.20,color='grey') for n,c in enumerate(config) if not c] for i,config in enumerate([(1,0,0),(0,1,0),(0,0,1)])]
    g_circles=[c for l in g_circles for c in l]
    boxes =[matplotlib.patches.Rectangle((i -0.75/2 ,0), 0.75, 4, lw=4,edgecolor='black',fill=False) for i in range(3)]
    [ax2.add_artist(artist) for artist in boxes]
    [ax2.add_artist(artist) for artist in b_circles]
    [ax2.add_artist(artist) for artist in g_circles]
    ax2.set_yticklabels(["5'",'','N -3','','N -2','','N -1','',"3'"],weight='bold')
    ax2.set_xticklabels(['','N -3','','N -2','','N -1',''],weight='bold')
    ax2.set_ylim([0,4])
    ax2.set_xlim([-0.5,2.5])
    return(fig,ax1)
| StarcoderdataPython |
148706 | <filename>p3/p3.py
import urllib
import xml.dom.minidom
from xml.dom.minidom import parse
class site:
    """Accumulates the fields of one geocoded place parsed from the Google
    geocode XML response.

    NOTE(review): this is Python 2 code (the module uses raw_input and
    urllib.urlopen); under Python 2, .encode("UTF-8") turns unicode into
    byte strings so __str__'s concatenation works. Under Python 3 the
    bytes/str mix would fail — confirm before porting.
    """
    def __init__(self):
        # All fields default to empty; lvl defaults to "Ninguna" (none).
        self.name =""
        self.country =""
        self.short =""
        self.lvl ="Ninguna"
        self.address =""
        self.lat =""
        self.lon =""
    def setName(self,name):
        self.name = name.encode("UTF-8")
    def setCountry(self, country, short):
        self.country = country.encode("UTF-8")
        self.short = short.encode("UTF-8")
    def setlvl(self, lvl):
        # Administrative area level 1 (state/province).
        self.lvl=lvl.encode("UTF-8")
    def setAddress(self, a):
        self.address =a.encode("UTF-8")
    def setCords(self, x, y ):
        # x = longitude, y = latitude.
        self.lon=x.encode("UTF-8")
        self.lat=y.encode("UTF-8")
    def __str__(self):
        return "Informacion: \n Nombre: "+self.name+"\n Pais: "+self.country+"\n Nombre corto de pais: "+self.short+"\n Entidad de nivel 1: "+self.lvl+"\n Direccion formateada: "+self.address+"\n Longitud: "+self.lon+" Latitud: "+self.lat
serviceurl = 'http://maps.googleapis.com/maps/api/geocode/xml?'
def tratarDatos(fileName):
    """Parse a saved Google geocode XML response, build a `site` from its
    address components, and print it. Prints a failure message and returns
    early when the response status is not "OK"."""
    ArbolDOM = xml.dom.minidom.parse(fileName)
    geocode = ArbolDOM.documentElement
    status = geocode.getElementsByTagName("status")[0].childNodes[0].data
    if(status != "OK"):
        print ("Busqueda fallida.")
        return
    componentes = geocode.getElementsByTagName("address_component")
    sitio = site()
    # Walk every address_component and pull the fields we need from the
    # components whose <type> tag we recognize.
    for componente in componentes:
        # NOTE(review): local name `type` shadows the builtin.
        type = componente.getElementsByTagName("type")[0]
        typeText = type.childNodes[0].data
        if typeText == "locality":
            sitio.setName(componente.getElementsByTagName("short_name")[0].childNodes[0].data)
        elif typeText == "country":
            sitio.setCountry(componente.getElementsByTagName("long_name")[0].childNodes[0].data, componente.getElementsByTagName("short_name")[0].childNodes[0].data)
        elif typeText == "administrative_area_level_1":
            sitio.setlvl(componente.getElementsByTagName("long_name")[0].childNodes[0].data)
    sitio.setAddress(geocode.getElementsByTagName("formatted_address")[0].childNodes[0].data)
    location=geocode.getElementsByTagName("geometry")[0].getElementsByTagName("location")[0]
    sitio.setCords(location.getElementsByTagName("lng")[0].childNodes[0].data ,location.getElementsByTagName("lat")[0].childNodes[0].data)
    print(sitio)
def crearXML(fileName, data):
    """Write each entry of *data* (stringified) into *fileName*.

    Returns True on success; on an I/O error (e.g. invalid characters in
    the filename) prints a message and returns False.
    """
    try:
        # `with` guarantees the handle is closed even if a write fails —
        # the original leaked the descriptor by never calling close().
        with open(fileName, "w") as f:
            for entrada in data:
                f.write(str(entrada))
    except IOError:
        print("Busqueda incluye caracteres no validos (\/:*?\"<>|) \n")
        return False
    return True
def programa ():
    """Interactive loop: prompt for a query, fetch the Google geocode XML
    for it, save it to <query>.xml, and print the parsed result.
    Type "stop" to exit.

    NOTE(review): Python 2 APIs (raw_input, urllib.urlencode/urlopen);
    a Python 3 port would use input() and urllib.parse/urllib.request.
    """
    while True:
        busqueda = raw_input('Introduce tu busqueda ("stop" para salir): \n')
        if (busqueda != 'stop'):
            if(len(busqueda)>= 1):
                try:
                    url = serviceurl + urllib.urlencode({'sensor':'false', 'address': busqueda})
                    networkObj = urllib.urlopen(url)
                    data = networkObj.read()
                except IOError:
                    # Network failure: report and terminate the program.
                    print("No tienes conexion a internet. \n")
                    exit()
                if(crearXML(busqueda+'.xml', data)):
                    tratarDatos(busqueda+'.xml')
                else:
                    print("Introduce una busqueda valida. \n")
        else:
            break
# NOTE(review): runs immediately at import time — no __main__ guard.
programa()
| StarcoderdataPython |
4361 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2017 Bitcraze AB
#
# Crazyflie Python Library
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Example script that allows a user to "push" the Crazyflie 2.0 around
using your hands while it's hovering.
This examples uses the Flow and Multi-ranger decks to measure distances
in all directions and tries to keep away from anything that comes closer
than 0.2m by setting a velocity in the opposite direction.
The demo is ended by either pressing Ctrl-C or by holding your hand above the
Crazyflie.
For the example to run the following hardware is needed:
* Crazyflie 2.0
* Crazyradio PA
* Flow deck
* Multiranger deck
"""
import logging
import sys
import time
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.positioning.motion_commander import MotionCommander
from cflib.utils.multiranger import Multiranger
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.patches as patches
# Default Crazyflie radio address; overridable via the first CLI argument.
URI = 'radio://0/80/2M'
if len(sys.argv) > 1:
    URI = sys.argv[1]
# Only output errors from the logging framework
logging.basicConfig(level=logging.ERROR)
def is_close(range):
    """Return True when *range* is a valid reading closer than 0.2 m.

    A reading of None (sensor gave no value) is never considered close.
    """
    MIN_DISTANCE = 0.2  # m
    return range is not None and range < MIN_DISTANCE
if __name__ == '__main__':
    # Initialize the low-level drivers (don't list the debug drivers)
    cflib.crtp.init_drivers(enable_debug_driver=False)
    rangeArray = []
    cf = Crazyflie(rw_cache='./cache')
    # Nested context managers: radio link, take-off/land on enter/exit,
    # and the multi-ranger deck.
    with SyncCrazyflie(URI, cf=cf) as scf:
        with MotionCommander(scf) as motion_commander:
            with Multiranger(scf) as multiranger:
                motion_commander.start_turn_left(90)
                # NOTE(review): only a single front-range sample is captured
                # (no loop), unlike the push demo the module docstring
                # describes — confirm this file is not truncated.
                rangeArray.append(multiranger.front)
                time.sleep(0.05)
plt.plot(rangeArray) | StarcoderdataPython |
1682625 | <gh_stars>0
from django.contrib import admin
from core.models import InternetRating, Place, Rating
admin.site.register(InternetRating)
admin.site.register(Place)
admin.site.register(Rating)
| StarcoderdataPython |
4822119 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tidbit'
db.create_table('auxiliary_tidbit', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(default=u'Did you know', max_length=40)),
('icon', self.gf('django.db.models.fields.CharField')(max_length=15)),
('content', self.gf('django.db.models.fields.TextField')()),
('button_text', self.gf('django.db.models.fields.CharField')(max_length=100)),
('button_link', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('auxiliary', ['Tidbit'])
def backwards(self, orm):
# Deleting model 'Tidbit'
db.delete_table('auxiliary_tidbit')
models = {
'auxiliary.tidbit': {
'Meta': {'object_name': 'Tidbit'},
'button_link': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'button_text': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content': ('django.db.models.fields.TextField', [], {}),
'icon': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "u'Did you know'", 'max_length': '40'})
}
}
complete_apps = ['auxiliary'] | StarcoderdataPython |
3311717 | <filename>instructions.py
from binaryninja import InstructionTextToken, InstructionTextTokenType
import struct
# Type 1 instructions are those that take two operands.
TYPE1_INSTRUCTIONS = [
'mov', 'add', 'addc', 'subc', 'sub', 'cmp',
'dadd', 'bit', 'bic', 'bis', 'xor', 'and'
]
# Type 2 instructions are those that take one operand.
TYPE2_INSTRUCTIONS = [
'rrc', 'swpb', 'rra', 'sxt', 'push', 'call',
'reti', 'br'
]
# Type 3 instructions are (un)conditional branches. They do not
# take any operands, as the branch targets are always immediates
# stored in the instruction itself.
TYPE3_INSTRUCTIONS = [
'jnz', 'jz', 'jlo', 'jhs', 'jn', 'jge', 'jl',
'jmp'
]
InstructionNames = [
# No instructions use opcode 0
None,
# Type 2 instructions all start with 0x1 but then
# differentiate by three more bits:
# 0001 00 XXX .......
['rrc', 'swpb', 'rra', 'sxt', 'push', 'call', 'reti'],
# Type 3 instructions start with either 0x2 or 0x3 and
# then differentiate with the following three bits:
# 0010 XXX ..........
['jnz', 'jz', 'jlo', 'jhs'],
# 0011 XXX ..........
['jn', 'jge', 'jl', 'jmp'],
# Type 1 instructions all use the top 4 bits
# for their opcodes (0x4 - 0xf)
'mov',
'add',
'addc',
'subc',
'sub',
'cmp',
'dadd',
'bit',
'bic',
'bis',
'xor',
'and'
]
# InstructionMask and InstructionMaskShift are used to mask
# off the bits that are used for the opcode of type 2 and 3
# instructions.
InstructionMask = {
1: 0x380,
2: 0xc00,
3: 0xc00,
}
InstructionMaskShift = {
1: 7,
2: 10,
3: 10
}
# Some instructions can be either 2 byte (word) or 1 byte
# operations.
WORD_WIDTH = 0
BYTE_WIDTH = 1
# There are technically only four different operand modes, but
# certain mode/register combinations have different semantic
# meanings.
REGISTER_MODE = 0
INDEXED_MODE = 1
INDIRECT_REGISTER_MODE = 2
INDIRECT_AUTOINCREMENT_MODE = 3
SYMBOLIC_MODE = 4
ABSOLUTE_MODE = 5
IMMEDIATE_MODE = 6
CONSTANT_MODE0 = 7
CONSTANT_MODE1 = 8
CONSTANT_MODE2 = 9
CONSTANT_MODE4 = 10
CONSTANT_MODE8 = 11
CONSTANT_MODE_NEG1 = 12
OFFSET = 13
OperandLengths = [
0, # REGISTER_MODE
2, # INDEXED_MODE
0, # INDIRECT_REGISTER_MODE
0, # INDIRECT_AUTOINCREMENT_MODE
2, # SYMBOLIC_MODE
2, # ABSOLUTE_MODE
2, # IMMEDIATE_MODE
0, # CONSTANT_MODE0
0, # CONSTANT_MODE1
0, # CONSTANT_MODE2
0, # CONSTANT_MODE4
0, # CONSTANT_MODE8
0, # CONSTANT_MODE_NEG1
0, # OFFSET
]
Registers = [
'pc',
'sp',
'sr',
'cg',
'r4',
'r5',
'r6',
'r7',
'r8',
'r9',
'r10',
'r11',
'r12',
'r13',
'r14',
'r15'
]
OperandTokens = [
lambda reg, value: [ # REGISTER_MODE
InstructionTextToken(InstructionTextTokenType.RegisterToken, reg)
],
lambda reg, value: [ # INDEXED_MODE
InstructionTextToken(
InstructionTextTokenType.IntegerToken, hex(value), value),
InstructionTextToken(InstructionTextTokenType.TextToken, '('),
InstructionTextToken(InstructionTextTokenType.RegisterToken, reg),
InstructionTextToken(InstructionTextTokenType.TextToken, ')')
],
lambda reg, value: [ # INDIRECT_REGISTER_MODE
InstructionTextToken(InstructionTextTokenType.TextToken, '@'),
InstructionTextToken(InstructionTextTokenType.RegisterToken, reg)
],
lambda reg, value: [ # INDIRECT_AUTOINCREMENT_MODE
InstructionTextToken(InstructionTextTokenType.TextToken, '@'),
InstructionTextToken(InstructionTextTokenType.RegisterToken, reg),
InstructionTextToken(InstructionTextTokenType.TextToken, '+')
],
lambda reg, value: [ # SYMBOLIC_MODE
InstructionTextToken(
InstructionTextTokenType.CodeRelativeAddressToken, hex(value), value)
],
lambda reg, value: [ # ABSOLUTE_MODE
InstructionTextToken(InstructionTextTokenType.TextToken, '&'),
InstructionTextToken(
InstructionTextTokenType.PossibleAddressToken, hex(value), value)
],
lambda reg, value: [ # IMMEDIATE_MODE
InstructionTextToken(
InstructionTextTokenType.PossibleAddressToken, hex(value), value)
],
lambda reg, value: [ # CONSTANT_MODE0
InstructionTextToken(InstructionTextTokenType.IntegerToken, str(0), 0)
],
lambda reg, value: [ # CONSTANT_MODE1
InstructionTextToken(InstructionTextTokenType.IntegerToken, str(1), 1)
],
lambda reg, value: [ # CONSTANT_MODE2
InstructionTextToken(InstructionTextTokenType.IntegerToken, str(2), 2)
],
lambda reg, value: [ # CONSTANT_MODE4
InstructionTextToken(InstructionTextTokenType.IntegerToken, str(4), 4)
],
lambda reg, value: [ # CONSTANT_MODE8
InstructionTextToken(InstructionTextTokenType.IntegerToken, str(8), 8)
],
lambda reg, value: [ # CONSTANT_MODE_NEG1
InstructionTextToken(
InstructionTextTokenType.IntegerToken, str(-1), -1)
],
lambda reg, value: [ # OFFSET
InstructionTextToken(
InstructionTextTokenType.PossibleAddressToken, hex(value), value)
]
]
class Operand:
    """Base class describing one decoded MSP430 operand.

    Holds the addressing mode, an optional target register name, the operand
    width in bytes, an immediate/offset value (may be filled in after initial
    decoding) and the number of extra instruction-stream bytes it consumes.
    """

    def __init__(self, mode, target=None, width=None, value=None,
                 operand_length=0):
        # Private attribute names kept identical to the historical layout.
        self._mode = mode
        self._target = target
        self._width = width
        self._value = value
        self._length = operand_length

    @property
    def mode(self):
        """Addressing-mode constant for this operand."""
        return self._mode

    @property
    def width(self):
        """Operand width in bytes (1 for byte ops, 2 for word ops)."""
        return self._width

    @property
    def target(self):
        """Target register name, when the mode involves a register."""
        return self._target

    @property
    def value(self):
        """Immediate/index/offset value; may be assigned after decoding."""
        return self._value

    @value.setter
    def value(self, new_value):
        self._value = new_value

    @property
    def operand_length(self):
        """Extra instruction bytes (extension words) used by this operand."""
        return self._length
class SourceOperand(Operand):
    """Source-operand decoder for MSP430 instructions."""

    @classmethod
    def decode(cls, instr_type, instruction, address):
        """Decode the source operand of an instruction word.

        instr_type: 1 (two-operand), 2 (single-operand) or 3 (jump).
        instruction: the 16-bit instruction word.
        address: address of the instruction, used to resolve jump targets.
        Returns a SourceOperand; for jumps, `value` is the absolute target.
        """
        if instr_type == 3:
            # Jumps carry a PC-relative offset rather than a register operand.
            mode = OFFSET
            target = None
            width = None
        else:
            width = 1 if (instruction & 0x40) >> 6 else 2
            # The As addressing-mode bits are in the same place for Type 1
            # and Type 2 instructions.
            mode = (instruction & 0x30) >> 4
            if instr_type == 2:
                target = Registers[instruction & 0xf]
            elif instr_type == 1:
                target = Registers[(instruction & 0xf00) >> 8]
            # PC, CG and SR as source select special addressing modes
            # (symbolic/immediate addressing and the constant generator).
            if target == 'pc':
                if mode == INDEXED_MODE:
                    mode = SYMBOLIC_MODE
                elif mode == INDIRECT_AUTOINCREMENT_MODE:
                    mode = IMMEDIATE_MODE
            elif target == 'cg':
                if mode == REGISTER_MODE:
                    mode = CONSTANT_MODE0
                elif mode == INDEXED_MODE:
                    mode = CONSTANT_MODE1
                elif mode == INDIRECT_REGISTER_MODE:
                    mode = CONSTANT_MODE2
                else:
                    mode = CONSTANT_MODE_NEG1
            elif target == 'sr':
                if mode == INDEXED_MODE:
                    mode = ABSOLUTE_MODE
                elif mode == INDIRECT_REGISTER_MODE:
                    mode = CONSTANT_MODE4
                elif mode == INDIRECT_AUTOINCREMENT_MODE:
                    mode = CONSTANT_MODE8
        operand_length = OperandLengths[mode]
        if instr_type == 3:
            # 10-bit signed word offset, stored left-shifted by one bit.
            branch_target = (instruction & 0x3ff) << 1
            # Bug fix: after the shift the sign bit of the 10-bit field is
            # bit 10 (0x400).  The old mask (0x600) also matched bit 9 and
            # so mis-classified positive offsets >= 0x100 words as negative.
            if branch_target & 0x400:
                # Sign-extend the 11-bit value to a negative Python int.
                branch_target |= 0xf800
                branch_target -= 0x10000
            value = address + 2 + branch_target
            return cls(mode, target, width, value, operand_length)
        else:
            return cls(mode, target, width, operand_length=operand_length)
class DestOperand(Operand):
    """Destination-operand decoder (only Type 1 instructions have one)."""

    @classmethod
    def decode(cls, instr_type, instruction, address):
        """Decode the destination operand, or return None for other types."""
        if instr_type != 1:
            return None
        byte_flag = (instruction & 0x40) >> 6
        width = 1 if byte_flag else 2
        target = Registers[instruction & 0xf]
        mode = (instruction & 0x80) >> 7
        # Indexed addressing off SR is really absolute addressing.
        if mode == INDEXED_MODE and target == 'sr':
            mode = ABSOLUTE_MODE
        return cls(mode, target, width,
                   operand_length=OperandLengths[mode])
class Instruction:
    """A decoded MSP430 instruction: mnemonic, type, operands and length."""

    @classmethod
    def decode(cls, data, address):
        """Decode one instruction from little-endian bytes at `address`.

        Returns an Instruction, or None when the buffer is too short or the
        opcode is unknown.
        """
        if len(data) < 2:
            return None
        emulated = False
        instruction = struct.unpack('<H', data[0:2])[0]
        # 0x4130 is 'mov @sp+, pc' -- the canonical emulated 'ret'.
        if instruction == 0x4130:
            return cls('ret', emulated=True)
        opcode = (instruction & 0xf000) >> 12
        mask = InstructionMask.get(opcode)
        shift = InstructionMaskShift.get(opcode)
        # Some opcode groups encode several mnemonics in extra bits; for
        # those, mask/shift select the sub-entry of InstructionNames.
        if None not in (mask, shift):
            mnemonic = InstructionNames[opcode][(instruction & mask) >> shift]
        else:
            mnemonic = InstructionNames[opcode]
        if mnemonic is None:
            return None
        if mnemonic in TYPE1_INSTRUCTIONS:
            type_ = 1
        elif mnemonic in TYPE2_INSTRUCTIONS:
            type_ = 2
        elif mnemonic in TYPE3_INSTRUCTIONS:
            type_ = 3
        # NOTE(review): if a mnemonic is in none of the three sets, `type_`
        # is unbound and the next line raises UnboundLocalError -- presumably
        # every decodable mnemonic belongs to one set; confirm.
        src = SourceOperand.decode(type_, instruction, address)
        dst = DestOperand.decode(type_, instruction, address)
        length = 2 + src.operand_length + (dst.operand_length if dst else 0)
        if len(data) < length:
            return None
        # Extension words (immediates/indexes) follow the instruction word:
        # source word first, then destination word.
        offset = 2
        if src.operand_length:
            src.value = struct.unpack('<H', data[offset:offset+2])[0]
            offset += 2
        if dst and dst.operand_length:
            dst.value = struct.unpack('<H', data[offset:offset+2])[0]
        # Emulated instructions: rewrite to the conventional display form.
        if mnemonic == 'mov' and dst.target == 'pc':
            mnemonic = 'br'
            emulated = True
        elif (
            mnemonic == 'bis' and
            dst.target == 'sr' and
            src.value == 0xf0
        ):
            # NOTE(review): 'dint' is conventionally emulated as
            # 'bic #8, SR'; this matches 'bis #0xf0, SR' instead -- verify
            # the intended encoding.
            return cls('dint', length=length, emulated=True)
        return cls(
            mnemonic,
            type_,
            src,
            dst,
            length,
            emulated
        )

    def generate_tokens(self):
        """Build the Binary Ninja display-token list for this instruction."""
        tokens = []
        mnemonic = self.mnemonic
        type_ = self.type
        src = self.src
        dst = self.dst
        # Byte-sized operations get the '.b' mnemonic suffix.
        if src is not None and src.width == 1:
            mnemonic += '.b'
        tokens = [
            InstructionTextToken(
                InstructionTextTokenType.TextToken, '{:7s}'.format(mnemonic))
        ]
        if type_ == 1:
            tokens += OperandTokens[src.mode](src.target, src.value)
            tokens += [InstructionTextToken(
                InstructionTextTokenType.TextToken, ',')]
            tokens += OperandTokens[dst.mode](dst.target, dst.value)
        elif type_ == 2:
            tokens += OperandTokens[src.mode](src.target, src.value)
        elif type_ == 3:
            tokens += OperandTokens[src.mode](src.target, src.value)
        return tokens

    def __init__(
        self,
        mnemonic,
        type_=None,
        src=None,
        dst=None,
        length=2,
        emulated=False
    ):
        # mnemonic: display name; type_: 1/2/3 instruction class (None for
        # pure emulated forms); src/dst: Operand instances or None;
        # length: total encoded size in bytes; emulated: True for pseudo-ops.
        self.mnemonic = mnemonic
        self.src = src
        self.dst = dst
        self.length = length
        self.emulated = emulated
        self.type = type_
| StarcoderdataPython |
1786050 | import math
import emoji  # third-party library installed from PyPI (python.org/pypi)
print(emoji.emojize('Olá, mundo :earth_africa:', use_aliases = True))
num = int (input(' digite um numero: '))
raiz = math.sqrt(num)  # square root; note: math.ceil rounds UP
# and math.floor rounds DOWN (the original comment had them swapped)
print('A raiz de {} é igual a {:.2f}'.format(num,raiz))
1601351 | <gh_stars>0
# -*- coding: utf-8 -*-
from webbrowser import open_new
from tkinter import filedialog
import tkinter as tk
import os
import sys
import base64
import shutil
import subprocess, shlex
def display_error_window(error_name, msg):
    """Pop up a simple error dialog with an OK button.

    error_name becomes the window title and msg the body text; pressing OK
    destroys the window.
    """
    dialog = tk.Tk()
    dialog.title(error_name)
    content = tk.Frame(dialog)
    message_label = tk.Label(content, text=msg)
    message_label.pack(padx=5, pady=10, side=tk.TOP)
    ok_button = tk.Button(
        content,
        text="OK",
        command=lambda: dialog.destroy()
    )
    ok_button.pack(side=tk.TOP)
    content.pack(padx=15, pady=10)
def center_window(win):
    """Center a Tk window on the screen, then make it visible again."""
    win.withdraw()
    win.update_idletasks()
    x_pos = (win.winfo_screenwidth() - win.winfo_reqwidth()) / 2
    y_pos = (win.winfo_screenheight() - win.winfo_reqheight()) / 2
    win.geometry("+%d+%d" % (x_pos, y_pos))
    win.deiconify()
def add_quotes(path):
    """Wrap *path* in double quotes (for paths containing spaces)."""
    return '"{}"'.format(path)
def remove_quotes(quoted_path):
    """Strip the first and last character (assumed surrounding quotes)."""
    return quoted_path[1:len(quoted_path) - 1]
def double_backslash(path):
    """Return *path* with every backslash doubled (escaped form)."""
    return path.replace('\\', '\\\\')
def find(name, path):
    """Walk *path* and return the full path of the first file named *name*.

    :name: 'example.txt'
    :path: root directory to start the search from
    Returns None when no matching file exists.
    """
    for dirpath, _dirnames, filenames in os.walk(path):
        if name in filenames:
            return os.path.join(dirpath, name)
    return None
def is_gs_installed():
    """Return True when a GhostScript console executable is present in the
    default Windows install folder, False otherwise.

    Checks for the 64-bit (gswin64c.exe) and 32-bit (gswin32c.exe) binaries
    under 'C:/Program Files/gs'.
    """
    gs_default_inst_folder = 'C:/Program Files/gs'
    if not os.path.isdir(gs_default_inst_folder):
        return False
    if find('gswin64c.exe', gs_default_inst_folder) is not None:
        return True
    if find('gswin32c.exe', gs_default_inst_folder) is not None:
        return True
    # Bug fix: the original fell through and returned None (not False) when
    # the folder existed but neither executable was found.
    return False
def get_abs_path_to_convert_exe():
    """Scan the PATH environment variable (';'-separated) for an ImageMagick
    directory and return the absolute path of its 'convert.exe'.

    Returns None when no PATH entry mentions ImageMagick.
    """
    for directory in os.environ['PATH'].split(';'):
        if 'ImageMagick' in directory:
            return directory + '\\convert.exe'
    return None
def get_dir_and_name(abs_path_pdf):
    """Split an absolute PDF path into (directory, bare name).

    Example: 'C:/docs/file.pdf' -> ('C:/docs', 'file').  Drive roots keep
    their trailing slash: 'C:/file.pdf' -> ('C:/', 'file').  An empty input
    returns '' (historic behaviour preserved).
    """
    if abs_path_pdf == '':
        return ''
    trimmed = abs_path_pdf[:-4]  # drop the '.pdf' extension
    slash_idx = trimmed.rfind('/')
    name = trimmed[slash_idx + 1:]
    directory = trimmed[:slash_idx + 1]
    # Keep the trailing slash only for drive roots like 'C:/'.
    if directory[-2:] != ':/':
        directory = directory[:-1]
    return directory, name
def browse_in_explorer(filetype):
    """Open a file-open dialog and return the chosen file's absolute path.

    :filetype: extension including the dot, e.g. '.exe' or '.pdf'
    Returns '' when the user cancels the dialog.
    """
    type_label = filetype[1:] + " files"
    selection = filedialog.askopenfilename(
        initialdir="/",
        title="Select file",
        filetypes=((type_label, "*" + filetype), ("all files", "*.*"))
    )
    return selection
def is_pdf(path_to_file):
    """Return True when the path ends with the '.pdf' extension."""
    return path_to_file.endswith('.pdf')
def replace_char_in_path(abs_path_to_file, old_char, new_char):
    """ Replace all old_char characters in the abs path by a new_char.
    Return the new abs path of the PDF file.

    NOTE: this has real side effects -- it renames (via os.rename) every
    parent directory whose name contains old_char, and the file itself.
    May raise PermissionError from os.rename; the caller handles it.
    """
    pdf_dir, pdf_name = get_dir_and_name(abs_path_to_file)
    # for the directory name and all parent directories.
    if old_char in pdf_dir:
        splitted_path = pdf_dir.split('/')
        renamed_path = splitted_path[0]
        tmp_new_path = splitted_path[0]
        for i in range(1, len(splitted_path), 1):
            subdir = splitted_path[i]
            if old_char in subdir:
                # Rebuild the directory name with old_char -> new_char.
                splitted_subdir = subdir.split(old_char)
                renamed_subdir = ''
                for element in splitted_subdir:
                    renamed_subdir += element + new_char
                renamed_subdir = renamed_subdir[:-1]
                renamed_path += '/' + renamed_subdir
                tmp_new_path += '/' + splitted_path[i]
                # Rename this directory on disk before descending further;
                # tmp_new_path then tracks the already-renamed prefix.
                os.rename(tmp_new_path, renamed_path)
                tmp_new_path = renamed_path
            else:
                renamed_path += '/' + splitted_path[i]
                tmp_new_path += '/' + splitted_path[i]
        pdf_dir = renamed_path
    new_path = pdf_dir + '/' + pdf_name + '.pdf'
    # (then) rename the file
    if old_char in pdf_name:
        splitted_name = pdf_name.split(old_char)
        new_name = ''
        for i in splitted_name:
            new_name += i + new_char
        new_name = new_name[:-1]
        # Drive roots ('C:/') already end with a slash, so no separator.
        if pdf_dir[-2:] == ':/':
            old_path = pdf_dir + pdf_name + '.pdf'
            new_path = pdf_dir + new_name + '.pdf'
            os.rename(old_path, new_path)
        else:
            old_path = pdf_dir + '/' + pdf_name + '.pdf'
            new_path = pdf_dir + '/' + new_name + '.pdf'
            os.rename(old_path, new_path)
    return new_path
def create_output_dir(abs_path_pdf):
    """ Create the output directory and return it as a string.

    An existing output directory from a previous run is wiped and recreated.
    Returns the sentinel string "Failed" when permissions prevent creation.
    """
    pdf_dir, pdf_name = get_dir_and_name(abs_path_pdf)
    # Drive roots ('C:/') already carry a trailing slash.
    if pdf_dir[-2:] == ':/':
        output_dir = pdf_dir + pdf_name + "_converted_for_StylusLab"
    else:
        output_dir = pdf_dir + '/' + pdf_name + "_converted_for_StylusLab"
    try:
        os.mkdir(output_dir)
    except FileExistsError:
        # Stale directory from a previous run: remove it and retry once.
        shutil.rmtree(output_dir, ignore_errors=True)
        try:
            os.mkdir(output_dir)
        except PermissionError:
            display_error_window("Access denied", "Access denied. Please try again.")
            return "Failed"
    return output_dir
def conversion(abs_path_pdf, abs_path_convert_exe, density, int_transparent_bg):
    """Convert a PDF to per-page base64-embedded SVGs plus an HTML index for
    Stylus Labs 'Write'.

    abs_path_pdf: absolute path of the source PDF (no whitespace expected).
    abs_path_convert_exe: path to ImageMagick's convert.exe (maybe quoted).
    density: ImageMagick -density value as a string.
    int_transparent_bg: 1 to flatten a transparent background to white.
    Returns the output directory path, or "Failed"/"Retry" sentinels.
    """
    pdf_name = get_dir_and_name(abs_path_pdf)[1]
    abs_path_output_dir = create_output_dir(abs_path_pdf)
    if abs_path_output_dir == "Failed":
        return "Retry"
    # Conversion PDF --> JPG
    if abs_path_convert_exe == None \
    or remove_quotes(abs_path_convert_exe)[-11:] != 'convert.exe':
        print("The path to 'convert.exe' is wrong.")
        return "Failed"
    print("Convert PDF to images - may take a while...")
    density_option = ' -density '+ density + ' '
    if int_transparent_bg == 1:
        # Flatten transparency onto white so pages don't render black.
        transparency_option = ' -background white -alpha remove -strip '
    else:
        transparency_option = ''
    print_version_cmd = abs_path_convert_exe + ' -version'
    conversion_cmd = abs_path_convert_exe + \
        density_option + transparency_option + \
        ' ' + abs_path_pdf + ' ' + abs_path_output_dir + '/' + pdf_name + '.jpg'
    print(conversion_cmd)
    subprocess.call(shlex.split(print_version_cmd + ' && ' + conversion_cmd))
    # Getting the number of pages
    list_img = [name for name in os.listdir(abs_path_output_dir)]
    number_of_pages = len(list_img)
    print("Number of pages : ", number_of_pages)
    # Conversion JPG --> SVG and creation of the body of the HTML file
    SVG_START = '''<svg width='1101px' height='1976px' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'>
    <defs> <style type='text/css'><![CDATA[
    path { stroke-linecap: round; stroke-linejoin: round; }
    .ruleline { shape-rendering: crispEdges; }
    ]]></style> </defs>
    <g id='page_1' width='1101.000' height='1660.000' xruling='0.000' yruling='40.000' marginLeft='100.000' papercolor='#FFFFFF' rulecolor='#FF0000FF'>
    '''
    SVG_END = '''
    </g>
    </svg>
    '''
    body_string = ''
    page_number = 0
    for img in list_img:
        # JPG -> SVG
        page_number += 1
        print("Encoding Image : " + img)
        path_to_img = abs_path_output_dir + '/' + img
        jpg_file = open(path_to_img, 'rb')
        base64data = base64.b64encode(jpg_file.read())
        base64string = '<image xlink:href="data:image/jpg;base64,{0}" width="1101" '\
        'height="1660" x="0" y="0" />'.format(str(base64data)[2:-1])
        svg_string = SVG_START + base64string + SVG_END
        page_name = pdf_name + '_page{:03}.svg'.format(page_number)
        with open(abs_path_output_dir + '/' + page_name, 'w') as sv:
            sv.write(svg_string)
        # Generating the thumbail
        if page_number == 1:
            print("Generating thumbail")
            thumbail_data_uri = base64data.decode("ascii")
        del jpg_file
        os.remove(path_to_img)
        # add svg source to the body of the html
        body_string += '''<object data="{0}" type="image/svg+xml" width="1101" height="1660"></object>\n'''.format(page_name)
    # Generating the HTML file
    # NOTE(review): thumbail_data_uri is only bound inside the loop for
    # page 1; a PDF that produced no images would raise NameError here --
    # presumably impossible for a valid PDF, but confirm.
    html_string = '''<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Strict//EN'
    'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'>
    <html xmlns="http://www.w3.org/1999/xhtml">
    <head>
    <title>{}</title>
    <script type="text/writeconfig">
    <int name="pageNum" value="0" />
    <float name="xOffset" value="-390.196" />
    <float name="yOffset" value="-73.5294" />
    </script>
    </head>
    <body>
    <img id='thumbnail' style='display:none;' src='data:image/jpg;base64,{}'/>
    {}
    </body>
    </html>'''.format(pdf_name, thumbail_data_uri, body_string)
    print("Saving HMTL file")
    with open(abs_path_output_dir + '/' + pdf_name +'.html', 'w') as ht:
        ht.write(html_string)
    return abs_path_output_dir
def callback(url):
    """Open *url* in a new browser window (used as a tkinter click handler)."""
    open_new(url)
class MainWindow:
    """Main tkinter window for the converter.

    Guides the user through: locating ImageMagick's convert.exe (when it is
    not on PATH), choosing a PDF, setting conversion options and launching
    the PDF -> SVG/HTML conversion.  Widgets are created in __init__ and
    packed lazily by the display_* methods.
    """

    def __init__(self, window):
        """Create (but do not pack) all widgets; `window` is the Tk root."""
        self.window = window
        self.window.title("Convert PDF for StylusLab")
        # Fallback UI shown when convert.exe is not found on PATH.
        self.frPathConvertNotFound = tk.Frame(self.window)
        self.bSelectConvertExe = tk.Button(self.frPathConvertNotFound)
        self.lAbsPathConvertExe = tk.Label(self.frPathConvertNotFound)
        # PDF selection widgets.
        self.frSelectPDF = tk.Frame(self.window)
        self.frChoicePDF = tk.Frame(self.frSelectPDF)
        self.bChoosePDF = tk.Button(self.frChoicePDF)
        self.lAbsPathPDF = tk.Label(self.frChoicePDF)
        self.abs_path_pdf = ""
        # Widgets shown when the chosen PDF path contains whitespace.
        self.frWhitespaceError = tk.Frame(self.frSelectPDF)
        self.lWhpaceErrorMsg = tk.Label(self.frWhitespaceError)
        self.bChooseAnotherPDF = tk.Button(self.frWhitespaceError)
        self.bRename = tk.Button(self.frWhitespaceError)
        # Conversion options (density, transparent-background checkbox).
        self.frOptions = tk.Frame(self.window)
        self.frDensityOption = tk.Frame(self.frOptions)
        self.frDensity = tk.Frame(self.frDensityOption)
        self.eDensityValue = tk.Entry(self.frDensity)
        self.frDensityDescription = tk.Frame(self.frDensityOption)
        self.frTransparencyOption = tk.Frame(self.frOptions)
        self.PDFHasTranspBackground = tk.IntVar(self.frTransparencyOption)
        self.cbBgIsTransparent = tk.Checkbutton(self.frTransparencyOption)
        # Launch-conversion section.
        self.frConversionLauncher = tk.Frame(self.window)
        self.bLaunch = tk.Button(self.frConversionLauncher)

    def set_icon(self, filename):
        """Set the window icon from a .ico file."""
        self.window.iconbitmap(filename)

    def display_title(self):
        """Show the application title plus a clickable GitHub link."""
        tk.Label(
            self.window,
            text="Import PDF into Stylus Lab 'Write'",
            font=('Helvetica', 18, 'bold')
        ).pack()
        url_github_repo = "https://github.com/hello-d4n/import_PDF_into_StylusLab_Write"
        link = tk.Label(
            self.window,
            text = "https://github.com/hello-d4n/import_PDF_into_StylusLab_Write",
            font=('Helvetica', 8, 'italic'),
            cursor="hand2"
        )
        link.pack()
        link.bind("<Button-1>", lambda e: open_new(url_github_repo))

    def display_gs_not_found_error(self):
        """Show a boxed warning with the GhostScript download link."""
        # NOTE(review): this Frame has no explicit parent, so it attaches to
        # the default Tk root -- confirm that is always self.window here.
        frGsNotFound = tk.Frame(highlightbackground="black", highlightthickness=1)
        tk.Label(
            frGsNotFound,
            text="It seems that GhostScript is not installed on your computer.\n" \
                "Please make sure to install GhostScript. You can download it here :",
            font=('Helvetica', 10, 'bold')
        ).pack()
        lDownloadLink = tk.Label(
            frGsNotFound,
            text = "https://www.ghostscript.com/download/gsdnld.html (choose AGP Licence)",
            font=('Helvetica', 9, 'italic'),
            foreground="blue",
            cursor="hand2"
        )
        lDownloadLink.pack(pady=5)
        link = "https://www.ghostscript.com/download/gsdnld.html"
        lDownloadLink.bind("<Button-1>", lambda e: open_new(link))
        tk.Label(
            frGsNotFound,
            text="If GhostScript is already installed on your computer, ignore "\
                "this message.",
            font=('Helvetica', 10, 'bold')
        ).pack()
        frGsNotFound.pack(pady=20)

    def set_abs_path_to_convert_exe(self, abs_path_to_convert_exe):
        """ Method only used if convert.exe is in PATH variable. """
        self.lAbsPathConvertExe.configure(text=abs_path_to_convert_exe)

    def when_bSelectConvertExe_clicked(self):
        """Handle manual selection of convert.exe via the file dialog.

        Paths containing spaces are quoted and their slashes converted to
        doubled backslashes before being stored in the label.
        """
        choosen_path = browse_in_explorer('.exe')
        if not choosen_path == '':
            if choosen_path[-4:] == '.exe':
                if ' ' in choosen_path:
                    choosen_path = add_quotes(choosen_path)
                    L = choosen_path.split('/')
                    choosen_path = L[0]
                    for elem in L[1:]:
                        choosen_path += 2*'\\' + elem
                    self.lAbsPathConvertExe.configure(text=choosen_path)
                else:
                    self.lAbsPathConvertExe.configure(text=choosen_path)
            else:
                display_error_window("Filetype Error", "Please choose a executable file.")

    def display_get_convert_manually(self):
        """ Display a label and a button to select 'convert.exe' abs path in
        explorer.
        """
        tk.Label(
            self.frPathConvertNotFound,
            text="'convert.exe' was not found in PATH variable.\n"\
                "Please make sure ImageMagick is installed.\nYou can use the download link below :",
            font=('Helvetica', 10, 'bold')
        ).pack()
        lDownloadLink = tk.Label(
            self.frPathConvertNotFound,
            text = "ftp://ftp.imagemagick.org/pub/ImageMagick/binaries\n"\
                "(if you dont not which file to download, try one that ends by Q16-x86-static.exe)",
            font=('Helvetica', 9, 'italic'),
            foreground="blue",
            cursor="hand2"
        )
        lDownloadLink.pack(pady=5)
        link = "ftp://ftp.imagemagick.org/pub/ImageMagick/binaries"
        lDownloadLink.bind("<Button-1>", lambda e: open_new(link))
        tk.Label(
            self.frPathConvertNotFound,
            text="If ImageMagick is already installed or if you use the Portable version,\n"\
                "then please select the file :",
            font=('Helvetica', 10, 'bold')
        ).pack()
        self.bSelectConvertExe.configure(
            text="Select 'convert.exe'",
            command=self.when_bSelectConvertExe_clicked
        )
        self.bSelectConvertExe.pack(padx=5)
        self.lAbsPathConvertExe.configure(text="")
        self.lAbsPathConvertExe.pack(padx=5)
        self.frPathConvertNotFound.configure(highlightbackground="black", highlightthickness=1)
        self.frPathConvertNotFound.pack(padx=10, pady=10)

    def get_AbsPathConvertExe(self):
        """Return the convert.exe path currently shown in the label."""
        return self.lAbsPathConvertExe.cget("text")

    def when_bChooseAnotherPDF_clicked(self):
        """Dismiss the whitespace-error panel and re-enable PDF selection."""
        self.frWhitespaceError.pack_forget()
        self.lAbsPathPDF.configure(text="")
        self.bChoosePDF.configure(state="normal")
        self.bLaunch.pack(pady=10)

    def when_bRename_clicked(self, choosen_path):
        """Rename the PDF (and parent dirs) replacing spaces with '_'."""
        try:
            renamed_path = replace_char_in_path(choosen_path, ' ', '_')
        except PermissionError:
            error_msg = "It seems that the programm does not have permission to rename\n" \
                        "the name or the parent directory(ies) of the PDF file. \n\n" \
                        "Try to remove from the filename (or folder name) the following characters :\n" \
                        " '%' '@' '~' ':' '<' '>' '?' '!' '*' '|'\t and retry.\n\n" \
                        "If it still does not work, then pleace retry with Administrator Privileges\n" \
                        "(right click on the exe file and click 'Run as administrator')."
            display_error_window("Permission Error", error_msg)
        # Only proceed when the rename above actually succeeded.
        if 'renamed_path' in locals():
            self.frWhitespaceError.pack_forget()
            self.abs_path_pdf = renamed_path
            self.lAbsPathPDF.configure(text=renamed_path, foreground="green")
            self.bChoosePDF.configure(state="normal")
            self.bLaunch.configure(state="normal")
            self.bLaunch.pack(pady=10)

    def when_bChoosePDF_clicked(self):
        """Handle PDF selection; reject non-PDFs and paths with whitespace."""
        self.lAbsPathPDF.configure(text=browse_in_explorer('.pdf'))
        choosen_path = self.lAbsPathPDF.cget("text")
        if is_pdf(choosen_path):
            whitespace = ' '
            if whitespace in choosen_path:
                # Whitespace breaks the shlex-built command line later on:
                # offer to rename the file or pick another one.
                self.lAbsPathPDF.configure(foreground="red4")
                self.bLaunch.configure(state="disabled")
                self.bChoosePDF.configure(state="disabled")
                self.lWhpaceErrorMsg.configure(
                    text="Whitespace character(s) have benn detected in the filename or path of the PDF.\n" \
                        "To work properly, this programm needs that the PDF filename or path does not include \n" \
                        "whitespace character(s). To solve this problem, the file must be renamed by \n" \
                        "replacing each whitespace character by an underscore symbol ( _ ). \n" \
                        "What would you like to do ?",
                    foreground="blue"
                )
                self.lWhpaceErrorMsg.pack()
                self.bRename.configure(
                    text="Rename file",
                    foreground="blue",
                    command=lambda: self.when_bRename_clicked(choosen_path)
                )
                self.bRename.pack(side=tk.TOP)
                self.bChooseAnotherPDF.configure(
                    text="Choose Another PDF",
                    foreground="blue",
                    command=self.when_bChooseAnotherPDF_clicked
                )
                self.bChooseAnotherPDF.pack(side=tk.TOP)
                self.frWhitespaceError.pack(padx=10, pady=10)
                self.frSelectPDF.pack()
            else:
                self.lAbsPathPDF.configure(foreground="green4")
                self.bLaunch.configure(state="normal")
        else:
            if not choosen_path == '':
                print(choosen_path)
                display_error_window("Filetype Error", "Please choose a PDF file.")

    def display_pdf_selection(self):
        """Show the 'Choose PDF' button and the selected-path label."""
        tk.Label(
            text="Please select the PDF file you want to import in Stylus Lab 'Write' "
        ).pack(side=tk.TOP)
        self.bChoosePDF.configure(
            text="Choose PDF",
            command=self.when_bChoosePDF_clicked
        )
        self.bChoosePDF.pack(padx=5, pady=5, side=tk.LEFT)
        self.lAbsPathPDF.configure(text="")
        self.lAbsPathPDF.pack(padx=5, pady=5, side=tk.RIGHT)
        self.frChoicePDF.pack()
        self.frSelectPDF.pack(padx=10)

    def display_options(self):
        """Show the transparency checkbox and the density entry/help text."""
        self.cbBgIsTransparent.configure(
            text="PDF has transparent background (check this box if the output document "\
                "has a black background)",
            variable=self.PDFHasTranspBackground,
            offvalue=0,
            onvalue=1
        )
        self.cbBgIsTransparent.pack(padx=10, side=tk.TOP)
        self.frTransparencyOption.pack(side=tk.TOP)
        tk.Label(
            self.frDensity,
            text="ImageMagick Density"
        ).pack(padx=10, pady=10, side=tk.LEFT)
        self.eDensityValue.insert(0, str(200))
        self.eDensityValue.configure(width=8)
        self.eDensityValue.pack(padx=5, pady=3, side=tk.LEFT)
        self.frDensity.pack(side=tk.TOP)
        tk.Label(
            self.frDensityDescription,
            text="Density is usually an integer between 100 and 500. " \
                "To change the text sharpening, try modifying the density value."
        ).pack(padx=10, pady=4)
        link = tk.Label(
            self.frDensityDescription,
            text="More info on : https://imagemagick.org/script/command-line-options.php#density.",
            cursor="hand2"
        )
        link.pack(padx=10, pady=4)
        link.bind("<Button-1>", lambda e: callback("https://imagemagick.org/script/command-line-options.php#density"))
        self.frDensityDescription.pack(side=tk.TOP)
        self.frDensityOption.pack(side=tk.TOP)
        self.frOptions.pack()

    def when_bConvertAnotherPDF_clicked(self, frame_after_conversion):
        """Reset the UI so another conversion can be started."""
        frame_after_conversion.pack_forget()
        self.bChoosePDF.configure(state="normal")
        self.lAbsPathPDF.configure(text="")
        self.eDensityValue.configure(state="normal")
        self.bLaunch.pack(pady=10)

    def when_bLaunch_clicked(self):
        """Run the conversion and show a success/failure panel."""
        # Disable inputs while the (blocking) conversion runs.
        self.bChoosePDF.configure(state="disabled")
        self.bLaunch.configure(state="disabled")
        self.eDensityValue.configure(state="disabled")
        abs_path_to_pdf = self.lAbsPathPDF.cget("text")
        abs_path_to_convert = self.lAbsPathConvertExe.cget("text")
        density = self.eDensityValue.get()
        bool_transparent_bg = self.PDFHasTranspBackground.get()
        print(bool_transparent_bg)
        output_dir = conversion(abs_path_to_pdf, abs_path_to_convert, density, bool_transparent_bg)
        frAfterConversion = tk.Frame(self.window)
        if output_dir == "Failed":
            tk.Label(
                frAfterConversion,
                text="Conversion FAILED :( ",
                background="gray80"
            ).pack(pady=3)
            tk.Button(
                frAfterConversion,
                text="Convert an other PDF or Retry",
                command=lambda: self.when_bConvertAnotherPDF_clicked(frAfterConversion)
            ).pack()
        elif output_dir == "Retry":
            # Output dir could not be created: just re-enable the inputs.
            self.bChoosePDF.configure(state="normal")
            self.bLaunch.configure(state="normal")
            self.eDensityValue.configure(state="normal")
        else:
            tk.Label(
                frAfterConversion,
                text="PDF converted SUCCESSFULY. Converted document is located at :\n" + output_dir,
                background="gray80"
            ).pack(pady=3)
            tk.Button(
                frAfterConversion,
                text="Open folder in File Explorer",
                command=lambda: os.startfile(output_dir)
            ).pack()
            tk.Button(
                frAfterConversion,
                text="Convert an other PDF",
                command=lambda: self.when_bConvertAnotherPDF_clicked(frAfterConversion)
            ).pack()
        frAfterConversion.pack(padx=10, pady=5)

    def display_conversion_launcher(self):
        """Show the (initially disabled) 'Launch conversion' button."""
        tk.Label(
            self.frConversionLauncher,
            text="\nConversion may take some time if the PDF have a lot of pages",
            font=('helvetica 8 italic')
        ).pack()
        self.bLaunch.configure(
            text="Launch conversion",
            command=self.when_bLaunch_clicked,
            state="disabled"
        )
        self.bLaunch.pack(pady=10)
        self.frConversionLauncher.pack(pady=5)
| StarcoderdataPython |
140800 |
def config_fgsm(targeted, adv_ys):
    """Parameters for the Fast Gradient Sign Method attack.

    The label key is 'y_target' for targeted attacks, 'y' otherwise.
    """
    label_key = 'y_target' if targeted else 'y'
    return {
        label_key: adv_ys,
        'eps': 0.3,
        'clip_min': 0.,
        'clip_max': 1.,
    }
def config_bim(targeted, adv_ys):
    """Parameters for the Basic Iterative Method attack."""
    label_key = 'y_target' if targeted else 'y'
    return {
        label_key: adv_ys,
        'eps': 0.3,
        'eps_iter': 0.01,
        'nb_iter': 100,
        'clip_min': 0.,
        'clip_max': 1.,
    }
def config_mim(targeted, adv_ys):
    """Parameters for the Momentum Iterative Method attack."""
    label_key = 'y_target' if targeted else 'y'
    return {
        label_key: adv_ys,
        'eps': 0.1,
        'eps_iter': 0.01,
        'nb_iter': 100,
        'decay_factor': 0.7,
        'clip_min': 0.,
        'clip_max': 1.,
    }
def config_jsma(targeted, adv_ys):
    """Parameters for the Jacobian-based Saliency Map Attack."""
    label_key = 'y_target' if targeted else 'y'
    return {
        label_key: adv_ys,
        'theta': 1.,
        'gamma': 0.1,
        'clip_min': 0.,
        'clip_max': 1.,
    }
def config_vat(targeted, adv_ys):
    """Parameters for the Virtual Adversarial Training attack."""
    label_key = 'y_target' if targeted else 'y'
    return {
        label_key: adv_ys,
        'eps': 2.0,
        'xi': 1e-6,
        'num_iterations': 10,
        'clip_min': 0.,
        'clip_max': 1.,
    }
def config_cw(targeted, adv_ys):
    """Parameters for the Carlini-Wagner L2 attack."""
    label_key = 'y_target' if targeted else 'y'
    return {
        label_key: adv_ys,
        'max_iterations': 10000,
        'binary_search_steps': 9,
        'abort_early': True,
        'confidence': 0.,
        'learning_rate': 1e-2,
        'initial_const': 1e-3,
        'clip_min': 0.,
        'clip_max': 1.,
    }
def config_elastic(targeted, adv_ys):
    """Parameters for the Elastic-Net (EAD) attack."""
    label_key = 'y_target' if targeted else 'y'
    return {
        label_key: adv_ys,
        'beta': 1e-3,
        'confidence': 0.,
        'learning_rate': 1e-2,
        'binary_search_steps': 9,
        'max_iterations': 1000,
        'abort_early': False,
        'initial_const': 1e-3,
        'clip_min': 0.,
        'clip_max': 1.,
    }
def config_deepfool(targeted, adv_ys):
    """Parameters for the DeepFool attack."""
    label_key = 'y_target' if targeted else 'y'
    return {
        label_key: adv_ys,
        'nb_candidate': 10,
        'overshoot': 0.02,
        'max_iter': 50,
        'clip_min': 0.,
        'clip_max': 1.,
    }
def config_madry(targeted, adv_ys):
    """Parameters for the Madry et al. PGD attack."""
    label_key = 'y_target' if targeted else 'y'
    return {
        label_key: adv_ys,
        'eps': 0.3,
        'eps_iter': 0.01,
        'nb_iter': 40,
        'clip_min': 0.,
        'clip_max': 1.,
        'rand_init': False,
    }
| StarcoderdataPython |
1606808 |
# Third-Party
from dry_rest_permissions.generics import DRYPermissionsField
from rest_framework_json_api import serializers
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.core.validators import validate_email
from phonenumber_field.validators import validate_international_phonenumber
# Local
from .models import Group
from .models import Person
User = get_user_model()  # the project's configured auth user model
validate_url = URLValidator()  # reusable module-level URL validator instance
class GroupSerializer(serializers.ModelSerializer):
permissions = DRYPermissionsField()
AIC = {
503061: "Signature",
500983: "After Hours",
501972: "Main Street",
501329: "Forefront",
500922: "Instant Classic",
304772: "Musical Island Boys",
500000: "Masterpiece",
501150: "Ringmasters",
317293: "Old School",
286100: "Storm Front",
500035: "Crossroads",
297201: "OC Times",
299233: "Max Q",
302244: "Vocal Spectrum",
299608: "Realtime",
6158: "Gotcha!",
2496: "Power Play",
276016: "Four Voices",
5619: "Michigan Jake",
6738: "Platinum",
3525: "FRED",
5721: "Revival",
2079: "Yesteryear",
2163: "Nightlife",
4745: "Marquis",
3040: "Joker's Wild",
1259: "Gas House Gang",
2850: "Keepsake",
1623: "The Ritz",
3165: "Acoustix",
1686: "Second Edition",
492: "Chiefs of Staff",
1596: "Interstate Rivals",
1654: "Rural Route 4",
406: "The New Tradition",
1411: "Rapscallions",
1727: "Side Street Ramblers",
545: "Classic Collection",
490: "Chicago News",
329: "Boston Common",
4034: "Grandma's Boys",
318: "Bluegrass Student Union",
362: "Most Happy Fellows",
1590: "Innsiders",
1440: "Happiness Emporium",
1427: "Regents",
627: "Dealer's Choice",
1288: "Golden Staters",
1275: "Gentlemen's Agreement",
709: "Oriole Four",
711: "Mark IV",
2047: "Western Continentals",
1110: "Four Statesmen",
713: "Auto Towners",
715: "Four Renegades",
1729: "Sidewinders",
718: "Town and Country 4",
719: "Gala Lads",
1871: "The Suntones",
722: "Evans Quartet",
724: "Four Pitchikers",
726: "Gaynotes",
729: "Lads of Enchantment",
731: "Confederates",
732: "Four Hearsemen",
736: "The Orphans",
739: "Vikings",
743: "Four Teens",
746: "Schmitt Brothers",
748: "Buffalo Bills",
750: "Mid-States Four",
753: "Pittsburghers",
756: "Doctors of Harmony",
759: "Garden State Quartet",
761: "Misfits",
764: "Harmony Halls",
766: "Four Harmonizers",
770: "Elastic Four",
773: "Chord Busters",
775: "Flat Foot Four",
776: "Bartlsesville Barflies",
}
KIND = {
'quartet': Group.KIND.quartet,
'chorus': Group.KIND.chorus,
'chapter': Group.KIND.chapter,
'group': Group.KIND.noncomp,
'district': Group.KIND.district,
'organization': Group.KIND.international,
}
GENDER = {
'men': Group.GENDER.male,
'women': Group.GENDER.female,
'mixed': Group.GENDER.mixed,
}
DIVISION = {
'EVG Division I': Group.DIVISION.evgd1,
'EVG Division II': Group.DIVISION.evgd2,
'EVG Division III': Group.DIVISION.evgd3,
'EVG Division IV': Group.DIVISION.evgd4,
'EVG Division V': Group.DIVISION.evgd5,
'FWD Arizona': Group.DIVISION.fwdaz,
'FWD Northeast': Group.DIVISION.fwdne,
'FWD Northwest': Group.DIVISION.fwdnw,
'FWD Southeast': Group.DIVISION.fwdse,
'FWD Southwest': Group.DIVISION.fwdsw,
'LOL 10000 Lakes': Group.DIVISION.lol10l,
'LOL Division One': Group.DIVISION.lolone,
'LOL Northern Plains': Group.DIVISION.lolnp,
'LOL Packerland': Group.DIVISION.lolpkr,
'LOL Southwest': Group.DIVISION.lolsw,
'MAD Central': Group.DIVISION.madcen,
'MAD Northern': Group.DIVISION.madnth,
'MAD Southern': Group.DIVISION.madsth,
'NED Granite and Pine': Group.DIVISION.nedgp,
'NED Mountain': Group.DIVISION.nedmtn,
'NED Patriot': Group.DIVISION.nedpat,
'NED Sunrise': Group.DIVISION.nedsun,
'NED Yankee': Group.DIVISION.nedyke,
'SWD Northeast': Group.DIVISION.swdne,
'SWD Northwest': Group.DIVISION.swdnw,
'SWD Southeast': Group.DIVISION.swdse,
'SWD Southwest': Group.DIVISION.swdsw,
}
STATUS = {
'64ad817f-f3c6-4b09-a1b0-4bd569b15d03': Group.STATUS.inactive, # revoked
'd9e3e257-9eca-4cbf-959f-149cca968349': Group.STATUS.inactive, # suspended
'6e3c5cc6-0734-4edf-8f51-40d3a865a94f': Group.STATUS.inactive, # merged
'bd4721e7-addd-4854-9888-8a705725f748': Group.STATUS.inactive, # closed
'e04744e6-b743-4247-92c2-2950855b3a93': Group.STATUS.inactive, # expired
'55a97973-02c3-414a-bbef-22181ad46e85': Group.STATUS.active, # pending
'bb1ee6f6-a2c5-4615-b6ad-76130c37b1e6': Group.STATUS.active, # pending voluntary
'd7102af8-013a-40e7-bc85-0b00766ed124': Group.STATUS.active, # awaiting
'f3facc00-1990-4c68-9052-39e066906a38': Group.STATUS.active, # prospective
'4bfee76f-3110-4c32-bade-e5044fdd5fa2': Group.STATUS.active, # licensed
'7b9e5e34-a7c5-4f1e-9fc5-656caa74b3c7': Group.STATUS.active, # active
}
def validate_name(self, name):
if name in self.AIC.values():
raise ValidationError("Can not choose name of AIC member")
return name
def validate_status(self, status):
status = self.STATUS.get(status, None)
if not status:
raise self.ValidationError("Status not specified.")
return status
def validate_kind(self, kind):
kind = self.KIND.get(kind, None)
if not kind:
raise self.ValidationError("Kind not specified.")
return kind
def validate_gender(self, gender):
gender = self.GENDER.get(gender, None)
if not gender:
raise ValidationError("Gender not specified.")
def validate_district(self, district):
return district
def validate_division(self, division):
division = self.DIVISION.get(division, None)
if not division:
raise ValidationError("Division not specified.")
def validate_bhs_id(self, bhs_id):
return bhs_id
def validate_code(self, code):
return code
def validate_website(self, website):
if website:
return validate_url(website)
return ''
def validate_email(self, email):
if email:
return validate_email(email)
return ''
def validate_phone(self, phone):
if phone:
return validate_international_phonenumber(phone)
return ''
def validate_fax_phone(self, fax_phone):
if fax_phone:
return validate_international_phonenumber(fax_phone)
return ''
def validate_start_date(self, start_date):
return start_date
def validate_end_date(self, end_date):
return end_date
def validate_facebook(self, facebook):
if facebook:
return validate_url(facebook)
return ''
def validate_twitter(self, twitter):
if twitter:
return validate_url(twitter)
return ''
def validate_youtube(self, youtube):
if youtube:
return validate_url(youtube)
return ''
def validate_pinterest(self, pinterest):
if pinterest:
return validate_url(pinterest)
return ''
def validate_flickr(self, flickr):
if flickr:
return validate_url(flickr)
return ''
def validate_instagram(self, instagram):
if instagram:
return validate_url(instagram)
return ''
def validate_soundcloud(self, soundcloud):
if soundcloud:
return validate_url(soundcloud)
return ''
def validate_visitor_information(self, visitor_information):
return visitor_information.strip() if visitor_information else ''
    class Meta:
        # DRF model-serializer configuration for the Group model.
        model = Group
        # Order matters: DRF renders fields in list order.
        fields = [
            'id',
            'url',
            'name',
            'status',
            'kind',
            'gender',
            'district',
            'division',
            'bhs_id',
            'code',
            'website',
            'email',
            'phone',
            'fax_phone',
            'start_date',
            'end_date',
            'location',
            'facebook',
            'twitter',
            'youtube',
            'pinterest',
            'flickr',
            'instagram',
            'soundcloud',
            'image',
            'description',
            'visitor_information',
            'participants',
            'chapters',
            'notes',
            'tree_sort',
            'is_senior',
            'is_youth',
            'owners',
            'parent',
            # 'children',
            # 'repertories',
            'permissions',
            'nomen',
            'image_id',
            'created',
            'modified',
        ]
        # Derived/bookkeeping fields clients may read but never write.
        read_only_fields = [
            'nomen',
            'image_id',
            'created',
            'modified',
        ]
class PersonSerializer(serializers.ModelSerializer):
    """Serializer for Person records: profile, contact and membership fields."""
    # Per-object permission flags computed by dry-rest-permissions.
    permissions = DRYPermissionsField()
    class Meta:
        model = Person
        # Order matters: DRF renders fields in list order.
        fields = [
            'id',
            'url',
            'status',
            'prefix',
            'first_name',
            'middle_name',
            'last_name',
            'nick_name',
            'suffix',
            'birth_date',
            'spouse',
            'location',
            'part',
            'mon',
            'gender',
            'is_deceased',
            'is_honorary',
            'is_suspended',
            'is_expelled',
            'email',
            'address',
            'home_phone',
            'work_phone',
            'cell_phone',
            'airports',
            'image',
            'description',
            'notes',
            'bhs_id',
            'nomen',
            'name',
            'full_name',
            'common_name',
            'sort_name',
            'initials',
            'image_id',
            'owners',
            'current_through',
            # 'current_status',
            # 'current_district',
            # 'owners',
            'permissions',
            'created',
            'modified',
        ]
        # Computed name variants and bookkeeping fields are never writable.
        read_only_fields = [
            'nomen',
            'name',
            'full_name',
            'common_name',
            'sort_name',
            'initials',
            'image_id',
            # 'current_through',
            # 'current_status',
            # 'current_district',
            'created',
            'modified',
        ]
3201893 | #!/usr/bin/env python
# coding: utf-8
r"""pressure.py tests"""
from corelib.units.pressure import millibar
def test_pressures():
    r"""Check that one bar converts to 1000 millibar within a tight tolerance."""
    expected_value = 1e3
    atol = 1e-10
    converted = millibar(bar=1.)
    assert abs(converted - expected_value) <= atol
| StarcoderdataPython |
1621164 | <filename>robot_reply/migrations/0001_initial.py
# Generated by Django 2.2.6 on 2020-01-31 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the robot_reply app."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='WechatRobotLog',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('open_id', models.CharField(blank=True, max_length=32, null=True)),
                ('user_text', models.CharField(blank=True, max_length=255, null=True)),
                ('reply_text', models.CharField(blank=True, max_length=255, null=True)),
                ('created_at', models.DateTimeField(blank=True, null=True)),
                ('user_name', models.CharField(blank=True, max_length=255, null=True)),
                ('others', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                # managed=False: Django will not create/alter this table; it maps
                # onto the pre-existing 'wechat_robot_log' table.
                'db_table': 'wechat_robot_log',
                'managed': False,
            },
        ),
    ]
| StarcoderdataPython |
1675148 | import functools
import math
from math import sqrt
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
from models.diffusion.unet_diffusion import AttentionBlock
from models.gpt_voice.lucidrains_dvae import DiscreteVAE
from models.stylegan.stylegan2_rosinality import EqualLinear
from models.vqvae.vqvae import Quantize
from trainer.networks import register_model
from utils.util import opt_get
def default(val, d):
    """Return *val* unless it is None, in which case return the fallback *d*."""
    if val is None:
        return d
    return val
def eval_decorator(fn):
    """Decorator: run *fn* with the model in eval mode, restoring the prior training flag afterwards."""
    def inner(model, *args, **kwargs):
        previous_mode = model.training
        model.eval()
        result = fn(model, *args, **kwargs)
        model.train(previous_mode)
        return result
    return inner
class ModulatedConv1d(nn.Module):
    """StyleGAN2-style modulated 1-D convolution: the shared kernel is scaled
    per-sample by a style vector, optionally demodulated, and applied with a
    grouped conv1d so each batch element uses its own weights.
    """
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        demodulate=True,
        initial_weight_factor=1,
    ):
        super().__init__()
        self.eps = 1e-8
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        # NOTE(review): kernel_size ** 2 matches the 2-D conv fan-in formula;
        # for a 1-D kernel the fan-in would normally be in_channel * kernel_size.
        # Confirm this scaling is intentional before changing it (it affects
        # the effective init scale of converted checkpoints).
        fan_in = in_channel * kernel_size ** 2
        self.scale = initial_weight_factor / math.sqrt(fan_in)
        self.padding = kernel_size // 2
        self.weight = nn.Parameter(
            torch.randn(1, out_channel, in_channel, kernel_size)
        )
        # Projects the style vector to one multiplicative gain per input channel.
        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
        self.demodulate = demodulate
    def forward(self, input, style):
        batch, in_channel, d = input.shape
        style = self.modulation(style).view(batch, 1, in_channel, 1)
        weight = self.scale * self.weight * style
        if self.demodulate:
            # Normalize each output filter so the expected output variance is ~1.
            demod = torch.rsqrt(weight.pow(2).sum([2, 3]) + 1e-8)
            weight = weight * demod.view(batch, self.out_channel, 1, 1)
        weight = weight.view(
            batch * self.out_channel, in_channel, self.kernel_size
        )
        # Fold the batch into the channel dim and use groups=batch so every
        # sample is convolved with its own modulated kernel in one conv1d call.
        input = input.view(1, batch * in_channel, d)
        out = F.conv1d(input, weight, padding=self.padding, groups=batch)
        _, _, d = out.shape
        out = out.view(batch, self.out_channel, d)
        return out
class ChannelAttentionModule(nn.Module):
    """Projects channels_in -> channels_out via a 1x1-conv bypass, plus an
    attention-based correction computed across the output channels at every
    sequence position.
    """
    def __init__(self, channels_in, channels_out, attention_dim, layers, num_heads=1):
        super().__init__()
        self.channels_in = channels_in
        self.channels_out = channels_out
        # This is the bypass. It performs the same computation, without attention. It is responsible for stabilizing
        # training early on by being more optimizable.
        self.bypass = nn.Conv1d(channels_in, channels_out, kernel_size=1)
        # One learned embedding per output channel; attention runs over channels.
        self.positional_embeddings = nn.Embedding(channels_out, attention_dim)
        # Style-modulated lift from the scalar bypass value to attention_dim.
        self.first_layer = ModulatedConv1d(1, attention_dim, kernel_size=1, style_dim=channels_in, initial_weight_factor=.1)
        self.layers = nn.Sequential(*[AttentionBlock(attention_dim, num_heads=num_heads) for _ in range(layers)])
        self.post_attn_layer = nn.Conv1d(attention_dim, 1, kernel_size=1)
    def forward(self, inp):
        bypass = self.bypass(inp)
        emb = self.positional_embeddings(torch.arange(0, self.channels_out, device=inp.device)).permute(1,0).unsqueeze(0)
        b, c, w = bypass.shape
        # Reshape bypass so channels become structure and structure becomes part of the batch.
        x = bypass.permute(0,2,1).reshape(b*w, c).unsqueeze(1)
        # Reshape the input as well so it can be fed into the stylizer.
        style = inp.permute(0,2,1).reshape(b*w, self.channels_in)
        x = self.first_layer(x, style)
        x = emb + x
        x = self.layers(x)
        x = x - emb # Subtract of emb to further stabilize early training, where the attention layers do nothing.
        out = self.post_attn_layer(x).squeeze(1)
        # Restore the (batch, channels_out, width) layout of the bypass path.
        out = out.view(b,w,self.channels_out).permute(0,2,1)
        return bypass + out
class ResBlock(nn.Module):
    """Simple residual block: conv(3) -> act -> conv(3) -> act -> conv(1), with an identity skip."""
    def __init__(self, chan, conv, activation):
        super().__init__()
        stages = [
            conv(chan, chan, 3, padding=1),
            activation(),
            conv(chan, chan, 3, padding=1),
            activation(),
            conv(chan, chan, 1),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        residual = x
        return self.net(x) + residual
class UpsampledConv(nn.Module):
    """Nearest-neighbor upsample by `stride`, then apply the wrapped convolution.

    Drop-in alternative to a transposed convolution (avoids checkerboard artifacts).
    """
    def __init__(self, conv, *args, **kwargs):
        super().__init__()
        assert 'stride' in kwargs
        # Consume 'stride' ourselves; the inner conv runs at stride 1.
        self.stride = kwargs.pop('stride')
        self.conv = conv(*args, **kwargs)

    def forward(self, x):
        upsampled = nn.functional.interpolate(x, scale_factor=self.stride, mode='nearest')
        return self.conv(upsampled)
class ChannelAttentionDVAE(nn.Module):
    """Discrete VAE (1-D or 2-D) whose decoder's final projection back to the
    input channels is a ChannelAttentionModule instead of a plain conv.

    Weight-compatible with DiscreteVAE except for that final decoder layer
    (see convert_from_dvae below).
    """
    def __init__(
        self,
        positional_dims=2,
        num_tokens = 512,
        codebook_dim = 512,
        num_layers = 3,
        num_resnet_blocks = 0,
        hidden_dim = 64,
        channel_attention_dim = 64,
        channels = 3,
        stride = 2,
        kernel_size = 4,
        use_transposed_convs = True,
        encoder_norm = False,
        activation = 'relu',
        smooth_l1_loss = False,
        straight_through = False,
        normalization = None, # ((0.5,) * 3, (0.5,) * 3),
        record_codes = False,
    ):
        super().__init__()
        assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
        has_resblocks = num_resnet_blocks > 0
        self.num_tokens = num_tokens
        self.num_layers = num_layers
        self.straight_through = straight_through
        self.codebook = Quantize(codebook_dim, num_tokens)
        self.positional_dims = positional_dims
        assert positional_dims > 0 and positional_dims < 3 # This VAE only supports 1d and 2d inputs for now.
        if positional_dims == 2:
            conv = nn.Conv2d
            conv_transpose = nn.ConvTranspose2d
        else:
            conv = nn.Conv1d
            conv_transpose = nn.ConvTranspose1d
        if not use_transposed_convs:
            conv_transpose = functools.partial(UpsampledConv, conv)
        if activation == 'relu':
            act = nn.ReLU
        elif activation == 'silu':
            act = nn.SiLU
        else:
            # Fix: was `assert NotImplementedError()`, which always passes (the
            # exception instance is truthy) and later crashed with NameError on
            # `act`. Raise explicitly instead.
            raise NotImplementedError(f'Unsupported activation: {activation}')
        # Encoder doubles channels each layer; decoder mirrors it in reverse.
        enc_chans = [hidden_dim * 2 ** i for i in range(num_layers)]
        dec_chans = list(reversed(enc_chans))
        enc_chans = [channels, *enc_chans]
        dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]
        dec_chans = [dec_init_chan, *dec_chans]
        enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
        enc_layers = []
        dec_layers = []
        pad = (kernel_size - 1) // 2
        for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
            enc_layers.append(nn.Sequential(conv(enc_in, enc_out, kernel_size, stride = stride, padding = pad), act()))
            if encoder_norm:
                enc_layers.append(nn.GroupNorm(8, enc_out))
            dec_layers.append(nn.Sequential(conv_transpose(dec_in, dec_out, kernel_size, stride = stride, padding = pad), act()))
        for _ in range(num_resnet_blocks):
            dec_layers.insert(0, ResBlock(dec_chans[1], conv, act))
            enc_layers.append(ResBlock(enc_chans[-1], conv, act))
        if num_resnet_blocks > 0:
            dec_layers.insert(0, conv(codebook_dim, dec_chans[1], 1))
            enc_layers.append(conv(enc_chans[-1], codebook_dim, 1))
        # The decoder's final projection back to input channels is attention-based.
        dec_layers.append(ChannelAttentionModule(dec_chans[-1], channels, channel_attention_dim, layers=3, num_heads=1))
        self.encoder = nn.Sequential(*enc_layers)
        self.decoder = nn.Sequential(*dec_layers)
        self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
        # take care of normalization within class
        self.normalization = normalization
        self.record_codes = record_codes
        if record_codes:
            # Rolling buffer of emitted code indices, for histogram debugging.
            self.codes = torch.zeros((1228800,), dtype=torch.long)
            self.code_ind = 0
        self.internal_step = 0
    def norm(self, images):
        """Apply the configured channel-wise (mean, std) normalization, if any."""
        # Fix: was `if not self.normalization is not None`, a confusing double
        # negation equivalent to this plain None check.
        if self.normalization is None:
            return images
        means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
        arrange = 'c -> () c () ()' if self.positional_dims == 2 else 'c -> () c ()'
        means, stds = map(lambda t: rearrange(t, arrange), (means, stds))
        images = images.clone()
        images.sub_(means).div_(stds)
        return images
    def get_debug_values(self, step, __):
        """Return debug tensors for the trainer (code histogram when recording)."""
        dbg = {}
        if self.record_codes:
            # Report annealing schedule
            dbg.update({'histogram_codes': self.codes})
        return dbg
    @torch.no_grad()
    @eval_decorator
    def get_codebook_indices(self, images):
        """Encode *images* and return only the discrete code indices."""
        img = self.norm(images)
        logits = self.encoder(img).permute((0,2,3,1) if len(img.shape) == 4 else (0,2,1))
        sampled, commitment_loss, codes = self.codebook(logits)
        return codes
    def decode(
        self,
        img_seq
    ):
        """Decode a sequence of code indices back into the signal space."""
        image_embeds = self.codebook.embed_code(img_seq)
        b, n, d = image_embeds.shape
        kwargs = {}
        if self.positional_dims == 1:
            arrange = 'b n d -> b d n'
        else:
            # 2-D case assumes a square latent grid.
            h = w = int(sqrt(n))
            arrange = 'b (h w) d -> b d h w'
            kwargs = {'h': h, 'w': w}
        image_embeds = rearrange(image_embeds, arrange, **kwargs)
        images = self.decoder(image_embeds)
        return images
    def infer(self, img):
        """Full quantized round-trip: encode, quantize, decode."""
        img = self.norm(img)
        logits = self.encoder(img).permute((0,2,3,1) if len(img.shape) == 4 else (0,2,1))
        sampled, commitment_loss, codes = self.codebook(logits)
        return self.decode(codes)
    # Note: This module is not meant to be run in forward() except while training. It has special logic which performs
    # evaluation using quantized values when it detects that it is being run in eval() mode, which will be substantially
    # more lossy (but useful for determining network performance).
    def forward(
        self,
        img
    ):
        img = self.norm(img)
        logits = self.encoder(img).permute((0,2,3,1) if len(img.shape) == 4 else (0,2,1))
        sampled, commitment_loss, codes = self.codebook(logits)
        sampled = sampled.permute((0,3,1,2) if len(img.shape) == 4 else (0,2,1))
        if self.training:
            out = sampled
            for d in self.decoder:
                out = d(out)
        else:
            # This is non-differentiable, but gives a better idea of how the network is actually performing.
            out = self.decode(codes)
        # reconstruction loss
        recon_loss = self.loss_fn(img, out, reduction='none')
        # This is so we can debug the distribution of codes being learned.
        if self.record_codes and self.internal_step % 50 == 0:
            codes = codes.flatten()
            l = codes.shape[0]
            i = self.code_ind if (self.codes.shape[0] - self.code_ind) > l else self.codes.shape[0] - l
            self.codes[i:i+l] = codes.cpu()
            self.code_ind = self.code_ind + l
            if self.code_ind >= self.codes.shape[0]:
                self.code_ind = 0
        self.internal_step += 1
        return recon_loss, commitment_loss, out
def convert_from_dvae(dvae_state_dict_file):
    """Load a trained DiscreteVAE checkpoint and port its weights into a
    ChannelAttentionDVAE, saving the result to 'converted_cdvae.pth'.

    Every parameter transfers except the final decoder layer; the attention
    module's conv bypass is seeded from the DVAE's final decoder conv.
    """
    shared_params = {
        'channels': 80,
        'positional_dims': 1,
        'num_tokens': 8192,
        'codebook_dim': 2048,
        'hidden_dim': 512,
        'stride': 2,
        'num_resnet_blocks': 3,
        'num_layers': 2,
        'record_codes': True,
    }
    source = DiscreteVAE(**shared_params)
    source.load_state_dict(torch.load(dvae_state_dict_file), strict=True)
    target = ChannelAttentionDVAE(channel_attention_dim=256, **shared_params)
    missing, unexpected = target.load_state_dict(source.state_dict(), strict=False)
    # Only the swapped-out final decoder layer may mismatch.
    for key in missing:
        assert 'decoder.6' in key
    for key in unexpected:
        assert 'decoder.6' in key
    target.decoder[-1].bypass.load_state_dict(source.decoder[-1].state_dict())
    torch.save(target.state_dict(), 'converted_cdvae.pth')
@register_model
def register_dvae_channel_attention(opt_net, opt):
    """Trainer factory hook: build a ChannelAttentionDVAE from the opt 'kwargs' dict."""
    kwargs = opt_get(opt_net, ['kwargs'], {})
    return ChannelAttentionDVAE(**kwargs)
if __name__ == '__main__':
    # One-off conversion of a locally trained DVAE checkpoint (hard-coded path).
    convert_from_dvae('D:\\dlas\\experiments\\train_dvae_clips\\models\\20000_generator.pth')
    # Kept as reference: quick smoke test of the module.
    '''
    v = ChannelAttentionDVAE(channels=80, normalization=None, positional_dims=1, num_tokens=4096, codebook_dim=4096,
                             hidden_dim=256, stride=2, num_resnet_blocks=2, kernel_size=3, num_layers=2, use_transposed_convs=False)
    o=v(torch.randn(1,80,256))
    print(v.get_debug_values(0, 0))
    print(o[-1].shape)
    '''
| StarcoderdataPython |
3290946 | <gh_stars>1-10
# testing.py
#
# Authors:
# - <NAME> <<EMAIL>>
"""Support for no database testing."""
from django.test.runner import DiscoverRunner
class DatabaseLessTestRunner(DiscoverRunner):
    """A test suite runner that does not set up and tear down a database."""
    def setup_databases(self, *args, **kwargs):
        """Overrides DjangoTestSuiteRunner"""
        # Intentionally a no-op: tests run without any database.
    def teardown_databases(self, *args, **kwargs):
        """Overrides DjangoTestSuiteRunner"""
        # Intentionally a no-op; pairs with the no-op setup_databases above.
| StarcoderdataPython |
179622 | # run_args: -n
# statcheck: stats['slowpath_getattr'] <= 10
# statcheck: stats['slowpath_setattr'] <= 10
class C(object):
    # Empty class used only as a target for repeated __name__ assignment below.
    pass
def f(obj, name):
    # Set then read the class's __name__ (a getset descriptor), exercising the
    # setattr/getattr paths whose slowpath counts are checked above.
    # (Python 2 source: print statement.)
    obj.__name__ = name
    print obj.__name__
# pass in a class each time; repeated calls should hit the attribute fast path
# (slowpath counters above must stay low)
for i in xrange(1000):
    f(C, str(i))
# TODO test guards failing
# I think we need to get a getset descriptor that isn't __name__
# or implement __dict__ to write such a test: otherwise it's impossible
# to actually get our hands on the descriptor object :\
| StarcoderdataPython |
4802972 | # use hashmap will take O(MxN) time. where MM is a length of the word to find, and NN is the number of words.
# Trie could use less space compared to hashmap when storing many keys with the same prefix.
# In this case, using trie has only O(MxN) time complexity, where M is the key length, and N is the number of keys.
class WordDictionary:
    """Word store supporting exact lookup and '.' single-character wildcards.

    Implemented as a nested-dict trie; the sentinel key '$' marks that a
    complete word terminates at a node.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.trie = {}

    def addWord(self, word: str) -> None:
        """
        Adds a word into the data structure.

        Time/space: O(M) for a word of length M.
        """
        node = self.trie
        for ch in word:
            # setdefault replaces the manual "if ch not in node: node[ch] = {}".
            node = node.setdefault(ch, {})
        node['$'] = True

    def search(self, word: str) -> bool:
        """
        Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.

        Time: O(M) for well-defined words; up to O(M*N) when '.' wildcards force
        branching over N stored words.
        """
        def search_in_node(word, node) -> bool:
            for i, ch in enumerate(word):
                if ch in node:
                    # Literal match (covers '.' stored as a literal character too).
                    node = node[ch]
                elif ch == '.':
                    # Wildcard: try every child at this level, skipping the '$' marker.
                    return any(
                        x != '$' and search_in_node(word[i + 1:], node[x])
                        for x in node
                    )
                else:
                    return False
            # Reached the end of the pattern; True only if a word ends here.
            return '$' in node

        return search_in_node(word, self.trie)
# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word) | StarcoderdataPython |
1621398 | <filename>excel_OpenPyXL.py
from openpyxl import load_workbook
from random import choice
# (Portuguese, translated): "Import the Excel database and randomize the cell choice."
"""Importar a base do Excel e Randomizar a escolha das celulas"""
# Load the workbook and grab column A from the 'dados' (data) and 'tempo' (time) sheets.
wb = load_workbook('database_test.xlsx')
plan = wb['dados']
lista = plan['A']
plant = wb['tempo']
listat = plant['A']
print(len(lista))
print(len(listat))
# For each entry, pick a random word cell and a random time cell and report them.
for click in range(len(lista)):
    dado = choice(lista)
    tempo = choice(listat)
    print(f'A palavra foi {dado.value} e o tempo {tempo.value} segundos')
| StarcoderdataPython |
4806045 | import csv
import numpy
import time
import datetime
from decimal import Decimal
from operator import itemgetter
import math
import os
###user input line 107
def interpolate_gps(GPS_selected,target_time_slot,mid_points_time_stamp):
    """Estimate (long, lat) for each mid-point timestamp in a target time slot.

    GPS_selected rows carry a timestamp in column 5 and lat/long in columns
    13/14. The slot endpoints are first located by distance-weighted
    interpolation/extrapolation from the two temporally closest GPS fixes,
    then mid-points are linearly interpolated between those endpoints.
    Returns (inter_log, inter_lat) lists in mid-point order.
    """
    coorinates_target_time_slot = []
    print('target time',type(target_time_slot[0]))
    for t in target_time_slot:
        ##find two nearest GPS points to target time slot start and end
        diffrence = [abs(float(GPS_selected[i][5])-float(t)) for i in range(len(GPS_selected))]
        ## find top 2 min values, closest ones
        top2 = list(sorted(enumerate(diffrence), reverse=True, key = itemgetter(1)))[-2:] # find top 2 closest items
        # print('top2', top2)
        m = top2[1][1] #closest
        n = top2[0][1]
        lat1, long1 = float(GPS_selected[top2[1][0]][13]), float(GPS_selected[top2[1][0]][14]) #closest
        lat2, long2 = float(GPS_selected[top2[0][0]][13]), float(GPS_selected[top2[0][0]][14])
        # print('prins',m,n,long1,lat1,long2,lat2,GPS_selected[top2[1][0]],GPS_selected[top2[0][0]])
        # Estimate the GPS coordinates of taregt time slot start and end
        # NOTE(review): this compares the timestamp t against the two time
        # *differences* (top2[...][1]), not against the GPS timestamps, so the
        # "in between" branch can never trigger for absolute timestamps and the
        # extrapolation branch is always taken. Suspected bug — confirm intent
        # before changing, as recorded datasets were processed with this logic.
        if top2[1][1]<t<top2[0][1]: # in between
            # print('first')
            longx = (m*long2+n*long1)/(m+n)
            latx = (m*lat2+n*lat1)/(m+n)
        else:
            # print('second') # on one side
            longx = (n*long1-m*long2)/(n-m)
            latx = (n*lat1-m*lat2)/(n-m)
        coorinates_target_time_slot.append((latx,longx))
    print('The estimated GPS coordinats of target time slot start/end', coorinates_target_time_slot)
    ## split the long and latitiudat of target time slot start and end
    lat1, long1 = coorinates_target_time_slot[0][0], coorinates_target_time_slot[0][1]
    lat2, long2 = coorinates_target_time_slot[1][0], coorinates_target_time_slot[1][1]
    print('traget time slot long/log start/end',lat1,lat2,long1,long2)
    ## the points that we want to estimate are in between so the same as line 26 to 28
    inter_lat = []
    inter_log = []
    for mid_points in mid_points_time_stamp:
        mid_m = mid_points - target_time_slot[0]
        mid_n = target_time_slot[1]- mid_points
        inter_lat.append((mid_m*lat2+mid_n*lat1)/(mid_m+mid_n))
        inter_log.append((mid_m*long2+mid_n*long1)/(mid_m+mid_n))
    # print(inter_lat,inter_log)
    return inter_log, inter_lat
def open_csv_files(csv_files_directory):
    """Load the four per-experiment CSVs (GPS, image, pcd, RF) from one directory.

    Returns four lists of rows, in that order.
    """
    def _load(path):
        with open(path, 'r', encoding='UTF8', newline='') as f:
            return list(csv.reader(f))

    print('*************opening GPS csv file****************')
    GPS_data = _load(csv_files_directory + '/GPS_data.csv')
    print('*************opening Image csv file****************')
    Image_data = _load(csv_files_directory + '/Image_data.csv')
    print('*************opening pcd csv file****************')
    pcd_data = _load(csv_files_directory + '/pcd_data.csv')
    print('*************opening RF csv file****************')
    RF_data = _load(csv_files_directory + '/RF_data.csv')
    print('# of samples for GPS image lidar and RF:',len(GPS_data),len(Image_data),len(pcd_data),len(RF_data))
    return GPS_data, Image_data, pcd_data, RF_data
def show_all_files_in_directory(input_path, extension):
    """Recursively collect the paths of all files under *input_path* whose names end with *extension*."""
    return [
        os.path.join(dirpath, filename)
        for dirpath, _subdirs, filenames in os.walk(input_path)
        for filename in filenames
        if filename.endswith(extension)
    ]
def check_and_create(dir_path):
    """Ensure *dir_path* exists: return True if it already existed, otherwise create it and return False."""
    existed = os.path.exists(dir_path)
    if not existed:
        os.makedirs(dir_path)
    return existed
# Driver script: for every .bag file, pair lidar frames with their closest
# camera frame within each RF time slot, interpolate GPS for those times,
# attach the RF columns, and optionally convert lat/long to local x/y meters.
path_directory = '/media/batool/MULTIMODAL_DATA/Multimodal_Data/Cat4/car_20_lr_blockage_20_rl/bag_files'
bag_files = show_all_files_in_directory(path_directory,'.bag')
print(bag_files)
convert_to_x_y = True
for bf in bag_files:
    # Derive the experiment identifiers from the bag filename
    # (Category_speed_direction_lane_episode...).
    bag_file_name = bf.split('/')[-1].split('.')[0]
    episode = bag_file_name.split('_')[4]
    path_of_expeirmnet = path_directory[:-9]+'Extract/'+bag_file_name[:-2]+'/episode_'+str(episode)
    print('path_of_expeirmnet',path_of_expeirmnet)
    GPS_data, Image_data, pcd_data, RF_data = open_csv_files(path_of_expeirmnet)
    # extracting same exoeriment data
    experiment_info = [bag_file_name.split('_')[0],bag_file_name.split('_')[1],bag_file_name.split('_')[2],bag_file_name.split('_')[3],bag_file_name.split('_')[4]]
    print('************ experiment_info*************')
    print('experiment_info',experiment_info)
    # experiment_info = [Catergory,speed,direction,lan,episodes]
    GPS_selected = [GPS_data[i] for i in range(len(GPS_data)) if GPS_data[i][0:5]==experiment_info]
    pcd_selected = [pcd_data[i] for i in range(len(pcd_data)) if pcd_data[i][0:5]==experiment_info]
    img_selected = [Image_data[i] for i in range(len(Image_data)) if Image_data[i][0:5]==experiment_info]
    RF_selected = [RF_data[i] for i in range(len(RF_data)) if RF_data[i][0:5]==experiment_info]
    synchornized_data_all = []
    # Each consecutive pair of RF samples defines one target time slot.
    for i in range(len(RF_selected)-1): #range(len(RF_selected)-1) or range(6,7)
        synchornized_data = []
        target_time_slot = [float(RF_selected[i][5]),float(RF_selected[i+1][5])]
        print('target time slot/diffrence',target_time_slot,float(target_time_slot[1])-float(target_time_slot[0]))
        ##find image and lidar modalities in this slot
        pcd_in_range = [pcd_selected[l] for l in range(len(pcd_selected)) if target_time_slot[0]<float(pcd_selected[l][5])<target_time_slot[1]]
        image_in_range = [img_selected[l] for l in range(len(img_selected)) if target_time_slot[0]<float(img_selected[l][5])<target_time_slot[1]]
        print('# of pcd and image files in range',len(pcd_in_range),len(image_in_range))
        if len(image_in_range) !=0: ##added
            print('************ pair synchornized image and lidar data*****************')
            # For each lidar frame, pick the image with the closest timestamp.
            for l in range(len(pcd_in_range)):
                diffrence=[]
                for j in range(len(image_in_range)):
                    diffrence.append(abs(float(pcd_in_range[l][5])-float(image_in_range[j][5])))
                closest_image = diffrence.index(min(diffrence)) # find minuum diatnce
                # attach:experiment info+time stamp of lidar+pcd file path+image file path
                synchornized_data.append(experiment_info+[pcd_in_range[l][5],pcd_in_range[l][-1],image_in_range[closest_image][-1]])
            print('************ Interpolating gps data*****************')
            mid_points_time_stamp_to_calculate = [float(s[5]) for s in synchornized_data]
            inter_log, inter_lat = interpolate_gps(GPS_selected,target_time_slot,mid_points_time_stamp_to_calculate)
            print(len(inter_log),len(inter_lat),len(synchornized_data))
            print('check point',GPS_selected,target_time_slot,mid_points_time_stamp_to_calculate,inter_log,inter_lat)
            print('************ attaching gps interpolated data*****************')
            # NOTE(review): bias_index is computed but never used.
            bias_index = len(synchornized_data_all)
            for l in range(len(synchornized_data)):
                synchornized_data[l].append(inter_lat[l])
                synchornized_data[l].append(inter_log[l])
            print('************ attaching RF data*****************')
            # The slot's RF columns (index 6 onward) are replicated onto every row.
            for l in range(len(synchornized_data)):
                for c in range(len(RF_selected[i][6:])):
                    synchornized_data[l].append(RF_selected[i][6+c])
            # print(synchornized_data) ### HERE check appedn
            ### unpack the synchronized_data
            [synchornized_data_all.append(s) for s in synchornized_data]
    header = ['Category','speed','direction','lane','episode','time_stamp','lida_file','image_file','lat','long','max_rssi','selected_sector','tx_mcs','rx_mcs','sqi','all_rssi', 'all_sector']
    ###the appended _data file is generated
    if convert_to_x_y:
        print('*********Converting to x y **********')
        converted = []
        ## find minimum
        # min_lat = min([float(d[8]) for d in synchornized_data_all])
        # min_long = min([float(d[9]) for d in synchornized_data_all])
        # NOTE(review): a fixed global origin is used so x/y are comparable
        # across experiments (the per-experiment minimum is commented out).
        min_lat = 42.33812866170525
        min_long = -71.08705289706138
        print('min lat/long', min_lat,min_long)
        # Equirectangular approximation: degrees -> meters relative to the origin.
        for d in synchornized_data_all:
            dx = 1000*(float(d[9])-min_long)*40000*math.cos((float(d[8])+min_lat)*math.pi/360)/360
            dy = 1000*(float(d[8])-min_lat)*40000/360
            converted.append(d+[dx,dy])
        synchornized_data_all = converted
        header = header + ['x','y']
    print(header)
    print('# of generated data',len(synchornized_data_all))
# check_and_create(path_of_expeirmnet)
# print('************ write to csv file*****************')
# with open(path_of_expeirmnet+'/'+'Synchornized_data.csv', 'w', encoding='UTF8', newline='') as f:
# writer = csv.writer(f)
# writer.writerow(header)
# writer.writerows(synchornized_data_all)
# for episodes in range(9):
# print('*************Synchronization****************')
# for Catergory in ["Cat1","Cat2"]:
# for speed in ['5mph','10mph',"20mph"]:
# for direction in ['lr']:
# for lan in ['opposite','same']:
# synchornized_data_all = []
# path_of_expeirmnet = path_directory+'Extract/'+Catergory+'_'+speed+'_'+direction+'_'+lan+'/episode_'+str(episodes)
# GPS_data, Image_data, pcd_data, RF_data = open_csv_files(path_of_expeirmnet)
# # extracting same exoeriment data
# experiment_info = [Catergory,speed,direction,lan,episodes]
# GPS_selected = [GPS_data[i] for i in range(len(GPS_data)) if GPS_data[i][0:5]==experiment_info]
# pcd_selected = [pcd_data[i] for i in range(len(pcd_data)) if pcd_data[i][0:5]==experiment_info]
# img_selected = [Image_data[i] for i in range(len(Image_data)) if Image_data[i][0:5]==experiment_info]
# RF_selected = [RF_data[i] for i in range(len(RF_data)) if RF_data[i][0:5]==experiment_info]
# for i in range(len(RF_selected)-1): #range(len(RF_selected)-1) or range(6,7)
# synchornized_data = []
# target_time_slot = [float(RF_selected[i][5]),float(RF_selected[i+1][5])]
# print('target time slot/diffrence',target_time_slot,float(target_time_slot[1])-float(target_time_slot[0]))
# ##find image and lidar modalities in this slot
# pcd_in_range = [pcd_selected[l] for l in range(len(pcd_selected)) if target_time_slot[0]<float(pcd_selected[l][5])<target_time_slot[1]]
# image_in_range = [img_selected[l] for l in range(len(img_selected)) if target_time_slot[0]<float(img_selected[l][5])<target_time_slot[1]]
# print('# of pcd and image files in range',len(pcd_in_range),len(image_in_range))
# if len(image_in_range) !=0: ##added
# print('************ pair synchornized image and lidar data*****************')
# for l in range(len(pcd_in_range)):
# diffrence=[]
# for j in range(len(image_in_range)):
# diffrence.append(abs(float(pcd_in_range[l][5])-float(image_in_range[j][5])))
# closest_image = diffrence.index(min(diffrence)) # find minuum diatnce
# # attach:experiment info+time stamp of lidar+pcd file path+image file path
# synchornized_data.append(experiment_info+[pcd_in_range[l][5],pcd_in_range[l][-1],image_in_range[closest_image][-1]])
# print('************ Interpolating gps data*****************')
# mid_points_time_stamp_to_calculate = [float(s[5]) for s in synchornized_data]
# inter_log, inter_lat = interpolate_gps(GPS_selected,target_time_slot,mid_points_time_stamp_to_calculate)
# print(len(inter_log),len(inter_lat),len(synchornized_data))
# print('************ attaching gps interpolated data*****************')
# bias_index = len(synchornized_data_all)
# for l in range(len(synchornized_data)):
# synchornized_data[l].append(inter_lat[l])
# synchornized_data[l].append(inter_log[l])
# print('************ attaching RF data*****************')
# for l in range(len(synchornized_data)):
# for c in range(len(RF_selected[i][6:])):
# synchornized_data[l].append(RF_selected[i][6+c])
# print(synchornized_data) ### HERE check appedn
# ### unpack the synchronized_data
# [synchornized_data_all.append(s) for s in synchornized_data]
# header = ['Category','speed','direction','lane','episode','time_stamp','lida_file','image_file','lat','long','max_rssi','selected_sector','tx_mcs','rx_mcs','sqi','all_rssi', 'all_sector']
# ###the appended _data file is generated
# if convert_to_x_y:
# print('*********Converting to x y **********')
# converted = []
# ## find minimum
# # min_lat = min([float(d[8]) for d in synchornized_data_all])
# # min_long = min([float(d[9]) for d in synchornized_data_all])
# min_lat = 42.33812866170525
# min_long = -71.08705289706138
# print('min lat/long', min_lat,min_long)
# for d in synchornized_data_all:
# dx = 1000*(float(d[9])-min_long)*40000*math.cos((float(d[8])+min_lat)*math.pi/360)/360
# dy = 1000*(float(d[8])-min_lat)*40000/360
# converted.append(d+[dx,dy])
# synchornized_data_all = converted
# header = header + ['x','y']
# print(header)
# print('# of generated data',len(synchornized_data_all))
# print('chekc',path_of_expeirmnet+'/'+'Synchornized_data.csv')
# # print('************ write to csv file*****************')
# # with open(path_of_expeirmnet+'/'+'Synchornized_data.csv', 'w', encoding='UTF8', newline='') as f:
# # writer = csv.writer(f)
# # writer.writerow(header)
# # writer.writerows(synchornized_data_all) | StarcoderdataPython |
3394492 | <filename>flatland/database/population/decorator/symbol_stack_placement_instances.py
"""
symbol_stack_placement_instances.py
"""
population = [
# Double solid arrow
{'Position': 1, 'Compound symbol': 'double solid arrow', 'Simple symbol': 'solid arrow',
'Arrange': 'adjacent', 'Offset x': 0, 'Offset y': 0},
{'Position': 2, 'Compound symbol': 'double solid arrow', 'Simple symbol': 'solid arrow',
'Arrange': 'last', 'Offset x': 0, 'Offset y': 0},
# Double hollow arrow
{'Position': 1, 'Compound symbol': 'double hollow arrow', 'Simple symbol': 'hollow arrow',
'Arrange': 'adjacent', 'Offset x': 0, 'Offset y': 0},
{'Position': 2, 'Compound symbol': 'double hollow arrow', 'Simple symbol': 'hollow arrow',
'Arrange': 'last', 'Offset x': 0, 'Offset y': 0},
# Double open arrow
{'Position': 1, 'Compound symbol': 'double open arrow', 'Simple symbol': 'open arrow',
'Arrange': 'adjacent', 'Offset x': 0, 'Offset y': 0},
{'Position': 2, 'Compound symbol': 'double open arrow', 'Simple symbol': 'open arrow',
'Arrange': 'last', 'Offset x': 0, 'Offset y': 0},
# Circled dot
{'Position': 1, 'Compound symbol': 'circled dot', 'Simple symbol': 'hollow large circle',
'Arrange': 'layer', 'Offset x': 0, 'Offset y': 0},
{'Position': 2, 'Compound symbol': 'circled dot', 'Simple symbol': 'solid small dot',
'Arrange': 'top', 'Offset x': 0, 'Offset y': 0}
]
| StarcoderdataPython |
3214358 | <reponame>JayceSYH/FactorKeeper
class ServiceDebugger(object):
    """Globally toggleable decorator that logs Flask service calls.

    Prints the service name, keyword parameters, request form data,
    wall-clock timing and the (truncated) response. Exceptions raised by the
    wrapped view are printed and swallowed — an empty response is returned
    instead; this is a best-effort debugging aid, not production error
    handling.
    """

    # Global switch; when False, debug() hands back the function unchanged.
    __enable_debug = True

    @classmethod
    def set_debug(cls, enable=True):
        """Globally enable or disable debug instrumentation."""
        cls.__enable_debug = enable

    @classmethod
    def debug(cls, show_form=True, show_param=True, show_response=True, count_time=True, content_limit=100, disable=False):
        """Decorator factory; `disable` silences logging for one endpoint only."""
        from flask import request
        from functools import wraps
        from datetime import datetime
        import traceback

        if not cls.__enable_debug:
            def empty_wrapper(func):
                return func
            return empty_wrapper

        def make_wrapper(func):
            @wraps(func)
            def wrapper(**kwargs):
                if not disable:
                    print("-" * 10)
                    print("Service:{}".format(func.__name__))

                    if show_param:
                        print("Param:")
                        for name in kwargs:
                            val = kwargs[name]
                            print("\t{0}: {1}".format(name, val))

                    if show_form:
                        print("FormData:")
                        for name in request.form:
                            print("\t{0}: {1}".format(name, str(request.form.get(name))[:content_limit]))

                    if count_time:
                        start_time = datetime.now()
                        print("StartTime:{}".format(start_time))

                try:
                    resp = func(**kwargs)
                except Exception:
                    # Fix: was a bare `except:` that also printed the traceback
                    # twice (format_exc to stdout, then print_exc). Print once
                    # and swallow so debugging never breaks the endpoint.
                    resp = ""
                    traceback.print_exc()

                if not disable:
                    if count_time:
                        end_time = datetime.now()
                        print("EndTime:{}".format(end_time))
                        print("TimeCost:{}".format(end_time - start_time))
                    if show_response:
                        # NOTE(review): assumes the view returns a str-like response.
                        print("Return:" + resp[:content_limit])
                return resp
            return wrapper
        return make_wrapper
3200171 | import unittest
import os
import sys
# Make the project root (the parent of this test directory) importable so the
# test module can find the application packages when run directly.
PYTHON_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                           os.pardir))
# add to python system path
sys.path.append(PYTHON_PATH)
class TestApiEndpoints(unittest.TestCase):
    """Smoke tests for the public API endpoints."""

    def setUp(self):
        """Collect the endpoints that the suite is expected to exercise."""
        self.testable_endpoints = [
            "/api/meta",
            # filter_blueprint.py
            "/api/filter",
            # zone_facts_blueprint.py
            "/api/census/poverty_rate/ward",
            "/api/census/poverty_rate/neighborhood_cluster",
            "/api/census/poverty_rate/census_tract",
            # project_view_blueprint.py
            "/api/wmata/NL000092",
            "/api/building_permits/0.5?latitude=38.923&longitude=-76.997",
            "/api/projects/0.5?latitude=38.923&longitude=-76.997",
            "/api/count/crime/all/12/ward",
            "/api/count/building_permits/construction/12/neighborhood_cluster",
        ]

    def test_endpoints(self):
        """Placeholder until real request checks are written."""
        # TODO write test so that it makes sure there is no failure from
        # endpoint, at least returns something
        pass
# Allow the test module to be executed directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
113812 | import os
from pytest import yield_fixture
from .helpers import setup
from ..api import VersionedHDF5File
@yield_fixture
def h5file(tmp_path, request):
    """Yield an HDF5 test file handle, honoring any ``setup_args`` marker.

    Tests may override ``file_name``, ``name`` and ``version_name`` by
    decorating themselves with ``@pytest.mark.setup_args(...)``. The file is
    closed after the test finishes.
    """
    file_name = os.path.join(tmp_path, 'file.hdf5')
    name = None
    version_name = None
    marker = request.node.get_closest_marker('setup_args')
    if marker is not None:
        # Fall back to the defaults computed above when a key is absent.
        file_name = marker.kwargs.get('file_name', file_name)
        name = marker.kwargs.get('name', name)
        version_name = marker.kwargs.get('version_name', version_name)
    handle = setup(file_name=file_name, name=name, version_name=version_name)
    yield handle
    handle.close()
@yield_fixture
def vfile(tmp_path, h5file):
    """Yield a VersionedHDF5File wrapping the ``h5file`` fixture; close it afterwards."""
    versioned = VersionedHDF5File(h5file)
    yield versioned
    versioned.close()
| StarcoderdataPython |
1775989 | <reponame>dclavijo45/backend-whatsup<filename>routes/login.py
from controllers.login import LoginController
# Route table for version 1 of the login endpoint: URL rule plus the Flask
# view callable built from LoginController.
# Fix: a dataset-separator artifact fused onto the closing brace made this
# statement syntactically invalid; it has been removed.
login_v1 = {
    "login": "/login/v1/",
    "login_controller": LoginController.as_view("login_v1"),
    # ----------------------------------------------------------------
}
1606084 | """
this is just a script that imports pac2 which ultimately imports pac1
"""
from pac2.hola import hello_world2
def use_imports():
    """Exercise the pac2 -> pac1 import chain by calling the imported helper."""
    hello_world2()
    message = 'hello world from {}'.format(__file__)
    print(message)
# Run the import demonstration when executed as a script.
if __name__ == '__main__':
    use_imports()
| StarcoderdataPython |
162107 | from typing import Any, Dict, List, Text
from rasa_sdk import Action, Tracker
from rasa_sdk.events import SlotSet
from rasa_sdk.executor import CollectingDispatcher
from covidflow.constants import CONTINUE_CI_SLOT
from covidflow.utils.persistence import cancel_reminder
from .lib.log_util import bind_logger
ACTION_NAME = "action_daily_ci_early_opt_out"
class ActionDailyCiEarlyOptOut(Action):
    """Rasa action run when the user opts out of the daily check-in early.

    Sends the acknowledgement and recommendation messages, cancels the
    user's reminder, and clears the continue-check-in slot.
    """

    def name(self) -> Text:
        """Return the action name registered with Rasa."""
        return ACTION_NAME

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> List[Dict[Text, Any]]:
        """Acknowledge the opt-out, cancel the reminder, and update the slot."""
        bind_logger(tracker)
        # Order matters: acknowledge the cancellation first, then recommend.
        for template in (
            "utter_daily_ci__early_opt_out__acknowledge_cancel_ci",
            "utter_daily_ci__early_opt_out__cancel_ci_recommendation",
        ):
            dispatcher.utter_message(template=template)
        cancel_reminder(tracker.current_slot_values())
        return [SlotSet(CONTINUE_CI_SLOT, False)]
| StarcoderdataPython |
62853 | <reponame>johnpaulguzman/Algorithm-Analyzer<filename>experiments/Catalan.py
def f(n):
    """Return the n-th Catalan number.

    Uses bottom-up dynamic programming (O(n^2) time, O(n) space) instead of
    the original naive recursion, which re-evaluates subproblems and takes
    exponential time. For n <= 0 the result is 1, matching the original
    behavior for zero and negative arguments.
    """
    if n <= 0:
        return 1
    # catalan[m] holds the m-th Catalan number once the loop reaches m.
    catalan = [1] * (n + 1)
    for m in range(1, n + 1):
        # Segner's recurrence: C(m) = sum_i C(i) * C(m-i-1)
        catalan[m] = sum(catalan[i] * catalan[m - i - 1] for i in range(m))
    return catalan[n]
3263034 | from .fuzzy_match_spell_check import FuzzyMatchSpellCheck
from .google_spell_check import GoogleSpellCheck
from .simple_spell_check import SimpleSpellCheck | StarcoderdataPython |
75956 | <filename>setup.py
#
# Copyright (c) 2021 Czech Technical University in Prague.
#
# This file is part of Roadmaptools
# (see https://github.com/aicenter/roadmap-processing).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import setuptools
from setuptools import setup
# Package metadata and dependencies for the roadmaptools distribution.
# NOTE(review): the file header declares LGPL-3 but `license` below says MIT —
# confirm which license actually applies.
setup(
    name='roadmaptools',
    version='4.1.0',
    description='Tools for road graph processing',
    author='<NAME>, <NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=setuptools.find_packages(),
    url = 'https://github.com/aicenter/roadmap-processing',
    # DO NOT remove the utm package even though it is not detected by pipreqs
    install_requires=['fconfig', 'numpy', 'pandas', 'googlemaps', 'typing', 'gpx_lite', 'tqdm', 'overpass', 'shapely',
                      'setuptools', 'rtree', 'osmread', 'scipy', 'networkx>=2.0', 'geojson', 'utm'],
    python_requires='>=3',
    package_data={'roadmaptools.resources': ['*.cfg']}
)
| StarcoderdataPython |
3275646 | <reponame>jparkhill/notebook-molecular-visualization<filename>nbmolviz/widgets/symmetry.py
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import ipywidgets as ipy
import numpy as np
import moldesign as mdt
from moldesign import units as u
from moldesign.utils import exports
from ..uielements.components import HBox, VBox
@exports
class Symmetrizer(ipy.Box):
def __init__(self, mol):
self._current_shapes = []
self.mol = mol
self.tolerance = 0.3 * u.angstrom
self.original_coords = mol.positions.copy()
self.showing = ipy.HTML()
self.viewer = mol.draw3d(width='650px')
""":type viewer: moldesign.viewer.GeometryViewer"""
self.description = ipy.HTML()
self.symm_selector = ipy.Select()
self.symm_selector.observe(self.show_symmetry, names='value')
self.apply_button = ipy.Button(description='Symmetrize')
self.apply_button.on_click(self.apply_selected_symmetry)
self.reset_button = ipy.Button(description='Reset')
self.reset_button.on_click(self.reset_coords)
self.apply_all_button = ipy.Button(description='Apply all',
layout=ipy.Layout(padding='10px'))
self.apply_all_button.on_click(self.set_highest_symmetry)
self.tolerance_descrip = ipy.HTML(u'<small>tolerance/\u212B</small>',)
self.tolerance_chooser = ipy.BoundedFloatText(value=self.tolerance.value_in(u.angstrom),
min=0.0)
self.recalculate_button = ipy.Button(description='Recalculate')
self.recalculate_button.on_click(self.coords_changed)
self.symm_pane = VBox([self.description,
self.symm_selector,
HBox([self.apply_button, self.reset_button]),
self.apply_all_button,
HBox([self.tolerance_chooser, self.recalculate_button]),
self.tolerance_descrip],
layout=ipy.Layout(width='325px'))
self.symmetry = None
self.coords_changed()
self.hbox = HBox([VBox([self.viewer, self.showing]), self.symm_pane])
super().__init__([self.hbox])
def reset_coords(self, *args):
self.mol.positions = self.original_coords
self.viewer.set_positions(positions=self.original_coords)
self.coords_changed()
def coords_changed(self, *args):
with self.symm_selector.hold_trait_notifications():
self.symm_selector.options = {}
self.description.value = 'Finding symmetries ...'
self.tolerance = self.tolerance_chooser.value * u.angstrom
self.symmetry = mdt.geom.get_symmetry(self.mol, tolerance=self.tolerance)
options = collections.OrderedDict()
for elem in self.symmetry.elems:
if elem.max_diff.magnitude != 0.0:
key = '{0}) {1} (error={2.magnitude:.4f} {2.units})'.format(elem.idx, elem.symbol, elem.max_diff)
else:
key = '{0}) {1} (exact)'.format(elem.idx, elem.symbol, elem.max_diff)
options[key] = elem
with self.symm_selector.hold_trait_notifications():
self.symm_selector.options = options
descrip = 'Highest symmetry group: <b>%s</b><br>' % self.symmetry.symbol
if self.symmetry.rms.magnitude == 0.0:
descrip += '(Exact)'
else:
descrip += 'RMS Error = {:.03P}'.format(self.symmetry.rms)
self.description.value = descrip
self.viewer.set_positions(positions=self.symmetry.orientation)
def apply_selected_symmetry(self, *args):
idx = self.symm_selector.value.idx
elem = self.symmetry.elems[idx]
newcoords = self.symmetry.get_symmetrized_coords(elem)
self.mol.atoms.position = newcoords
if not np.allclose(newcoords, self.symmetry.orientation, atol=1.0e-10):
self.viewer.set_positions(positions=newcoords)
self.coords_changed()
def show_symmetry(self, *args):
self.showing.value = ''
if self._current_shapes:
for s in self._current_shapes: self.viewer.remove(s)
self._current_shapes = []
if self.symm_selector.value is None:
return
elem = self.symm_selector.value
symbol = elem.symbol
self.showing.value = '%s visualization not implemented' % symbol
if symbol == 'C1':
self.showing.value = 'Identity operation'
return
elif symbol == 'Ci':
inversion = self.viewer.draw_sphere(np.zeros(3) * u.angstrom,
color='0x4AB4C4',
radius=0.5 * u.angstrom,
opacity=0.85)
self._current_shapes.append(inversion)
self.showing.value = 'Inversion center'
elif symbol == 'Cs' or (symbol[0] == 'S' and symbol[1].isdigit()):
axis = elem.get_axis()
rad = 2.5 * max(self.symmetry.orientation.max(), 3.0 * u.angstrom)
plane = self.viewer.draw_circle(np.zeros(3),
axis,
radius=rad,
opacity=0.6,
color='0xAB00FE')
self._current_shapes.append(plane)
self.showing.value = 'Mirror plane (normal = %s)' % axis
if symbol[0] in 'SC' and symbol[1].isdigit():
axis = elem.get_axis()
nrot = int(symbol[1])
projections = self.symmetry.orientation.dot(axis)
top = axis * max(3.25 * projections.max(), 3.0*u.angstrom)
bottom = axis * min(2.5 * projections.min(), -2.5*u.angstrom)
arrow = self.viewer.draw_arrow(start=bottom, end=top,
color='0x00FE03', opacity=0.8)
self._current_shapes.append(arrow)
if symbol[0] == 'S':
self.showing.value = '%d-fold improper rotation axis (%s)' % (nrot, axis)
else:
self.showing.value = '%d-fold rotation axis (%s)' % (nrot, axis)
def set_highest_symmetry(self, *args):
raise NotImplementedError()
| StarcoderdataPython |
48010 | <gh_stars>0
#!/usr/bin/env python
__author__ = "<NAME>"
import numpy
import pandas
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
def _n(x):
return (x - numpy.mean(x))/numpy.std(x)
# Load the s-MulTiXcan results table and keep rows with more than one tissue.
d = pandas.read_table("../results/smultixcan_wrong.txt")
d_ = d.loc[d.n>1]
# Clamp p-values away from zero so -log10 stays finite.
d_ = d_.assign(pvalue = numpy.maximum(d_.pvalue, 1e-30), p_i_best=numpy.maximum(d_.p_i_best, 1e-30))
# Derive features: row index, -log10 p-values, and the independent-tissue fraction.
d_ = d_.assign(i = range(0, d_.shape[0]), s_best = -numpy.log10(d_.p_i_best), n_prop = d_.n_indep/d_.n, s=-numpy.log10(d_.pvalue))
# Standardize each feature column (zero mean, unit variance).
d_ = d_.assign(s = _n(d_.s), s_best=_n(d_.s_best), n_prop = _n(d_.n_prop), eigen_max = _n(d_.eigen_max), eigen_min =_n(d_.eigen_min))
# Binary target and feature matrix (one row per observation after transpose).
right = numpy.array([1 if x else 0 for x in d_.right.values])
data = numpy.matrix([d_.s.values, d_.s_best.values, d_.z_sd.values, d_.n_prop.values, d_.eigen_max.values, d_.eigen_min.values]).T
X_train, X_test, y_train, y_test = train_test_split(data, right, test_size=0.2, random_state=1)
# NOTE(review): these placeholders are never fed to the model below — they
# look like leftovers from an earlier TF1-style training loop.
x_placeholder = tf.placeholder(X_train.dtype, X_train.shape)
y_placeholder = tf.placeholder(y_train.dtype, y_train.shape)
# Small classifier: one hidden sigmoid layer, softmax over the two classes.
model = keras.Sequential([
    keras.layers.Dense(12, activation=tf.nn.sigmoid, input_shape=(X_train.shape[1],)),
    # keras.layers.Dense(32, activation=tf.nn.sigmoid),
    # keras.layers.Dense(32, activation=tf.nn.relu),
    keras.layers.Dense(2, activation=tf.nn.softmax)
])
#model.compile(optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.001),
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Earlier regression-style architecture, kept for reference:
# model = keras.Sequential([
#     keras.layers.Dense(64, activation=tf.nn.relu,
#                        input_shape=(X_train.shape[1],)),
#     keras.layers.Dense(64, activation=tf.nn.relu),
#     keras.layers.Dense(1)
#   ])
#
# optimizer = tf.train.RMSPropOptimizer(0.001)
#
# model.compile(loss='mse',
#               optimizer=optimizer,
#               metrics=['mae'])
#model.fit(x_placeholder, y_placeholder, epochs=5)
#p = model.predict(X_test)
history = model.fit(X_train, y_train, epochs=5, validation_split=0.2)
# Probability of class 1 for each test row.
p = model.predict(X_test)
p_ = numpy.array([x[1] for x in p])
# NOTE(review): this squared-error expression is computed but its result is
# discarded — presumably meant to be printed or assigned.
numpy.sum((y_test-p_)**2)/p_.shape
from IPython import embed; embed(); exit() | StarcoderdataPython |
1748081 | <filename>homeassistant/scripts/influxdb_migrator.py
"""Script to convert an old-structure influxdb to a new one."""
import argparse
import sys
from typing import List
# Based on code at
# http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
def print_progress(iteration: int, total: int, prefix: str='', suffix: str='',
                   decimals: int=2, bar_length: int=68) -> None:
    """Render a one-line terminal progress bar.

    Call repeatedly with an increasing *iteration*; the trailing carriage
    return keeps the bar on a single line. When *iteration* reaches *total*
    a blank line is printed so subsequent output starts below the bar.

    @params:
        iteration  - Required : current iteration (Int)
        total      - Required : total iterations (Int)
        prefix     - Optional : prefix string (Str)
        suffix     - Optional : suffix string (Str)
        decimals   - Optional : number of decimals in percent complete (Int)
        bar_length - Optional : character length of bar (Int)
    """
    filled = int(round(bar_length * iteration / float(total)))
    percents = round(100.00 * (iteration / float(total)), decimals)
    bar = '#' * filled + '-' * (bar_length - filled)
    sys.stdout.write('%s [%s] %s%s %s\r' % (prefix, bar, percents, '%', suffix))
    sys.stdout.flush()
    if iteration == total:
        print("\n")
def run(script_args: List) -> int:
    """Run the actual script: migrate a legacy InfluxDB to the new layout.

    Steps: clone the source database into `<dbname>__old`, recreate
    `<dbname>`, then copy every measurement back point-by-point, coercing
    each field to float where possible (non-numeric fields are renamed to
    `<field>_str`, and a non-numeric `value` becomes `state`). With `-D` the
    temporary `__old` database is dropped at the end.

    NOTE(review): annotated `-> int` but no value is ever returned — the
    function implicitly returns None.
    """
    from influxdb import InfluxDBClient
    parser = argparse.ArgumentParser(
        description="Migrate legacy influxDB.")
    parser.add_argument(
        '-d', '--dbname',
        metavar='dbname',
        required=True,
        help="InfluxDB database name")
    parser.add_argument(
        '-H', '--host',
        metavar='host',
        default='127.0.0.1',
        help="InfluxDB host address")
    parser.add_argument(
        '-P', '--port',
        metavar='port',
        default=8086,
        help="InfluxDB host port")
    parser.add_argument(
        '-u', '--username',
        metavar='username',
        default='root',
        help="InfluxDB username")
    parser.add_argument(
        '-p', '--password',
        metavar='password',
        default='<PASSWORD>',
        help="InfluxDB password")
    parser.add_argument(
        '-s', '--step',
        metavar='step',
        default=1000,
        help="How many points to migrate at the same time")
    parser.add_argument(
        '-o', '--override-measurement',
        metavar='override_measurement',
        default="",
        help="Store all your points in the same measurement")
    parser.add_argument(
        '-D', '--delete',
        action='store_true',
        default=False,
        help="Delete old database")
    parser.add_argument(
        '--script',
        choices=['influxdb_migrator'])
    args = parser.parse_args()
    # Client connected to the source (legacy) database.
    client = InfluxDBClient(args.host, args.port,
                            args.username, args.password)
    client.switch_database(args.dbname)
    # Names of all databases on the server, to decide whether __old exists.
    db_list = [db['name'] for db in client.get_list_database()]
    # Measurements of the legacy database that need migrating.
    res = client.query('SHOW MEASUREMENTS')
    measurements = [measurement['name'] for measurement in res.get_points()]
    nb_measurements = len(measurements)
    # Stage 1: clone everything into a scratch database named <dbname>__old.
    old_dbname = "{}__old".format(args.dbname)
    # Create old DB if needed
    if old_dbname not in db_list:
        client.create_database(old_dbname)
    # Copy data to the old DB
    print("Cloning from {} to {}".format(args.dbname, old_dbname))
    for index, measurement in enumerate(measurements):
        # Server-side copy preserving tags (GROUP BY *).
        client.query('''SELECT * INTO {}..:MEASUREMENT FROM '''
                     '"{}" GROUP BY *'.format(old_dbname, measurement))
        # Print progress
        print_progress(index + 1, nb_measurements)
    # Stage 2: recreate the target database empty.
    client.drop_database(args.dbname)
    # Create new DB if needed
    client.create_database(args.dbname)
    client.switch_database(old_dbname)
    # Client that writes converted points into the recreated database.
    new_client = InfluxDBClient(args.host, args.port, args.username,
                                args.password, args.dbname)
    # Counter of points without time (skipped during conversion).
    point_wt_time = 0
    print("Migrating from {} to {}".format(old_dbname, args.dbname))
    # Stage 3: walk each measurement, converting points in batches of `step`.
    for index, measurement in enumerate(measurements):
        # Tag keys of this measurement (copied verbatim onto each new point).
        res = client.query('''SHOW TAG KEYS FROM "{}"'''.format(measurement))
        tags = [v['tagKey'] for v in res.get_points()]
        # Field keys of this measurement (coerced to float where possible).
        res = client.query('''SHOW FIELD KEYS FROM "{}"'''.format(measurement))
        fields = [v['fieldKey'] for v in res.get_points()]
        # Paginate through the measurement's points.
        offset = 0
        while True:
            nb_points = 0
            # Batch of converted points to write in one call.
            new_points = []
            # Fetch the next page of raw points.
            res = client.query('SELECT * FROM "{}" LIMIT {} OFFSET '
                               '{}'.format(measurement, args.step, offset))
            for point in res.get_points():
                new_point = {"tags": {},
                             "fields": {},
                             "time": None}
                if args.override_measurement:
                    new_point["measurement"] = args.override_measurement
                else:
                    new_point["measurement"] = measurement
                # Points without a timestamp cannot be written; skip and count.
                if point["time"] is None:
                    # Point without time
                    point_wt_time += 1
                    print("Can not convert point without time")
                    continue
                # Coerce every field to float; non-numeric values become
                # strings under a renamed key ("state" for "value",
                # "<field>_str" otherwise).
                for field in fields:
                    try:
                        new_point["fields"][field] = float(point[field])
                    except (ValueError, TypeError):
                        if field == "value":
                            new_key = "state"
                        else:
                            new_key = "{}_str".format(field)
                        new_point["fields"][new_key] = str(point[field])
                # Copy tags verbatim.
                for tag in tags:
                    new_point["tags"][tag] = point[tag]
                # Preserve the original timestamp.
                new_point["time"] = point["time"]
                # Add new point to the batch.
                new_points.append(new_point)
                # Count nb points
                nb_points += 1
            # Write the converted batch to the new database.
            # NOTE(review): `except ... raise exp` is a no-op wrapper — the
            # exception would propagate anyway.
            try:
                new_client.write_points(new_points)
            except Exception as exp:
                raise exp
            # An empty page means this measurement is fully migrated.
            if nb_points == 0:
                # print("Measurement {} migrated".format(measurement))
                break
            else:
                # Advance to the next page.
                offset += args.step
        # Print progress
        print_progress(index + 1, nb_measurements)
    # Stage 4: optionally drop the scratch database.
    if args.delete:
        print("Dropping {}".format(old_dbname))
        client.drop_database(old_dbname)
| StarcoderdataPython |
1660393 | <reponame>sotheara-leang/xFlask<gh_stars>1-10
from sqlalchemy import *
from sqlalchemy.orm import *
from flask_sqlalchemy import SQLAlchemy
from .decorator import *
from .util import *
db = SQLAlchemy(session_options={'autocommit': True})
def transactional(subtransactions=True, nested=False):
    """Decorator factory wrapping a function in a database transaction.

    The wrapped function runs inside ``db.session.begin(...)``; the session
    is committed on success. On any exception the session is rolled back and
    the exception re-raised.

    :param subtransactions: allow nesting inside an already-open transaction
    :param nested: use a SAVEPOINT-style nested transaction
    """
    from functools import wraps

    def function(f):
        @wraps(f)  # fix: preserve the wrapped function's name/docstring
        def wrapper(*args, **kwargs):
            db.session.begin(subtransactions=subtransactions, nested=nested)
            try:
                result = f(*args, **kwargs)
                db.session.commit()
            except Exception:
                db.session.rollback()
                # fix: bare `raise` keeps the original traceback intact,
                # unlike the previous `raise e`.
                raise
            return result
        return wrapper
    return function
| StarcoderdataPython |
3299459 | <reponame>vishalbelsare/cgpm
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from cgpm.crosscat.engine import Engine
from cgpm.utils.general import gen_rng
def test_logpdf_score_crash():
    """Regression test: logpdf_score stays strictly below logpdf_likelihood
    both before and after inference, and the score improves with inference."""
    rng = gen_rng(8)
    # Alternative discrete dataset kept for reference:
    # T = rng.choice([0,1], p=[.3,.7], size=250).reshape(-1,1)
    data = rng.normal(size=30).reshape(-1, 1)
    engine = Engine(data, cctypes=['normal'], rng=rng, num_states=4)
    likelihood_before = np.array(engine.logpdf_likelihood())
    score_before = np.array(engine.logpdf_score())
    assert np.all(score_before < likelihood_before)
    engine.transition(N=100)
    engine.transition(kernels=['column_hypers','view_alphas'], N=10)
    likelihood_after = np.asarray(engine.logpdf_likelihood())
    score_after = np.asarray(engine.logpdf_score())
    assert np.all(score_after < likelihood_after)
    assert np.max(score_before) < np.max(score_after)
| StarcoderdataPython |
1674233 | import json
import logging
from models.slave import Slave
import falcon
from mongoengine.errors import NotUniqueError
from utils.MongoStorage import MongoStorage
class SlaveResource(object):
    """Falcon resource handling slave registration over POST."""

    def __init__(self):
        # Shared Mongo-backed storage handle.
        self.mongo = MongoStorage()

    @staticmethod
    def on_post(req, resp):
        """Register a new slave, or reset an existing one to the READY state."""
        payload = json.loads(req.stream.read(), encoding='utf-8')
        url = payload["url"]
        state = "READY"
        try:
            matches = Slave.objects(url=url)
            if matches:
                # Known URL: just refresh its state.
                slave = matches[0]
                slave.update(state=state)
                logging.info("Slave " + slave.hash + " updated state=" + slave.state)
            else:
                # First time we see this URL: persist a new record.
                slave = Slave(url=url, state=state).save()
                logging.info("New slave " + slave.hash + " registerd; created new entry in database")
            resp.body = json.dumps(slave.to_dict())
        except NotUniqueError:
            logging.error("Attempted to register duplicate slave URL!")
            resp.body = json.dumps({"error": "Slave URL already exists"})
            resp.status = falcon.HTTP_400
| StarcoderdataPython |
126492 | import multiprocessing
# Listen on all interfaces, port 8000.
bind = "0.0.0.0:8000"
# Standard gunicorn sizing heuristic: two workers per CPU core, plus one.
workers = multiprocessing.cpu_count() * 2 + 1
# Three threads per worker process.
threads = workers*3
# accesslog = '/tmp/accesslog.txt'
# access_log_format = 'Neon (Outbreak News Today) %(h)s %(u)s %(t)s %(m)s Resopnse: %(s)s "%(q)s"'
| StarcoderdataPython |
64863 | from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union
import attr
from ..models.billing_invoice import BillingInvoice
from ..types import UNSET, Unset
from ..util.serialization import is_not_none
T = TypeVar("T", bound="ListAccountBillingInvoicesResponse")
@attr.s(auto_attribs=True)
class ListAccountBillingInvoicesResponse:
    """
    Attributes:
        billing_invoices (List[BillingInvoice]):
        next_page_token (Union[Unset, str]):
    """

    billing_invoices: List[BillingInvoice]
    next_page_token: Union[Unset, str] = UNSET
    # Extra JSON keys not covered by the declared attributes.
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self, pick_by_predicate: Optional[Callable[[Any], bool]] = is_not_none) -> Dict[str, Any]:
        """Serialize to a plain dict, dropping UNSET values and (optionally)
        any value rejected by *pick_by_predicate* (defaults to non-None)."""
        billing_invoices = []
        for billing_invoices_item_data in self.billing_invoices:
            billing_invoices_item = billing_invoices_item_data.to_dict()
            billing_invoices.append(billing_invoices_item)
        next_page_token = self.next_page_token
        field_dict: Dict[str, Any] = {}
        # Additional properties first so declared fields win on key clashes.
        field_dict.update(self.additional_properties)
        field_dict.update(
            {
                "billingInvoices": billing_invoices,
            }
        )
        if next_page_token is not UNSET:
            field_dict["nextPageToken"] = next_page_token
        field_dict = {k: v for k, v in field_dict.items() if v != UNSET}
        if pick_by_predicate is not None:
            field_dict = {k: v for k, v in field_dict.items() if pick_by_predicate(v)}
        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Deserialize from a dict; unknown keys land in additional_properties."""
        d = src_dict.copy()
        billing_invoices = []
        _billing_invoices = d.pop("billingInvoices")
        for billing_invoices_item_data in _billing_invoices:
            billing_invoices_item = BillingInvoice.from_dict(billing_invoices_item_data)
            billing_invoices.append(billing_invoices_item)
        next_page_token = d.pop("nextPageToken", UNSET)
        list_account_billing_invoices_response = cls(
            billing_invoices=billing_invoices,
            next_page_token=next_page_token,
        )
        # Whatever remains after popping known keys is an additional property.
        list_account_billing_invoices_response.additional_properties = d
        return list_account_billing_invoices_response

    @property
    def additional_keys(self) -> List[str]:
        """Names of the extra (undeclared) properties held by this object."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| StarcoderdataPython |
6854 | import ast
import re
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import astunparse
from tests.common import AstunparseCommonTestCase
class DumpTestCase(AstunparseCommonTestCase, unittest.TestCase):
    """Check that astunparse.dump matches ast.dump once pretty-printing is removed."""

    def assertASTEqual(self, dump1, dump2):
        """Compare two AST dumps after normalizing astunparse's whitespace."""
        # Drop the newline+indent inserted right after '(' or '[', then
        # collapse the remaining indented newlines to single spaces.
        normalized = re.sub(r"(?<=[\(\[])\n\s+", "", dump1)
        normalized = re.sub(r"\n\s+", " ", normalized)
        self.assertEqual(normalized, dump2)

    def check_roundtrip(self, code1, filename="internal", mode="exec"):
        """Parse *code1* to an AST and assert both dump implementations agree."""
        tree = compile(str(code1), filename, mode, ast.PyCF_ONLY_AST)
        self.assertASTEqual(astunparse.dump(tree), ast.dump(tree))
| StarcoderdataPython |
1646820 | import logging
import numpy as np
from ..Dataset import Dataset
def crop(jets, pileup=False):
    """Select jets inside the (pt, mass) signal window and weight them for pt flatness.

    A jet is kept when pt_min < pt < pt_max and m_min < mass < m_max; the
    window depends on *pileup*. Each label class (jet.y == 0 and jet.y == 1)
    is then reweighted so its pt spectrum is flat: every kept jet gets a
    weight inversely proportional to the estimated pt density of its class,
    normalized so each class's weights sum to 1.

    Returns (good_jets, bad_jets, w), with w aligned to good_jets.
    """
    if pileup:
        logging.warning("pileup")
        pt_min, pt_max, m_min, m_max = 300, 365, 150, 220
    else:
        pt_min, pt_max, m_min, m_max = 250, 300, 50, 110

    good_jets = []
    bad_jets = []
    for j in jets:
        if pt_min < j.pt < pt_max and m_min < j.mass < m_max:
            good_jets.append(j)
        else:
            bad_jets.append(j)

    # Weights for flatness in pt, one pass per label class.
    w = np.zeros(len(good_jets))
    y_ = np.array([jet.y for jet in good_jets])
    for label in (0, 1):
        pts = [jet.pt for jet in good_jets if jet.y == label]
        pdf, edges = np.histogram(pts, density=True, range=[pt_min, pt_max],
                                  bins=50)
        indices = np.searchsorted(edges, pts) - 1
        inv_w = 1. / pdf[indices]
        inv_w /= inv_w.sum()
        # Fix: the original zipped inv_w (one entry per jet of this label)
        # against *all* good jets, which misassigned weights whenever the
        # two labels were interleaved; assign through a boolean mask so the
        # k-th weight goes to the k-th jet of this label.
        w[y_ == label] = inv_w
    return good_jets, bad_jets, w
def crop_dataset(dataset):
    """Split *dataset* into (in-window weighted Dataset, cropped-out Dataset)."""
    logging.info(dataset.subproblem)
    is_pileup = dataset.subproblem == 'pileup'
    kept, rejected, weights = crop(dataset.jets, is_pileup)
    cropped = Dataset(rejected)
    selected = Dataset(kept, weights)
    return selected, cropped
| StarcoderdataPython |
1600218 | <filename>Sleepless/modules/weather_test.py
import unittest
class TempTrack:
    """Track temperature readings in [0, 140] and expose min/max/mean/mode in O(1).

    Fixes over the original implementation:
    - the histogram now has 141 slots, so insert(140) no longer raises
      IndexError (the range is inclusive on both ends);
    - min/max use None sentinels, so an actual reading of 140 is reported
      correctly instead of get_min() returning None.
    """
    # Inclusive bounds of the accepted temperature range.
    MIN_TEMP = 0
    MAX_TEMP = 140

    def __init__(self):
        # Histogram of observed temperatures; index == temperature.
        self.temps = [0] * (self.MAX_TEMP + 1)
        self.num_temps = 0
        self.min = None
        self.max = None
        self.total = 0
        self.mean = None
        self.max_freq = 0
        self.mode = None

    def insert(self, temp):
        """Record one reading; raise ValueError outside [0, 140]."""
        if temp < self.MIN_TEMP or temp > self.MAX_TEMP:
            raise ValueError("temperature %r outside [0, 140]" % (temp,))
        self.temps[temp] += 1
        self.num_temps += 1
        if self.min is None or temp < self.min:
            self.min = temp
        if self.max is None or temp > self.max:
            self.max = temp
        self.total += temp
        self.mean = self.total / float(self.num_temps)
        # Strict comparison keeps the earliest value reaching a frequency as
        # the mode on ties, matching the original behavior.
        if self.temps[temp] > self.max_freq:
            self.max_freq = self.temps[temp]
            self.mode = temp

    def get_max(self):
        """Largest temperature seen, or None if nothing was inserted."""
        return self.max

    def get_min(self):
        """Smallest temperature seen, or None if nothing was inserted."""
        return self.min

    def get_mean(self):
        """Running mean as a float, or None if nothing was inserted."""
        return self.mean

    def get_mode(self):
        """Most frequent temperature (earliest wins ties), or None."""
        return self.mode
class TestTempTracker(unittest.TestCase):
    """Unit tests for TempTrack's min/max/mean/mode tracking."""
    def _test_tracker(self, temps, min, max, mean, modes):
        """Insert *temps* into a fresh tracker and check the statistics.

        *modes* is a list because ties make several modes acceptable.
        NOTE(review): the min assertion is commented out below — confirm
        whether it was disabled because of the known min-sentinel bug.
        """
        tracker = TempTrack()
        for temp in temps:
            tracker.insert(temp)
        print("")
        print("Test: temps = %s" % temps)
        print("   min %s max %s" % (tracker.get_min(), tracker.get_max()))
        #self.assertTrue(tracker.get_min() == min)
        self.assertTrue(tracker.get_max() == max)
        print("   mean %s mode %s" % (tracker.get_mean(), tracker.get_mode()))
        self.assertTrue(tracker.get_mean() == mean)
        self.assertTrue(tracker.get_mode() in modes)
    def test_null(self):
        # Empty tracker: all statistics are None.
        self._test_tracker([], None, None, None, [None])
    def test_0(self):
        self._test_tracker([0], 0, 0, 0, [0])
    def test_01(self):
        # Tie: either value is an acceptable mode.
        self._test_tracker([0, 1], 0, 1, 0.5, [0, 1])
    def test_011(self):
        self._test_tracker([0, 1, 1], 0, 1, 2 / 3.0, [1])
    def test_0112(self):
        self._test_tracker([0, 1, 1, 2], 0, 2, 4 / 4.0, [1])
    def test_0111225(self):
        self._test_tracker([0, 1, 1, 2, 2, 5], 0, 5, 11 / 6.0, [1, 2])
    def test_011122555(self):
        self._test_tracker([0, 1, 1, 2, 2, 5, 5, 5], 0, 5, 21 / 8.0, [5])
    def test_extremes(self):
        # Out-of-range temperatures must be rejected.
        tracker = TempTrack()
        self.assertRaises(Exception, tracker.insert, -1)
        #self.assertRaises(Exception, tracker.insert, 111)
if __name__ == "__main__":
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestTempTracker)
unittest.TextTestRunner(verbosity=2).run(suite) | StarcoderdataPython |
1791586 | """
Tests stringify functions used in xmodule html
"""
from lxml import etree
from xmodule.stringify import stringify_children
def test_stringify():
    """stringify_children should return an element's inner markup verbatim."""
    inner = 'Hi <div x="foo">there <span>Bruce</span><b>!</b></div>'
    document = f'''<html a="b" foo="bar">{inner}</html>'''
    root = etree.fromstring(document)
    assert stringify_children(root) == inner
def test_stringify_again():
    """Regression test for a bug where stringified content repeated."""
    # NOTE(review): this first assignment is immediately shadowed by the one
    # below, so the img/LaTeX variant is never exercised — confirm whether it
    # was meant to be a second test case.
    html = r"""<html name="Voltage Source Answer" >A voltage source is non-linear!
    <div align="center">
    <img src="/static/images/circuits/voltage-source.png"/>
    \(V=V_C\)
    </div>
    But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
    which means linear except for an offset.
    </html>
    """
    html = """<html>A voltage source is non-linear!
    <div align="center">
    </div>
    But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
    which means linear except for an offset.
    </html>
    """
    xml = etree.fromstring(html)
    out = stringify_children(xml)
    print("output:")
    print(out)
    # Tracking strange content repeating bug
    # Should appear once
    assert out.count("But it is ") == 1
| StarcoderdataPython |
86419 | <reponame>shijiale0609/Python_Data_Analysis
import scipy.misc
import matplotlib.pyplot as plt
# This script demonstrates fancy indexing by setting values
# on the diagonals to 0.
# Load the Lena array.
# NOTE(review): scipy.misc.lena was removed in SciPy 1.0 — this script
# requires an older SciPy release; verify the installed version.
lena = scipy.misc.lena()
xmax = lena.shape[0]
ymax = lena.shape[1]
# Fancy indexing: index rows and columns with parallel ranges so each
# (i, i) element on the main diagonal is selected at once.
# Set values on diagonal to 0
# x 0-xmax
# y 0-ymax
lena[range(xmax), range(ymax)] = 0
# Set values on the anti-diagonal to 0 by reversing the row indices.
# x xmax-0
# y 0-ymax
lena[range(xmax-1,-1,-1), range(ymax)] = 0
# Plot Lena with both diagonal lines blacked out.
plt.imshow(lena)
plt.show()
| StarcoderdataPython |
1603951 | <reponame>Ark0617/mediator_IL<filename>visualize_result.py
from baselines.common import plot_util as pu
import matplotlib.pyplot as plt
import numpy as np
# Load every run logged under the NewHopperCmp directory.
results = pu.load_results('~/logs/NewHopperCmp/')
print(len(results))
# Plot the learning curves, averaged within each run group.
pu.plot_results(results, average_group=True, split_fn=lambda _: '')
#print(np.cumsum(results[0].monitor.l))
#plt.plot(np.cumsum(results[0].monitor.l), pu.smooth(results[0].monitor.r, radius=10))
#plt.show()
| StarcoderdataPython |
1730237 | # -*- coding: utf-8 -*-
import socket
import hashlib
import base64
import logging
# Set to the gevent module by T.__init__ when gevent imports successfully.
GEVENT = None
# Bytes read per socket recv() call.
TCP_BUF_SIZE = 8192
# GUID appended to the client's Sec-WebSocket-Key during the opening
# handshake (the RFC 6455 magic string).
WS_MAGIC_STRING = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
# 101 Switching Protocols response template; {} is filled with the
# computed Sec-WebSocket-Accept value.
RESPONSE_STRING = 'HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\n' \
                  'Connection: Upgrade\r\nSec-WebSocket-Accept: {}\r\n\r\n'
logger = logging.getLogger('I`m_Black')
class T(object):
    """Thread-spawning helper that prefers gevent greenlets over OS threads.

    Construction tries to import gevent and monkey-patch the standard
    library; on failure it falls back to `threading.Thread`. The `type`
    attribute records which backend is active for this instance.
    """
    type = 'gevent' # threading or gevent
    def __init__(self):
        try:
            global GEVENT
            import gevent
            from gevent.pool import Group
            from gevent.monkey import patch_all
            # Monkey-patch blocking stdlib calls so they cooperate with greenlets.
            patch_all()
            self.Thread = Group().spawn
            # Expose the imported module through the module-level GEVENT global.
            GEVENT = gevent
            logger.warning('import gevent success')
        except Exception as e:
            logger.warning('failed to import gevent' + repr(e))
            # Instance attribute shadows the class-level 'gevent' default.
            self.type = 'threading'
            from threading import Thread
            self.Thread = Thread
    def thread(self, target, args=()):
        """Run *target(*args)* on the active backend.

        gevent's spawn starts immediately; a threading.Thread needs an
        explicit start() call.
        """
        if self.type == 'gevent':
            self.Thread(target, *args)
        else:
            self.Thread(target=target, args=args).start()
class WebSocket(object):
    def __init__(self, host=None, port=None, listen_count=None):
        """Configure the server. Defaults: 127.0.0.1:1908, backlog 0."""
        self.host = host or '127.0.0.1'
        self.port = port or 1908
        self.listen_count = listen_count or 0
        # gevent- or threading-backed spawner, chosen by T at construction.
        self.Thread = T().thread
        # path -> {'recv_func', 'send_func', 'client_pools'} (see register()).
        self.path_mappings = dict()
    def request(self, f):
        """Parse the HTTP upgrade request from the file-like object *f*.

        Returns a dict with 'method', 'path' and 'http_protocol' from the
        request line, plus one entry per 'Key: Value' header. Raises
        Exception when the request is not a GET for a registered path over
        HTTP/1.1.
        """
        _request = dict()
        line = f.readline().strip()
        line_l = line.split(' ')
        if len(line_l) == 3:
            _request.update({
                'method': line_l[0],
                'path': line_l[1],
                'http_protocol': line_l[2],
            })
        else:
            raise Exception('method, path, http_protocol error: %s' % line)
        # Read header lines until the blank '\r\n' that ends the header
        # block (or EOF, where readline returns '').
        while line:
            if line == '\r\n':
                break
            line = f.readline()
            # Headers that don't split into exactly 'Key: Value' are skipped.
            line_l = line.strip().split(': ')
            if len(line_l) == 2:
                _request[line_l[0]] = line_l[1]
        if _request['method'] != 'GET':
            raise Exception('method error, %s' % _request['method'])
        if _request['path'] not in self.path_mappings.keys():
            raise Exception('path not found, %s' % _request['path'])
        if _request['http_protocol'] != 'HTTP/1.1':
            raise Exception('http_protocol error, %s' % _request['http_protocol'])
        return _request
@staticmethod
def ws_key(request):
if request.get('Upgrade') == 'websocket':
if request.get('Connection') == 'Upgrade':
if request.get('Sec-WebSocket-Version') == '13':
return request.get('Sec-WebSocket-Key')
@staticmethod
def make_ws_accept(ws_key):
key = ws_key + WS_MAGIC_STRING
sha1 = hashlib.sha1()
sha1.update(key)
key = sha1.digest()
ws_accept = base64.b64encode(key)
return ws_accept
@staticmethod
def parse_data(msg):
code_length = ord(msg[1]) & 127
if code_length == 126:
masks = msg[4:8]
data = msg[8:]
elif code_length == 127:
masks = msg[10:14]
data = msg[14:]
else:
masks = msg[2:6]
data = msg[6:]
i = 0
raw_str = ''
for d in data:
raw_str += chr(ord(d) ^ ord(masks[i % 4]))
i += 1
return raw_str
@staticmethod
def send_data(raw_str):
back_str = list()
back_str.append('\x81')
data_length = len(raw_str)
if data_length < 125:
back_str.append(chr(data_length))
else:
back_str.append(chr(126))
back_str.append(chr(data_length >> 8))
back_str.append(chr(data_length & 0xFF))
back_str = ''.join(back_str) + raw_str
return back_str
def register(self, path, recv_func=None, send_func=None):
self.path_mappings[path] = {
'recv_func': recv_func,
'send_func': send_func,
'client_pools': [],
}
def fake_tunnel(self, client, path):
if path in self.path_mappings.keys():
self.path_mappings[path]['client_pools'].append(client)
path_mapping = self.path_mappings.get(path, {})
recv_func = path_mapping.get('recv_func')
send_func = path_mapping.get('send_func')
self.Thread(target=self.recv, args=(recv_func, client, path))
self.Thread(target=send_func, args=(self,))
def close(self, client, path):
try:
self.path_mappings[path]['client_pools'].remove(client)
except ValueError:
pass
try:
try:
client.shutdown(socket.SHUT_RDWR)
except:
pass
client.close()
except Exception as e:
logger.warning('client close error, e=%s' % repr(e))
def recv(self, recv_func, client, path):
data = ''
d = client.recv(TCP_BUF_SIZE)
while d:
data += d
if len(d) < TCP_BUF_SIZE:
recv_func(self.parse_data(data))
data = ''
d = client.recv(TCP_BUF_SIZE)
self.close(client, path)
def send(self, data, path):
clients = self.path_mappings.get(path, {}).get('client_pools', [])
for client in clients:
try:
client.sendall(self.send_data(data))
except Exception as e:
logger.warning('client sendall error, e=%s' % repr(e))
self.close(client, path)
def serve_forever(self):
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind((self.host, self.port))
soc.listen(self.listen_count)
while True:
try:
client, _ = soc.accept()
try:
f = client.makefile()
request = self.request(f)
ws_key = self.ws_key(request)
if ws_key:
ws_accept = self.make_ws_accept(ws_key)
data = RESPONSE_STRING.format(ws_accept)
client.sendall(data)
self.fake_tunnel(client, request.get('path'))
else:
raise Exception('Sec-Websocket-Key error')
except:
logging.error('closing client', exc_info=1)
client.close()
except Exception as e:
logging.error(repr(e))
# Public API of this module.
__all__ = ['WebSocket']
| StarcoderdataPython |
1768389 | from test_include import *
import numpy as np
'''
# of queries: 5, number of bins in ISOMER: 7
# of queries: 10, number of bins in ISOMER: 30
# of queries: 15, number of bins in ISOMER: 80
# of queries: 20, number of bins in ISOMER: 203
# of queries: 25, number of bins in ISOMER: 603
# of queries: 30, number of bins in ISOMER: 1363
# of queries: 35, number of bins in ISOMER: 2640
# of queries: 40, number of bins in ISOMER: 4342
# of queries: 45, number of bins in ISOMER: 6906
# of queries: 50, number of bins in ISOMER: 10310
# of queries: 55, number of bins in ISOMER: 15125
# of queries: 60, number of bins in ISOMER: 21262
# of queries: 65, number of bins in ISOMER: 29114
'''
def generate_dataset(size = 100000, seed = 1):
    """Sample *size* correlated 2-D Gaussian points centered at (0.5, 0.5).

    The RNG is seeded with *seed*, so the dataset is reproducible.
    Returns an array of shape (size, 2).
    """
    np.random.seed(seed)
    covariance = np.array([[0.09, 0.04], [0.04, 0.09]])
    center = np.array([0.5, 0.5])
    return np.random.multivariate_normal(center, covariance, size)
def count_tuple(data, boundary):
    """Count rows of *data* inside the closed axis-aligned box *boundary*.

    *boundary* is (x_lo, y_lo, x_hi, y_hi); both edges are inclusive.
    """
    assert len(boundary) == 4
    x_lo, y_lo, x_hi, y_hi = boundary[0], boundary[1], boundary[2], boundary[3]
    in_x = (data[:, 0] >= x_lo) & (data[:, 0] <= x_hi)
    in_y = (data[:, 1] >= y_lo) & (data[:, 1] <= y_hi)
    return int(np.count_nonzero(in_x & in_y))
# Mutable one-element list used as a global query-id counter (incremented
# by the query generators below).
qrid = [0]
def generate_random_queries(data, size = 10, seed = 0):
    """Generate *size* random rectangular range queries over [0,1]^2.

    Lower-left corners land in [0, 0.5), upper-right corners in [0.5, 1.0),
    so every box straddles the center. Each Query carries the true
    selectivity (fraction of *data* inside the box) and a fresh uid taken
    from the global qrid counter.
    """
    np.random.seed(seed)
    boundaries = np.random.rand(size, 4)
    boundaries[:,0:2] = boundaries[:,0:2] * 0.5
    boundaries[:,2:4] = boundaries[:,2:4] * 0.5 + 0.5
    N = float(data.shape[0])
    # List comprehension instead of map(): on Python 3, map() returns a
    # lazy iterator which cannot be indexed as freqs[i] below.
    freqs = [count_tuple(data, b) / N for b in boundaries]
    queries = []
    for i in range(size):
        queries.append(Query(boundaries[i], freqs[i], qrid[0]))
        qrid[0] = qrid[0] + 1
    return queries
def generate_permanent_queries(data):
    """Build a fixed set of full-width stripe queries over a 10x10 grid.

    Emits nx-1 vertical stripes, ny-1 horizontal stripes, and finally the
    last vertical stripe (the remaining constraint is redundant, as the
    stripe frequencies sum to the whole domain). Each Query carries the
    true selectivity and a uid from the global qrid counter.
    """
    nx = 10
    ny = 10
    xstep = 1.0 / nx
    ystep = 1.0 / ny
    boundaries = []
    # nx + ny - 1 stripes in total; the last constraint is redundant.
    for i in range(nx-1):
        xstart = xstep * i
        xend = xstep * (i + 1)
        boundaries.append([xstart, 0.0, xend, 1.0])
    for i in range(ny-1):
        ystart = ystep * i
        yend = ystep * (i + 1)
        boundaries.append([0.0, ystart, 1.0, yend])
    xstart = xstep * (nx-1)
    xend = xstep * nx
    boundaries.append([xstart, 0.0, xend, 1.0])
    N = float(data.shape[0])
    # List comprehension instead of map(): on Python 3, map() returns a
    # lazy iterator which cannot be indexed as freqs[i] below.
    freqs = [count_tuple(data, b) / N for b in boundaries]
    queries = []
    for i in range(len(boundaries)):
        queries.append(Query(boundaries[i], freqs[i], qrid[0]))
        qrid[0] = qrid[0] + 1
    return queries
def generate_test_queries(data, size = (30, 30)):
    """Build a dense grid of disjoint test queries covering [0,1]^2.

    *size* is (columns, rows). Each cell is shrunk by a tiny epsilon on its
    upper edges so neighboring cells do not double-count boundary points.
    Returns (queries, freqmat) where freqmat[row, col] holds the raw tuple
    count for that cell; each Query's frequency is the normalized count.
    """
    cols, rows = size[0], size[1]
    dx = 1.0 / cols
    dy = 1.0 / rows
    eps = 1e-6
    total = float(data.shape[0])
    queries = []
    freqmat = np.zeros((rows, cols))
    for col in range(cols):
        x_lo = dx * col
        x_hi = dx * (col + 1) - eps
        for row in range(rows):
            y_lo = dy * row
            y_hi = dy * (row + 1) - eps
            box = (x_lo, y_lo, x_hi, y_hi)
            hits = count_tuple(data, box)
            queries.append(Query(box, hits / total, None))
            freqmat[row, col] = hits
    return queries, freqmat
def viz_freqmap(freqmap):
    """Render a frequency matrix as a heat map on the current axes."""
    #plt.imshow(freqmap, cmap='hot', interpolation='nearest')
    # vmin/vmax fixed so side-by-side subplots share one color scale.
    plt.imshow(freqmap, cmap='hot', interpolation='nearest', vmin=0, vmax=0.0025)
def build_isomer(past_queries):
    """Build an ISOMER histogram tree from *past_queries*.

    Each query "cracks" the tree into finer bins; after all queries are
    inserted, bin frequencies are solved via assign_optimal_freq().
    Progress (bin counts) is printed every 5 queries.
    """
    # Root covers the whole [0,1]^2 domain; 0.830340 is the frequency mass
    # assumed for it -- presumably measured for this dataset; TODO confirm.
    root = Node(Query([0, 0, 1, 1], 0.830340, 0))
    qid = 1  # we are going to renumber the past queries
    for i, q in enumerate(past_queries):
        q.uid = qid
        qid = qid + 1
        root.crack(Node(q))
        if i > 0 and i % 5 == 0:
            # print() call form works identically on Python 2 and Python 3.
            print('# of queries: %d, number of bins in ISOMER: %d' % (i, root.count()))
    print('Number of total bins in ISOMER: %d' % root.count())
    root.assign_optimal_freq()
    return root
def test_isomer(data, past_queries, test_queries):
    """Train ISOMER on *past_queries*, then visualize its estimates vs the
    ground truth over the 30x30 grid of *test_queries* and print the error
    norm and total masses. Blocks on plt.show()."""
    root = build_isomer(past_queries)
    print('total number of regions: %d' % root.count())
    # List comprehensions instead of map(): on Python 3, map() returns a
    # lazy iterator, which np.array() would wrap as a 0-d object array.
    isomer_answers = [root.answer(t) for t in test_queries]
    groundtruth_answers = [t.freq for t in test_queries]
    a = np.array(isomer_answers)
    a.shape = (30, 30)
    plt.subplot(1, 3, 1)
    viz_freqmap(a)
    g = np.array(groundtruth_answers)
    g.shape = (30, 30)
    plt.subplot(1, 3, 2)
    viz_freqmap(g)
    plt.subplot(1, 3, 3)
    viz_freqmap(np.abs(g - a))
    print(np.linalg.norm(a - g))
    print('sum of estimates: %f' % np.sum(a))
    print('sum of groundtruth: %f' % np.sum(g))
    plt.show()
def test_crumbs(data, past_queries, test_queries):
    """Train QuickSel (Crumbs) on *past_queries*, then visualize its
    estimates vs the ground truth over the 30x30 grid of *test_queries*
    and print the error norm and total masses. Blocks on plt.show()."""
    quickSel = Crumbs()
    quickSel.assign_optimal_freq(past_queries)
    # List comprehensions instead of map(): on Python 3, map() returns a
    # lazy iterator, which np.array() would wrap as a 0-d object array.
    crumbs_answers = [quickSel.answer(t) for t in test_queries]
    groundtruth_answers = [t.freq for t in test_queries]
    a = np.array(crumbs_answers)
    a.shape = (30, 30)
    plt.subplot(1, 3, 1)
    viz_freqmap(a)
    g = np.array(groundtruth_answers)
    g.shape = (30, 30)
    plt.subplot(1, 3, 2)
    viz_freqmap(g)
    plt.subplot(1, 3, 3)
    viz_freqmap(np.abs(g - a))
    print(np.linalg.norm(a - g))
    print('sum of estimates: %f' % np.sum(a))
    print('sum of groundtruth: %f' % np.sum(g))
    plt.show()
def test_both(data, past_queries, test_queries):
    """Compare ISOMER and QuickSel (Crumbs) estimates side by side against
    the ground truth over the 30x30 grid of *test_queries*.

    NOTE(review): reads the module-level ``perma_queries`` global rather
    than a parameter, and trains ISOMER on a different (truncated) query
    mix than QuickSel -- presumably intentional for the experiment; confirm.
    """
    root = build_isomer(perma_queries[:18] + past_queries[:5])
    # List comprehensions instead of map(): on Python 3, map() returns a
    # lazy iterator, which np.array() would wrap as a 0-d object array.
    isomer_answers = [root.answer(t) for t in test_queries]
    quickSel = Crumbs()
    A, b, v = quickSel.assign_optimal_freq(past_queries + perma_queries)
    crumbs_answers = [quickSel.answer(t) for t in test_queries]
    groundtruth_answers = [t.freq for t in test_queries]
    plt.subplot(1, 3, 1)
    a = np.array(isomer_answers)
    a.shape = (30, 30)
    viz_freqmap(a)
    print('sum of isomer\' estimates: %f' % np.sum(a))
    plt.subplot(1, 3, 2)
    a = np.array(crumbs_answers)
    a.shape = (30, 30)
    viz_freqmap(a)
    print('sum of quickSel\' estimates: %f' % np.sum(a))
    plt.subplot(1, 3, 3)
    g = np.array(groundtruth_answers)
    g.shape = (30, 30)
    viz_freqmap(g)
    print('sum of groundtruth: %f' % np.sum(g))
    plt.show(block=True)
# --- experiment driver (module-level script) ---
data = generate_dataset()
# 20 random training queries plus a fixed grid of "permanent" constraints.
past_queries = generate_random_queries(data, size = 20)
perma_queries = generate_permanent_queries(data)
test_queries, freqmat = generate_test_queries(data)
# Imported here (after the defs) -- only needed for plotting.
import matplotlib.pyplot as plt
#test_isomer(data, perma_queries + past_queries, test_queries)
#test_crumbs(data, past_queries + perma_queries, test_queries)
test_both(data, past_queries, test_queries)
| StarcoderdataPython |
106824 | import scraperwiki
import lxml.html
import urlparse
import urllib
import json
from rdflib import Graph, URIRef
from unidecode import unidecode
from geopy.geocoders import Nominatim
## Dbpedia for b and d dates
# Artist page names: the last URL segment of each key in the previously
# scraped wiki dump.
artists_url = [url.split('/')[-1] for url in json.load(open("wiki_dump.json")).keys()]
def unquote_uni(artist):
    """Turn a (possibly percent-encoded) wiki URL or fragment into a plain
    ASCII artist name: underscores become spaces, only the last path
    segment is kept, and the result is transliterated via unidecode."""
    tail = artist.replace("_", " ").split('/')[-1]
    unquoted = urllib.unquote(tail.encode('utf-8'))
    return unidecode(unquoted.decode("utf-8"))
wiki_prefix = "https://en.wikipedia.org/wiki/"
# Per-artist record, pre-filled with placeholders; birth/death years and
# birthplace coordinates are filled in from dbpedia in the loop below.
info = {unquote_uni(artist):{"birthYear":'????', "deathYear":'????', "birthPlace":'Not available', "birthAddress":'Not available' ,"lat":'Not available', "lon":'Not available', "wiki":wiki_prefix+artist} for artist in artists_url}
geolocator = Nominatim()
def get_geo(place_url):
    """Scrape *place_url* (a dbpedia resource page) and extract coordinates.

    Returns {'lat': float, 'lon': float}, or None when the page cannot be
    fetched or carries no geo:lat/geo:long markup.
    """
    # except Exception (not bare except:) so SystemExit/KeyboardInterrupt
    # still propagate and can stop the scrape loop.
    try:
        html = scraperwiki.scrape(place_url)
    except Exception:
        # Network/HTTP failure: treat as "no coordinates available".
        return None
    try:
        root = lxml.html.fromstring(html)
        lat = root.xpath("//span[@property='geo:lat']/text()")[0]
        lon = root.xpath("//span[@property='geo:long']/text()")[0]
        return {"lat": float(lat), "lon": float(lon)}
    except Exception:
        # Missing markup (IndexError) or unparsable values (ValueError).
        return None
# --- main scrape loop: enrich each artist's record from dbpedia ---
counter = 0
for artist in artists_url:
    un_artist = unquote_uni(artist)
    g = Graph()
    try:
        # Load the artist's RDF resource; skip artists whose page fails.
        g.parse("http://dbpedia.org/resource/{0}".format(artist))
    except:
        continue;
    # Dates arrive as ISO strings; the first four characters are the year.
    for stmt in g.subject_objects(URIRef("http://dbpedia.org/ontology/birthDate")):
        info[un_artist]["birthYear"] = stmt[1][:4]
    for stmt in g.subject_objects(URIRef("http://dbpedia.org/ontology/deathDate")):
        info[un_artist]["deathYear"] = stmt[1][:4]
    # Collect every birthPlace resource URI; store the joined page names.
    place=[]
    for stmt in g.subject_objects(URIRef("http://dbpedia.org/ontology/birthPlace")):
        place.append(stmt[1])
    info[un_artist]["birthPlace"]= '; '.join([a.split('/')[-1] for a in place])
    # Use the first birthplace that yields coordinates.
    for p in place:
        geo= get_geo(p)
        if geo:
            info[un_artist]["lat"]=geo["lat"]
            info[un_artist]["lon"]=geo["lon"]
            # info[un_artist]["birthAddress"] = geolocator.reverse([geo['lat'], geo['lon']]).address
            break
        else:
            continue
    counter = counter + 1
    print counter
# find how many artists have no deathyear
json.dump(info, open("../../app/static/artist_info.json", "wb"))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.