max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
rqalpha/utils/strategy_loader_help.py | ForrestLin0805/rqalpha | 5,263 | 6612551 | # -*- coding: utf-8 -*-
# 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 <EMAIL> 获取。
import sys
import traceback
import six
from rqalpha.utils.exception import patch_user_exc, CustomError, CustomException
def compile_strategy(source_code, strategy, scope):
    """Compile and execute a user strategy file inside ``scope``.

    :param source_code: the strategy source text.
    :param strategy: path/filename of the strategy; used as the compile
        filename so traceback frames can be matched back to the user's file.
    :param scope: globals dict the strategy executes in; returned on success.
    :raises CustomException: wrapping any compile/exec error, keeping only
        the traceback frames that belong to the user's strategy file.
    """
    try:
        code = compile(source_code, strategy, 'exec')
        six.exec_(code, scope)
        return scope
    except Exception as e:
        exc_type, exc_val, exc_tb = sys.exc_info()
        # Mark the exception as a user error so upstream handling attributes
        # it to the strategy, not the framework.
        exc_val = patch_user_exc(exc_val, force=True)
        try:
            msg = str(exc_val)
        except Exception as e1:
            # str() itself may raise on exotic exception objects; fall back
            # to an empty message instead of masking the original error.
            msg = ""
            six.print_(e1)
        error = CustomError()
        error.set_msg(msg)
        error.set_exc(exc_type, exc_val, exc_tb)
        stackinfos = list(traceback.extract_tb(exc_tb))
        if isinstance(e, (SyntaxError, IndentationError)):
            # Syntax errors carry their own location; the traceback would
            # only point at the compile() call above.
            error.add_stack_info(exc_val.filename, exc_val.lineno, "", exc_val.text)
        else:
            # Keep only frames originating from the strategy file itself.
            for item in stackinfos:
                filename, lineno, func_name, code = item
                if strategy == filename:
                    error.add_stack_info(*item)
            # avoid empty stack
            if error.stacks_length == 0:
                # NOTE(review): reuses the loop variable, i.e. the *last*
                # traceback frame; assumes extract_tb() returned at least
                # one entry — confirm.
                error.add_stack_info(*item)
        raise CustomException(error)
| # -*- coding: utf-8 -*-
# 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 <EMAIL> 获取。
import sys
import traceback
import six
from rqalpha.utils.exception import patch_user_exc, CustomError, CustomException
def compile_strategy(source_code, strategy, scope):
    """Compile and execute a user strategy file inside ``scope``.

    :param source_code: the strategy source text.
    :param strategy: path/filename of the strategy; used as the compile
        filename so traceback frames can be matched back to the user's file.
    :param scope: globals dict the strategy executes in; returned on success.
    :raises CustomException: wrapping any compile/exec error, keeping only
        the traceback frames that belong to the user's strategy file.
    """
    try:
        code = compile(source_code, strategy, 'exec')
        six.exec_(code, scope)
        return scope
    except Exception as e:
        exc_type, exc_val, exc_tb = sys.exc_info()
        # Mark the exception as a user error so upstream handling attributes
        # it to the strategy, not the framework.
        exc_val = patch_user_exc(exc_val, force=True)
        try:
            msg = str(exc_val)
        except Exception as e1:
            # str() itself may raise on exotic exception objects; fall back
            # to an empty message instead of masking the original error.
            msg = ""
            six.print_(e1)
        error = CustomError()
        error.set_msg(msg)
        error.set_exc(exc_type, exc_val, exc_tb)
        stackinfos = list(traceback.extract_tb(exc_tb))
        if isinstance(e, (SyntaxError, IndentationError)):
            # Syntax errors carry their own location; the traceback would
            # only point at the compile() call above.
            error.add_stack_info(exc_val.filename, exc_val.lineno, "", exc_val.text)
        else:
            # Keep only frames originating from the strategy file itself.
            for item in stackinfos:
                filename, lineno, func_name, code = item
                if strategy == filename:
                    error.add_stack_info(*item)
            # avoid empty stack
            if error.stacks_length == 0:
                # NOTE(review): reuses the loop variable, i.e. the *last*
                # traceback frame; assumes extract_tb() returned at least
                # one entry — confirm.
                error.add_stack_info(*item)
        raise CustomException(error)
| zh | 0.990676 | # -*- coding: utf-8 -*- # 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”) # # 除非遵守当前许可,否则不得使用本软件。 # # * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件): # 遵守 Apache License 2.0(下称“Apache 2.0 许可”),您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。 # 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。 # # * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件): # 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,否则米筐科技有权追究相应的知识产权侵权责任。 # 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。 # 详细的授权流程,请联系 <EMAIL> 获取。 # avoid empty stack | 1.831321 | 2 |
lib/cogs/welcome.py | null-2020/titan | 0 | 6612552 | from discord import Forbidden
from discord.ext.commands import Cog
from discord.ext.commands import command
from ..db import db
class Welcome(Cog):
    """Greets joining members, announces departures, and keeps the per-guild
    XP rows in sync with the member list."""

    def __init__(self, bot):
        self.bot = bot

    @Cog.listener()
    async def on_ready(self):
        # Report this cog as ready exactly once during initial startup.
        if not self.bot.ready:
            self.bot.cogs_ready.ready_up("welcome")

    @Cog.listener()
    async def on_member_join(self, member):
        # Seed the member's XP row; the ID column combines user and guild ids.
        db.execute("INSERT INTO exp (ID, UserID, GuildID) VALUES (?, ?, ?)",
                   hex(member.id+member.guild.id), member.id, member.guild.id)
        channel_row = list(db.record("SELECT WelcomeChannel FROM guilds WHERE GuildID = ?", member.guild.id))
        if channel_row[0] != 0:
            # A welcome channel is configured; greet publicly.
            channel = self.bot.get_channel(channel_row[0])
            await channel.send(f"Welcome to **{member.guild.name}** {member.mention}!")
        try:
            await member.send(f"Welcome to **{member.guild.name}**! Please enjoy your stay!")
        except Forbidden:
            # Member disallows DMs — nothing else to do.
            pass

    @Cog.listener()
    async def on_member_remove(self, member):
        # Drop the member's XP row and, if configured, announce the leave.
        db.execute("DELETE FROM exp WHERE ID = ?", hex(member.id + member.guild.id))
        channel_row = list(db.record("SELECT WelcomeChannel FROM guilds WHERE GuildID = ?", member.guild.id))
        if channel_row[0] != 0:
            channel = self.bot.get_channel(channel_row[0])
            await channel.send(f"{member.display_name} has left {member.guild.name}.")

    @Cog.listener()
    async def on_guild_join(self, guild):  # Fix these two listeners
        # Register the guild and seed XP rows for every human member.
        db.execute("INSERT INTO guilds (GuildID) VALUES (?)", guild.id)
        rows = ((hex(member.id + member.guild.id), member.id, member.guild.id)
                for member in guild.members if not member.bot)
        db.multiexec("INSERT INTO exp (ID, UserID, GuildID) VALUES (?, ?, ?)", rows)

    @Cog.listener()
    async def on_guild_remove(self, guild):
        # Forget the guild and purge every member's XP row for it.
        db.execute("DELETE FROM guilds WHERE GuildID = ?", guild.id)
        rows = ((hex(member.id + member.guild.id), member.id, member.guild.id)
                for member in guild.members if not member.bot)
        db.multiexec("DELETE FROM exp WHERE ID = ? AND UserID = ? AND GuildID = ?", rows)
def setup(bot):
bot.add_cog(Welcome(bot)) | from discord import Forbidden
from discord.ext.commands import Cog
from discord.ext.commands import command
from ..db import db
class Welcome(Cog):
    """Greets joining members, announces departures, and keeps the per-guild
    XP rows in sync with the member list."""

    def __init__(self, bot):
        self.bot = bot

    @Cog.listener()
    async def on_ready(self):
        # Report this cog as ready exactly once during initial startup.
        if not self.bot.ready:
            self.bot.cogs_ready.ready_up("welcome")

    @Cog.listener()
    async def on_member_join(self, member):
        # Seed the member's XP row; the ID column combines user and guild ids.
        db.execute("INSERT INTO exp (ID, UserID, GuildID) VALUES (?, ?, ?)",
                   hex(member.id+member.guild.id), member.id, member.guild.id)
        channel_row = list(db.record("SELECT WelcomeChannel FROM guilds WHERE GuildID = ?", member.guild.id))
        if channel_row[0] != 0:
            # A welcome channel is configured; greet publicly.
            channel = self.bot.get_channel(channel_row[0])
            await channel.send(f"Welcome to **{member.guild.name}** {member.mention}!")
        try:
            await member.send(f"Welcome to **{member.guild.name}**! Please enjoy your stay!")
        except Forbidden:
            # Member disallows DMs — nothing else to do.
            pass

    @Cog.listener()
    async def on_member_remove(self, member):
        # Drop the member's XP row and, if configured, announce the leave.
        db.execute("DELETE FROM exp WHERE ID = ?", hex(member.id + member.guild.id))
        channel_row = list(db.record("SELECT WelcomeChannel FROM guilds WHERE GuildID = ?", member.guild.id))
        if channel_row[0] != 0:
            channel = self.bot.get_channel(channel_row[0])
            await channel.send(f"{member.display_name} has left {member.guild.name}.")

    @Cog.listener()
    async def on_guild_join(self, guild):  # Fix these two listeners
        # Register the guild and seed XP rows for every human member.
        db.execute("INSERT INTO guilds (GuildID) VALUES (?)", guild.id)
        rows = ((hex(member.id + member.guild.id), member.id, member.guild.id)
                for member in guild.members if not member.bot)
        db.multiexec("INSERT INTO exp (ID, UserID, GuildID) VALUES (?, ?, ?)", rows)

    @Cog.listener()
    async def on_guild_remove(self, guild):
        # Forget the guild and purge every member's XP row for it.
        db.execute("DELETE FROM guilds WHERE GuildID = ?", guild.id)
        rows = ((hex(member.id + member.guild.id), member.id, member.guild.id)
                for member in guild.members if not member.bot)
        db.multiexec("DELETE FROM exp WHERE ID = ? AND UserID = ? AND GuildID = ?", rows)
def setup(bot):
bot.add_cog(Welcome(bot)) | en | 0.724875 | # Fix these two listeners | 2.483599 | 2 |
module_api/rogertests/test_localize.py | rogertalk/roger-api | 3 | 6612553 | # -*- coding: utf-8 -*-
import mock
from roger import localize
import rogertests
class BaseTestCase(rogertests.RogerTestCase):
    """Shared fixture base for the localization tests."""

    def setUp(self):
        # No extra fixtures yet; defer entirely to the framework's setup.
        super(BaseTestCase, self).setUp()
class Strings(BaseTestCase):
    """The challenge-code string from ``localize.get_string`` must match the
    receiver's locale — inferred from the phone country code for calls/SMS
    and from the resolved country for e-mail.  Countries without a
    translation fall back to en-us."""

    def test_get_call_en(self):
        code = '233'
        receiver = '+1242323424'
        text = localize.get_string('call.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'code', text)
        # all non-localized countries get en-us
        code = '233'
        receiver = '+7242323424'
        text = localize.get_string('call.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn('code', text)

    def test_get_call_es(self):
        code = '233'
        receiver = '+342342323424'
        text = localize.get_string('call.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verificación', text)

    def test_get_call_pt(self):
        code = '233'
        receiver = '+5542323424'
        text = localize.get_string('call.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verificação', text)

    @mock.patch('roger.localize._get_country')
    def test_get_email_en(self, get_country_mock):
        # E-mail locale comes from the (mocked) country resolver.
        get_country_mock.return_value = 'US'
        code = '233'
        receiver = '<EMAIL>'
        subject = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, subject)
        self.assertIn(u'verification', subject)
        # NOTE(review): `body` re-fetches the `.subject` key — looks like a
        # copy-paste slip for an `email.challenge_code.body` key; confirm
        # before changing (same pattern in the other e-mail tests below).
        body = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, body)
        # all non-localized countries get en-us
        get_country_mock.return_value = 'CN'
        code = '233'
        receiver = '<EMAIL>'
        subject = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, subject)
        self.assertIn(u'verification', subject)
        body = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, body)

    @mock.patch('roger.localize._get_country')
    def test_get_email_es(self, get_country_mock):
        get_country_mock.return_value = 'MX'
        code = '233'
        receiver = '<EMAIL>'
        subject = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, subject)
        self.assertIn(u'verificación', subject)
        body = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, body)

    @mock.patch('roger.localize._get_country')
    def test_get_email_pt(self, get_country_mock):
        get_country_mock.return_value = 'BR'
        code = '233'
        receiver = '<EMAIL>'
        subject = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, subject)
        self.assertIn(u'verificação', subject)
        body = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, body)

    def test_get_sms_en(self):
        code = '233'
        receiver = '+1242323424'
        text = localize.get_string('sms.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verification', text)
        # all non-localized countries get en-us
        code = '233'
        receiver = '+7242323424'
        text = localize.get_string('sms.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verification', text)

    def test_get_sms_es(self):
        code = '233'
        receiver = '+342342323424'
        text = localize.get_string('sms.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verificación', text)

    def test_get_sms_pt(self):
        code = '233'
        receiver = '+5542323424'
        text = localize.get_string('sms.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verificação', text)
| # -*- coding: utf-8 -*-
import mock
from roger import localize
import rogertests
class BaseTestCase(rogertests.RogerTestCase):
    """Shared fixture base for the localization tests."""

    def setUp(self):
        # No extra fixtures yet; defer entirely to the framework's setup.
        super(BaseTestCase, self).setUp()
class Strings(BaseTestCase):
    """The challenge-code string from ``localize.get_string`` must match the
    receiver's locale — inferred from the phone country code for calls/SMS
    and from the resolved country for e-mail.  Countries without a
    translation fall back to en-us."""

    def test_get_call_en(self):
        code = '233'
        receiver = '+1242323424'
        text = localize.get_string('call.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'code', text)
        # all non-localized countries get en-us
        code = '233'
        receiver = '+7242323424'
        text = localize.get_string('call.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn('code', text)

    def test_get_call_es(self):
        code = '233'
        receiver = '+342342323424'
        text = localize.get_string('call.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verificación', text)

    def test_get_call_pt(self):
        code = '233'
        receiver = '+5542323424'
        text = localize.get_string('call.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verificação', text)

    @mock.patch('roger.localize._get_country')
    def test_get_email_en(self, get_country_mock):
        # E-mail locale comes from the (mocked) country resolver.
        get_country_mock.return_value = 'US'
        code = '233'
        receiver = '<EMAIL>'
        subject = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, subject)
        self.assertIn(u'verification', subject)
        # NOTE(review): `body` re-fetches the `.subject` key — looks like a
        # copy-paste slip for an `email.challenge_code.body` key; confirm
        # before changing (same pattern in the other e-mail tests below).
        body = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, body)
        # all non-localized countries get en-us
        get_country_mock.return_value = 'CN'
        code = '233'
        receiver = '<EMAIL>'
        subject = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, subject)
        self.assertIn(u'verification', subject)
        body = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, body)

    @mock.patch('roger.localize._get_country')
    def test_get_email_es(self, get_country_mock):
        get_country_mock.return_value = 'MX'
        code = '233'
        receiver = '<EMAIL>'
        subject = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, subject)
        self.assertIn(u'verificación', subject)
        body = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, body)

    @mock.patch('roger.localize._get_country')
    def test_get_email_pt(self, get_country_mock):
        get_country_mock.return_value = 'BR'
        code = '233'
        receiver = '<EMAIL>'
        subject = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, subject)
        self.assertIn(u'verificação', subject)
        body = localize.get_string('email.challenge_code.subject', args={'code': code}, receiver=receiver)
        self.assertIn(code, body)

    def test_get_sms_en(self):
        code = '233'
        receiver = '+1242323424'
        text = localize.get_string('sms.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verification', text)
        # all non-localized countries get en-us
        code = '233'
        receiver = '+7242323424'
        text = localize.get_string('sms.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verification', text)

    def test_get_sms_es(self):
        code = '233'
        receiver = '+342342323424'
        text = localize.get_string('sms.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verificación', text)

    def test_get_sms_pt(self):
        code = '233'
        receiver = '+5542323424'
        text = localize.get_string('sms.challenge_code', args={'code': code}, receiver=receiver)
        self.assertIn(code, text)
        self.assertIn(u'verificação', text)
| en | 0.697669 | # -*- coding: utf-8 -*- # all non-localized countries get en-us # all non-localized countries get en-us # all non-localized countries get en-us | 2.433421 | 2 |
test/plot_rosenbrock.py | elnjensen/DiskJockey | 0 | 6612554 | <reponame>elnjensen/DiskJockey
import numpy as np
import matplotlib.pyplot as plt
def rosenbrock(x, y, a=1., b=100.):
    """Negated Rosenbrock function: -((a - x)**2 + b*(y - x**2)**2).

    The global maximum (value 0) sits at (a, a**2).  ``a`` and ``b``
    default to the classic parameters (1, 100), so the existing
    two-argument call sites are unaffected; they are exposed as keyword
    parameters so other parameterizations can be plotted without editing
    this function.  Works elementwise on NumPy arrays as well as scalars.
    """
    return -((a - x)**2 + b * (y - x**2)**2)
# Sample the (negated) Rosenbrock surface on a 100x100 grid and save a
# contour plot of the region just below its maximum.
N = 100
xs = np.linspace(-3, 3, num=N)
ys = np.linspace(-1, 3, num=N)
XX,YY = np.meshgrid(xs, ys)
ZZ = rosenbrock(XX,YY)
mm = np.max(ZZ)
# Contour only the 10 levels spanning [mm - 10, mm] (mm is 0 in theory).
plt.contour(XX,YY, ZZ, levels=np.linspace(mm - 10, mm, num=10))
plt.savefig("contour.png")
| import numpy as np
import matplotlib.pyplot as plt
def rosenbrock(x, y, a=1., b=100.):
    """Negated Rosenbrock function: -((a - x)**2 + b*(y - x**2)**2).

    The global maximum (value 0) sits at (a, a**2).  ``a`` and ``b``
    default to the classic parameters (1, 100), so the existing
    two-argument call sites are unaffected; they are exposed as keyword
    parameters so other parameterizations can be plotted without editing
    this function.  Works elementwise on NumPy arrays as well as scalars.
    """
    return -((a - x)**2 + b * (y - x**2)**2)
# Sample the (negated) Rosenbrock surface on a 100x100 grid and save a
# contour plot of the region just below its maximum.
N = 100
xs = np.linspace(-3, 3, num=N)
ys = np.linspace(-1, 3, num=N)
XX,YY = np.meshgrid(xs, ys)
ZZ = rosenbrock(XX,YY)
mm = np.max(ZZ)
# Contour only the 10 levels spanning [mm - 10, mm] (mm is 0 in theory).
plt.contour(XX,YY, ZZ, levels=np.linspace(mm - 10, mm, num=10))
plt.savefig("contour.png") | none | 1 | 3.134021 | 3 | |
src/library/__init__.py | mscelnik/rls-demo | 0 | 6612555 | <gh_stars>0
from . import model
from . import services
| from . import model
from . import services | none | 1 | 1.202163 | 1 | |
src/app/api/routes/health.py | tsungchih/python-graphql | 0 | 6612556 | #-*- coding: utf-8 -*-
from fastapi import APIRouter
from app.message import response
from starlette.status import HTTP_200_OK
router = APIRouter()


@router.get("/health", status_code=HTTP_200_OK)
async def health_check():
    """Liveness probe: always answers 200 with the standard health message."""
    return {"message": response.HealthCheckResponse().message}
| #-*- coding: utf-8 -*-
from fastapi import APIRouter
from app.message import response
from starlette.status import HTTP_200_OK
router = APIRouter()


@router.get("/health", status_code=HTTP_200_OK)
async def health_check():
    """Liveness probe: always answers 200 with the standard health message."""
    return {"message": response.HealthCheckResponse().message}
| en | 0.636498 | #-*- coding: utf-8 -*- | 2.246779 | 2 |
environment_server/actor_data.py | Bjacobwork/AnotherAgent57 | 0 | 6612557 | from multiprocessing import shared_memory, Lock
import numpy as np
import functools
class ActorData:
    """Zero-copy numpy views over a single shared-memory segment holding one
    actor's per-environment batch data.

    Two ``ActorData`` objects constructed with the same ``address`` map the
    same ``SharedMemory`` block, so writes through one are visible through
    the other.  The field layout is defined purely by the running
    ``start``/``end`` byte offsets below, and ``memory_size`` mirrors that
    layout — keep the two in sync when adding fields.
    """

    def __init__(self, params, batch_size, address=None):
        # address: attach to an existing segment by name; None creates one.
        dtype = params['Misc']['dtype']
        element_size = 4  # bytes per int32/uint32 element
        dtype_size = {"float16": 2, "float32": 4, "float64": 8}[dtype]
        # Width of the flattened recurrent state; the exact composition of
        # the *4 factor is defined by the agent model — confirm there.
        hidden_size = params['Agent57']['lstm']['units'] * 4
        obs_shape = params['Misc']['obs_shape']
        # NOTE(review): obs_size multiplies *all* obs_shape entries, but the
        # observations view below only uses obs_shape[1:]; the byte counts
        # agree only when obs_shape[0] == 1 — confirm.
        obs_size = functools.reduce(lambda a, b: a * b, obs_shape)
        # Per-batch bytes (3 one-byte flags, 5 dtype scalars, hidden state,
        # observation, 4 int32 fields) plus the global status byte and timer.
        memory_size = batch_size * (
                3 + 5 * dtype_size + dtype_size * hidden_size + obs_size + 4 * element_size) + 2 * element_size + 1
        if address:
            self.shared_mem = shared_memory.SharedMemory(name=address)
        else:
            self.shared_mem = shared_memory.SharedMemory(create=True, size=memory_size)
        # NOTE(review): this Lock is per-object; a process that attaches by
        # name gets its *own* lock, so cross-process exclusion only works
        # when the ActorData object itself is inherited — confirm usage.
        self.lock = Lock()
        start = 0
        end = 1
        # 1 byte: actor status flag.
        self.status = np.ndarray(1, dtype=np.uint8, buffer=self.shared_mem.buf[start:end])
        start = 1
        end += 2 * element_size  # 8 bytes == one float64
        self.timer = np.ndarray(1, dtype=np.float64, buffer=self.shared_mem.buf[start:end])
        start = end
        end += batch_size * element_size
        self.episode_ids = np.ndarray(batch_size, dtype=np.uint32, buffer=self.shared_mem.buf[start:end])
        start = end
        end += batch_size * element_size
        self.steps = np.ndarray(batch_size, dtype=np.uint32, buffer=self.shared_mem.buf[start:end])
        start = end
        end += batch_size
        # 1 byte each; presumably the policy/arm index — confirm.
        self.j = np.ndarray(batch_size, dtype=np.uint8, buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * batch_size
        self.extrinsic_rewards = np.ndarray((batch_size, 1), dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * batch_size
        self.intrinsic_rewards = np.ndarray((batch_size, 1), dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += element_size * batch_size
        self.actions = np.ndarray(batch_size, dtype=np.int32, buffer=self.shared_mem.buf[start:end])
        start = end
        end += element_size * batch_size
        self.prev_actions = np.ndarray(batch_size, dtype=np.int32, buffer=self.shared_mem.buf[start:end])
        start = end
        end += obs_size * batch_size
        # Raw uint8 frames; the leading dim of obs_shape is replaced by
        # batch_size here.
        self.observations = np.ndarray((batch_size, obs_shape[1], obs_shape[2], obs_shape[3]), dtype=np.uint8,
                                       buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * hidden_size * batch_size
        self.hidden = np.ndarray((batch_size, hidden_size), dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * batch_size
        # presumably the behaviour-policy probability of the action — confirm.
        self.mu = np.ndarray(batch_size, dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * batch_size
        self.q_value = np.ndarray(batch_size, dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * batch_size
        self.discounted_q = np.ndarray(batch_size, dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += batch_size
        # 1 byte each: episode-reset flags.
        self.resets = np.ndarray(batch_size, dtype='bool', buffer=self.shared_mem.buf[start:end])
        start = end
        end += batch_size
        self.loss_of_life = np.ndarray(batch_size, dtype='bool', buffer=self.shared_mem.buf[start:end])
if __name__ == "__main__":
    # Smoke test: create a segment, attach a second view by name, then check
    # that a write through one view is visible through the other.
    import yaml
    with open('../actors/params.yml', 'r') as file:
        params = yaml.full_load(file)
    foo = ActorData(params, 6)
    bar = ActorData(params, 6, address=foo.shared_mem.name)
    with bar.lock:
        bar.resets[-1] = True
    print(foo.resets)
| from multiprocessing import shared_memory, Lock
import numpy as np
import functools
class ActorData:
    """Zero-copy numpy views over a single shared-memory segment holding one
    actor's per-environment batch data.

    Two ``ActorData`` objects constructed with the same ``address`` map the
    same ``SharedMemory`` block, so writes through one are visible through
    the other.  The field layout is defined purely by the running
    ``start``/``end`` byte offsets below, and ``memory_size`` mirrors that
    layout — keep the two in sync when adding fields.
    """

    def __init__(self, params, batch_size, address=None):
        # address: attach to an existing segment by name; None creates one.
        dtype = params['Misc']['dtype']
        element_size = 4  # bytes per int32/uint32 element
        dtype_size = {"float16": 2, "float32": 4, "float64": 8}[dtype]
        # Width of the flattened recurrent state; the exact composition of
        # the *4 factor is defined by the agent model — confirm there.
        hidden_size = params['Agent57']['lstm']['units'] * 4
        obs_shape = params['Misc']['obs_shape']
        # NOTE(review): obs_size multiplies *all* obs_shape entries, but the
        # observations view below only uses obs_shape[1:]; the byte counts
        # agree only when obs_shape[0] == 1 — confirm.
        obs_size = functools.reduce(lambda a, b: a * b, obs_shape)
        # Per-batch bytes (3 one-byte flags, 5 dtype scalars, hidden state,
        # observation, 4 int32 fields) plus the global status byte and timer.
        memory_size = batch_size * (
                3 + 5 * dtype_size + dtype_size * hidden_size + obs_size + 4 * element_size) + 2 * element_size + 1
        if address:
            self.shared_mem = shared_memory.SharedMemory(name=address)
        else:
            self.shared_mem = shared_memory.SharedMemory(create=True, size=memory_size)
        # NOTE(review): this Lock is per-object; a process that attaches by
        # name gets its *own* lock, so cross-process exclusion only works
        # when the ActorData object itself is inherited — confirm usage.
        self.lock = Lock()
        start = 0
        end = 1
        # 1 byte: actor status flag.
        self.status = np.ndarray(1, dtype=np.uint8, buffer=self.shared_mem.buf[start:end])
        start = 1
        end += 2 * element_size  # 8 bytes == one float64
        self.timer = np.ndarray(1, dtype=np.float64, buffer=self.shared_mem.buf[start:end])
        start = end
        end += batch_size * element_size
        self.episode_ids = np.ndarray(batch_size, dtype=np.uint32, buffer=self.shared_mem.buf[start:end])
        start = end
        end += batch_size * element_size
        self.steps = np.ndarray(batch_size, dtype=np.uint32, buffer=self.shared_mem.buf[start:end])
        start = end
        end += batch_size
        # 1 byte each; presumably the policy/arm index — confirm.
        self.j = np.ndarray(batch_size, dtype=np.uint8, buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * batch_size
        self.extrinsic_rewards = np.ndarray((batch_size, 1), dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * batch_size
        self.intrinsic_rewards = np.ndarray((batch_size, 1), dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += element_size * batch_size
        self.actions = np.ndarray(batch_size, dtype=np.int32, buffer=self.shared_mem.buf[start:end])
        start = end
        end += element_size * batch_size
        self.prev_actions = np.ndarray(batch_size, dtype=np.int32, buffer=self.shared_mem.buf[start:end])
        start = end
        end += obs_size * batch_size
        # Raw uint8 frames; the leading dim of obs_shape is replaced by
        # batch_size here.
        self.observations = np.ndarray((batch_size, obs_shape[1], obs_shape[2], obs_shape[3]), dtype=np.uint8,
                                       buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * hidden_size * batch_size
        self.hidden = np.ndarray((batch_size, hidden_size), dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * batch_size
        # presumably the behaviour-policy probability of the action — confirm.
        self.mu = np.ndarray(batch_size, dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * batch_size
        self.q_value = np.ndarray(batch_size, dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += dtype_size * batch_size
        self.discounted_q = np.ndarray(batch_size, dtype=dtype, buffer=self.shared_mem.buf[start:end])
        start = end
        end += batch_size
        # 1 byte each: episode-reset flags.
        self.resets = np.ndarray(batch_size, dtype='bool', buffer=self.shared_mem.buf[start:end])
        start = end
        end += batch_size
        self.loss_of_life = np.ndarray(batch_size, dtype='bool', buffer=self.shared_mem.buf[start:end])
if __name__ == "__main__":
    # Smoke test: create a segment, attach a second view by name, then check
    # that a write through one view is visible through the other.
    import yaml
    with open('../actors/params.yml', 'r') as file:
        params = yaml.full_load(file)
    foo = ActorData(params, 6)
    bar = ActorData(params, 6, address=foo.shared_mem.name)
    with bar.lock:
        bar.resets[-1] = True
    print(foo.resets)
| none | 1 | 2.480476 | 2 | |
tests/make_unbound_target.py | philiparvidsson/pymake | 2 | 6612558 | <reponame>philiparvidsson/pymake<filename>tests/make_unbound_target.py
#!/usr/bin/env python
#---------------------------------------
# IMPORTS
#---------------------------------------
import test
from pymake2 import *
#---------------------------------------
# FUNCTIONS
#---------------------------------------
@default_conf({})
def my_target():
    # Intentionally empty: this self-test only checks that invoking a target
    # whose default_conf is empty fails as expected.
    pass
#---------------------------------------
# SCRIPT
#---------------------------------------
# Invoke the target with an empty configuration; pymake2 is expected to
# fail, which is what this self-test asserts.
test.should_fail()
pymake2({}, [ 'my_target' ])
test.success()
| #!/usr/bin/env python
#---------------------------------------
# IMPORTS
#---------------------------------------
import test
from pymake2 import *
#---------------------------------------
# FUNCTIONS
#---------------------------------------
@default_conf({})
def my_target():
    # Intentionally empty: this self-test only checks that invoking a target
    # whose default_conf is empty fails as expected.
    pass
#---------------------------------------
# SCRIPT
#---------------------------------------
# Invoke the target with an empty configuration; pymake2 is expected to
# fail, which is what this self-test asserts.
test.should_fail()
pymake2({}, [ 'my_target' ])
test.success() | pt | 0.091974 | #!/usr/bin/env python #--------------------------------------- # IMPORTS #--------------------------------------- #--------------------------------------- # FUNCTIONS #--------------------------------------- #--------------------------------------- # SCRIPT #--------------------------------------- | 1.811916 | 2 |
src/calc_centrality.py | allenwoods/graph_centrality | 0 | 6612559 | <reponame>allenwoods/graph_centrality<filename>src/calc_centrality.py
# -*- coding: utf-8 -*-
#+Author:<NAME>
import numpy as np
import numpy.linalg as la
import src.find_paths as find
mat = np.matrix
def degree_centrality(graph):
    """Degree centrality: for each adjacency-matrix row, the sum of its
    entries, i.e. the node's (weighted) degree."""
    return [sum(row) for row in graph.adj_mtx]
def eigenvector_centrality(graph):
    # NOTE(review): despite the name, this returns the *eigenvalues* of the
    # adjacency matrix (numpy.linalg.eigvals), not the components of the
    # principal eigenvector. Callers relying on the current output keep
    # working; rename or fix deliberately.
    return la.eigvals(graph.adj_mtx)
def katz_centrality(graph, alpha=0.3, beta=0.3):
    """Katz centrality: beta * (I - alpha * A^T)^-1 @ 1, one score per node,
    returned as a flat 1-D ndarray."""
    A = graph.adj_mtx
    n = len(A)
    identity = np.identity(n)
    ones = np.array([1 for _ in range(n)])
    scores = (beta * np.matrix(identity - alpha * A.T).I).dot(ones)
    # Flatten the 1xN matrix result into a plain array.
    return scores.A1
def pagerank_centrality(graph):
    """PageRank-style centrality: beta * (I - alpha * A^T @ D^-1)^-1 @ 1,
    where D is the diagonal row-sum (out-degree) matrix and alpha is set to
    90% of the inverse spectral radius so the inverse exists."""
    A = graph.adj_mtx
    n = len(A)
    identity = np.identity(n)
    degree_diag = np.identity(n)
    for idx, row_sum in enumerate(A.sum(axis=1)):
        degree_diag[idx] = np.multiply(degree_diag[idx], row_sum)
    degree_diag = np.mat(degree_diag)
    alpha = 1 / max(la.eigvals(A)) * 0.9
    beta = 0.3
    ones = np.array([1 for _ in range(n)])
    scores = beta * np.matrix(identity - np.matrix((alpha * A.T).dot(degree_diag.I))).I.dot(ones)
    # Flatten the 1xN matrix result into a plain array.
    return scores.A1
def betweenness_centrality(graph):
    """Betweenness centrality computed from the precomputed all-pairs
    shortest paths returned by ``find.all_shortest_paths``."""
    shortest_paths = find.all_shortest_paths(graph)
    nodes = graph.nodes
    betweenness = list()
    for n in nodes:
        n_betweenness = 0
        for paths in shortest_paths:
            # paths: presumably all shortest paths of one (source, target)
            # pair — confirm against find.all_shortest_paths.
            sub_n_betweenness = 0
            for path in paths:
                if n in path[1:-1]:
                    #Don't need the path has the node on both end
                    sub_n_betweenness += 1
            # Fraction of this pair's shortest paths passing through n,
            # doubled — presumably to count both traversal directions;
            # TODO confirm.
            n_betweenness += (sub_n_betweenness/len(paths))*2
        betweenness.append(n_betweenness)
    return betweenness
def closeness_centrality(graph):
    """Closeness centrality: inverse of the average path length over pairs
    whose shortest path starts or ends at the node."""
    shortest_paths = find.all_shortest_paths(graph)
    nodes = graph.nodes
    closeness = list()
    for n in nodes:
        n_closeness = 0
        for paths in shortest_paths:
            sub_n_closeness = 0
            for path in paths:
                # NOTE(review): both branches break immediately, so only the
                # *first* path of each group is ever inspected — confirm
                # that all paths in a group share the same endpoints/length.
                if n not in path[:1] and n not in path[-1:]:
                    # n is not an endpoint of this pair; contributes 0.
                    break
                else:
                    sub_n_closeness = len(path)-1  # hop count of the path
                    break
            n_closeness += sub_n_closeness
        closeness.append(1/(n_closeness/(len(nodes)-1)))
    return closeness
| # -*- coding: utf-8 -*-
#+Author:<NAME>
import numpy as np
import numpy.linalg as la
import src.find_paths as find
mat = np.matrix
def degree_centrality(graph):
    """Degree centrality: for each adjacency-matrix row, the sum of its
    entries, i.e. the node's (weighted) degree."""
    return [sum(row) for row in graph.adj_mtx]
def eigenvector_centrality(graph):
    # NOTE(review): despite the name, this returns the *eigenvalues* of the
    # adjacency matrix (numpy.linalg.eigvals), not the components of the
    # principal eigenvector. Callers relying on the current output keep
    # working; rename or fix deliberately.
    return la.eigvals(graph.adj_mtx)
def katz_centrality(graph, alpha=0.3, beta=0.3):
    """Katz centrality: beta * (I - alpha * A^T)^-1 @ 1, one score per node,
    returned as a flat 1-D ndarray."""
    A = graph.adj_mtx
    n = len(A)
    identity = np.identity(n)
    ones = np.array([1 for _ in range(n)])
    scores = (beta * np.matrix(identity - alpha * A.T).I).dot(ones)
    # Flatten the 1xN matrix result into a plain array.
    return scores.A1
def pagerank_centrality(graph):
    """PageRank-style centrality: beta * (I - alpha * A^T @ D^-1)^-1 @ 1,
    where D is the diagonal row-sum (out-degree) matrix and alpha is set to
    90% of the inverse spectral radius so the inverse exists."""
    A = graph.adj_mtx
    n = len(A)
    identity = np.identity(n)
    degree_diag = np.identity(n)
    for idx, row_sum in enumerate(A.sum(axis=1)):
        degree_diag[idx] = np.multiply(degree_diag[idx], row_sum)
    degree_diag = np.mat(degree_diag)
    alpha = 1 / max(la.eigvals(A)) * 0.9
    beta = 0.3
    ones = np.array([1 for _ in range(n)])
    scores = beta * np.matrix(identity - np.matrix((alpha * A.T).dot(degree_diag.I))).I.dot(ones)
    # Flatten the 1xN matrix result into a plain array.
    return scores.A1
def betweenness_centrality(graph):
    """Betweenness centrality computed from the precomputed all-pairs
    shortest paths returned by ``find.all_shortest_paths``."""
    shortest_paths = find.all_shortest_paths(graph)
    nodes = graph.nodes
    betweenness = list()
    for n in nodes:
        n_betweenness = 0
        for paths in shortest_paths:
            # paths: presumably all shortest paths of one (source, target)
            # pair — confirm against find.all_shortest_paths.
            sub_n_betweenness = 0
            for path in paths:
                if n in path[1:-1]:
                    #Don't need the path has the node on both end
                    sub_n_betweenness += 1
            # Fraction of this pair's shortest paths passing through n,
            # doubled — presumably to count both traversal directions;
            # TODO confirm.
            n_betweenness += (sub_n_betweenness/len(paths))*2
        betweenness.append(n_betweenness)
    return betweenness
def closeness_centrality(graph):
    """Closeness centrality: inverse of the average path length over pairs
    whose shortest path starts or ends at the node."""
    shortest_paths = find.all_shortest_paths(graph)
    nodes = graph.nodes
    closeness = list()
    for n in nodes:
        n_closeness = 0
        for paths in shortest_paths:
            sub_n_closeness = 0
            for path in paths:
                # NOTE(review): both branches break immediately, so only the
                # *first* path of each group is ever inspected — confirm
                # that all paths in a group share the same endpoints/length.
                if n not in path[:1] and n not in path[-1:]:
                    # n is not an endpoint of this pair; contributes 0.
                    break
                else:
                    sub_n_closeness = len(path)-1  # hop count of the path
                    break
            n_closeness += sub_n_closeness
        closeness.append(1/(n_closeness/(len(nodes)-1)))
    return closeness
sts-automation/scripts/kenna-tag-alignment.py | cihatyildiz/vm-scripts | 0 | 6612560 | import sys, os, requests, json, time
from requests.auth import HTTPBasicAuth
from datetime import datetime
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
from lib.jira import *
from lib.kenna import *
from lib.sts import *
config_file = "data/tag-alignment.json"
# The environment value may arrive wrapped in literal double quotes; strip them.
kenna_token = os.environ['KENNA_TOKEN'].replace('"', "")
if __name__ == "__main__":
    total_assets = 0
    with open(config_file) as config_data:
        config_json = json.load(config_data)
    for v in config_json["assets"]:
        # Asset ids covered by this entry's Kenna risk meter.
        asset_ids = getAssetIdsByRiskMeter(kenna_token, v["riskmeter"])
        if len(asset_ids) == 0:
            # NOTE(review): the message (with its "dont" typo) is specific to
            # one risk meter, yet sys.exit() aborts *all* remaining entries
            # — confirm this is intended.
            print("Desktop assets dont have #Network tag")
            sys.exit()
        print(asset_ids)
        if v["operation"] == "tag-remove":
            tag_to_remove = v["tags"]
            print(tag_to_remove)
            for asset_id in asset_ids:
                results = removeKennaTag(kenna_token, asset_id, tag_to_remove)
                print(results)
        # NOTE(review): counted even when no operation matched above.
        total_assets += len(asset_ids)
    print("{} Assets has been updated in this process.".format(total_assets))
| import sys, os, requests, json, time
from requests.auth import HTTPBasicAuth
from datetime import datetime
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
from lib.jira import *
from lib.kenna import *
from lib.sts import *
config_file = "data/tag-alignment.json"
# The environment value may arrive wrapped in literal double quotes; strip them.
kenna_token = os.environ['KENNA_TOKEN'].replace('"', "")
if __name__ == "__main__":
    total_assets = 0
    with open(config_file) as config_data:
        config_json = json.load(config_data)
    for v in config_json["assets"]:
        # Asset ids covered by this entry's Kenna risk meter.
        asset_ids = getAssetIdsByRiskMeter(kenna_token, v["riskmeter"])
        if len(asset_ids) == 0:
            # NOTE(review): the message (with its "dont" typo) is specific to
            # one risk meter, yet sys.exit() aborts *all* remaining entries
            # — confirm this is intended.
            print("Desktop assets dont have #Network tag")
            sys.exit()
        print(asset_ids)
        if v["operation"] == "tag-remove":
            tag_to_remove = v["tags"]
            print(tag_to_remove)
            for asset_id in asset_ids:
                results = removeKennaTag(kenna_token, asset_id, tag_to_remove)
                print(results)
        # NOTE(review): counted even when no operation matched above.
        total_assets += len(asset_ids)
    print("{} Assets has been updated in this process.".format(total_assets))
| es | 0.298172 | #Network tag") | 2.34053 | 2 |
numba/type_inference/modules/builtinmodule.py | shiquanwang/numba | 1 | 6612561 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Type functions for Python builtins.
"""
from __future__ import print_function, division, absolute_import
from numba import *
from numba import nodes
from numba import error
# from numba import function_util
# from numba.specialize.mathcalls import is_math_function
from numba.symtab import Variable
from numba import typesystem
from numba.typesystem import is_obj, promote_closest, get_type
from numba.type_inference.modules import utils
#----------------------------------------------------------------------------
# Utilities
#----------------------------------------------------------------------------
register_builtin = utils.register_with_argchecking
def cast(node, dst_type):
    """Build an AST node coercing ``node``'s first argument to ``dst_type``;
    with no arguments, produce a constant 0 of that type (e.g. ``int()``)."""
    if node.args:
        return nodes.CoercionNode(node.args[0], dst_type=dst_type)
    return nodes.ConstNode(0, dst_type)
#----------------------------------------------------------------------------
# Type Functions for Builtins
#----------------------------------------------------------------------------
# TODO: add specializer functions to insert coercions before late specialization
# TODO: don't rewrite AST here
@register_builtin((1, 2, 3), can_handle_deferred_types=True)
def range_(context, node, start, stop, step):
node.variable = Variable(typesystem.RangeType())
node.args = nodes.CoercionNode.coerce(node.args, dst_type=Py_ssize_t)
return node
if not PY3:
@register_builtin((1, 2, 3), can_handle_deferred_types=True)
def xrange_(context, node, start, stop, step):
return range_(context, node, start, stop, step)
@register_builtin(1)
def len_(context, node, obj):
# Simplify len(array) to ndarray.shape[0]
argtype = get_type(obj)
if argtype.is_array:
shape_attr = nodes.ArrayAttributeNode('shape', node.args[0])
new_node = nodes.index(shape_attr, 0)
return new_node
return Py_ssize_t
@register_builtin((0, 1, 2), can_handle_deferred_types=True)
def _int(context, node, x, base, dst_type=int_):
# Resolve int(x) and float(x) to an equivalent cast
if len(node.args) < 2:
return cast(node, dst_type)
node.variable = Variable(dst_type)
return node
if not PY3:
@register_builtin((0, 1, 2), can_handle_deferred_types=True)
def _long(context, node, x, base):
return _int(context, node, x, base)
@register_builtin((0, 1), can_handle_deferred_types=True)
def _float(context, node, x):
return cast(node, double)
@register_builtin((0, 1, 2), can_handle_deferred_types=True)
def complex_(context, node, a, b):
if len(node.args) == 2:
args = nodes.CoercionNode.coerce(node.args, double)
return nodes.ComplexNode(real=args[0], imag=args[1])
else:
return cast(node, complex128)
def abstype(argtype):
if argtype.is_complex:
result_type = double
elif argtype.is_float or argtype.is_int:
result_type = argtype
else:
result_type = object_
return result_type
@register_builtin(1)
def abs_(context, node, x):
node.variable = Variable(abstype(get_type(x)))
return node
@register_builtin((2, 3))
def pow_(context, node, base, exponent, mod):
from . import mathmodule
return mathmodule.pow_(context, node, base, exponent)
@register_builtin((1, 2))
def round_(context, node, number, ndigits):
# is_math = is_math_function(node.args, round)
argtype = get_type(number)
if len(node.args) == 1 and argtype.is_int:
# round(myint) -> float(myint)
return nodes.CoercionNode(node.args[0], double)
if argtype.is_float or argtype.is_int:
dst_type = double
else:
dst_type = object_
node.args[0] = nodes.CoercionNode(node.args[0], object_)
node.variable = Variable(dst_type)
return node # nodes.CoercionNode(node, double)
@register_builtin(0)
def globals_(context, node):
return typesystem.dict_
# return nodes.ObjectInjectNode(func.__globals__)
@register_builtin(0)
def locals_(context, node):
raise error.NumbaError("locals() is not supported in numba functions")
| # -*- coding: utf-8 -*-
"""
Type functions for Python builtins.
"""
from __future__ import print_function, division, absolute_import
from numba import *
from numba import nodes
from numba import error
# from numba import function_util
# from numba.specialize.mathcalls import is_math_function
from numba.symtab import Variable
from numba import typesystem
from numba.typesystem import is_obj, promote_closest, get_type
from numba.type_inference.modules import utils
#----------------------------------------------------------------------------
# Utilities
#----------------------------------------------------------------------------
register_builtin = utils.register_with_argchecking
def cast(node, dst_type):
if len(node.args) == 0:
return nodes.ConstNode(0, dst_type)
else:
return nodes.CoercionNode(node.args[0], dst_type=dst_type)
#----------------------------------------------------------------------------
# Type Functions for Builtins
#----------------------------------------------------------------------------
# TODO: add specializer functions to insert coercions before late specialization
# TODO: don't rewrite AST here
@register_builtin((1, 2, 3), can_handle_deferred_types=True)
def range_(context, node, start, stop, step):
node.variable = Variable(typesystem.RangeType())
node.args = nodes.CoercionNode.coerce(node.args, dst_type=Py_ssize_t)
return node
if not PY3:
@register_builtin((1, 2, 3), can_handle_deferred_types=True)
def xrange_(context, node, start, stop, step):
return range_(context, node, start, stop, step)
@register_builtin(1)
def len_(context, node, obj):
# Simplify len(array) to ndarray.shape[0]
argtype = get_type(obj)
if argtype.is_array:
shape_attr = nodes.ArrayAttributeNode('shape', node.args[0])
new_node = nodes.index(shape_attr, 0)
return new_node
return Py_ssize_t
@register_builtin((0, 1, 2), can_handle_deferred_types=True)
def _int(context, node, x, base, dst_type=int_):
# Resolve int(x) and float(x) to an equivalent cast
if len(node.args) < 2:
return cast(node, dst_type)
node.variable = Variable(dst_type)
return node
if not PY3:
@register_builtin((0, 1, 2), can_handle_deferred_types=True)
def _long(context, node, x, base):
return _int(context, node, x, base)
@register_builtin((0, 1), can_handle_deferred_types=True)
def _float(context, node, x):
return cast(node, double)
@register_builtin((0, 1, 2), can_handle_deferred_types=True)
def complex_(context, node, a, b):
if len(node.args) == 2:
args = nodes.CoercionNode.coerce(node.args, double)
return nodes.ComplexNode(real=args[0], imag=args[1])
else:
return cast(node, complex128)
def abstype(argtype):
if argtype.is_complex:
result_type = double
elif argtype.is_float or argtype.is_int:
result_type = argtype
else:
result_type = object_
return result_type
@register_builtin(1)
def abs_(context, node, x):
node.variable = Variable(abstype(get_type(x)))
return node
@register_builtin((2, 3))
def pow_(context, node, base, exponent, mod):
from . import mathmodule
return mathmodule.pow_(context, node, base, exponent)
@register_builtin((1, 2))
def round_(context, node, number, ndigits):
# is_math = is_math_function(node.args, round)
argtype = get_type(number)
if len(node.args) == 1 and argtype.is_int:
# round(myint) -> float(myint)
return nodes.CoercionNode(node.args[0], double)
if argtype.is_float or argtype.is_int:
dst_type = double
else:
dst_type = object_
node.args[0] = nodes.CoercionNode(node.args[0], object_)
node.variable = Variable(dst_type)
return node # nodes.CoercionNode(node, double)
@register_builtin(0)
def globals_(context, node):
return typesystem.dict_
# return nodes.ObjectInjectNode(func.__globals__)
@register_builtin(0)
def locals_(context, node):
raise error.NumbaError("locals() is not supported in numba functions") | en | 0.323354 | # -*- coding: utf-8 -*- Type functions for Python builtins. # from numba import function_util # from numba.specialize.mathcalls import is_math_function #---------------------------------------------------------------------------- # Utilities #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- # Type Functions for Builtins #---------------------------------------------------------------------------- # TODO: add specializer functions to insert coercions before late specialization # TODO: don't rewrite AST here # Simplify len(array) to ndarray.shape[0] # Resolve int(x) and float(x) to an equivalent cast # is_math = is_math_function(node.args, round) # round(myint) -> float(myint) # nodes.CoercionNode(node, double) # return nodes.ObjectInjectNode(func.__globals__) | 2.32505 | 2 |
jupiter/remote/__init__.py | horia141/jupiter | 15 | 6612562 | """The remote stack of synchronisation with other sorts of systems."""
| """The remote stack of synchronisation with other sorts of systems."""
| en | 0.823032 | The remote stack of synchronisation with other sorts of systems. | 0.937861 | 1 |
match_answer.py | jianglangcaisheng/answer_AI | 0 | 6612563 | from skimage import io
import os
import numpy as np
DEBUG = 0
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if DEBUG:
print(BASE_DIR)
PICS_DIR = os.path.join(BASE_DIR,"..\\pics\\test_match")
if DEBUG:
print(PICS_DIR)
GREY = [247, 247, 247]
GREEN = [148, 211, 77]
WHITE = [255, 255, 255]
vertex_top = 1233
vertex_left = 174
box_width_all = 735
box_height_all = 112
start_top = 1257
start_left = 352
box_width = int(735 / 2)
box_height = int(112 * 2/3)
interval_height = int((1738 - 1233) / 3)
question_pos = [1054, 1215, 59, 1000]
def crop_answer(whole_img):
answer_1 = whole_img[start_top+interval_height*0:start_top+box_height+interval_height*0, start_left:start_left+box_width, 0:3]
answer_2 = whole_img[start_top+interval_height*1:start_top+box_height+interval_height*1, start_left:start_left+box_width, 0:3]
answer_3 = whole_img[start_top+interval_height*2:start_top+box_height+interval_height*2, start_left:start_left+box_width, 0:3]
answer_4 = whole_img[start_top+interval_height*3:start_top+box_height+interval_height*3, start_left:start_left+box_width, 0:3]
return answer_1, answer_2, answer_3, answer_4
def cal_num_scalar(image, color):
num =0
for loop in range(image.shape[0]):
for loop2 in range(image.shape[1]):
if image[loop][loop2][0] == color[0] :# and image[loop][loop2][1] == color[1] and image[loop][loop2][2] == color[2]:
continue
else:
#print(image[loop][loop2][0:3])
num = num+1
return num
def cal_num(image, color):
num = 0
image_useful = image[:, :, 0] != color[0]
num = np.sum(np.sum(image_useful))
return int(num)
def cal_num_cat(image, color):
if 0:
height_split = int(image.shape[0]/3)
num = ""
for i in range(3):
image_useful = image[height_split * i:height_split * (i+1), :, 0] != color[0]
num1 = np.sum(np.sum(image_useful))
num += str(num1)
return int(np.int(num))
else:
width_split = int(image.shape[1]/2)
data_str = ""
for i in range(2):
image_useful = image[:, width_split * i:width_split * (i+1), 0] != color[0]
num = np.sum(np.sum(image_useful))
num_str = str(num)
if num_str.__len__() == 1:
num_str = "0000" + num_str
elif num_str.__len__() == 2:
num_str = "000" + num_str
elif num_str.__len__() == 3:
num_str = "00" + num_str
elif num_str.__len__() == 4:
num_str = "0" + num_str
elif num_str.__len__() == 5:
pass
else:
assert False, "num_str length error. length: %d" % num_str.__len__()
data_str += num_str
return data_str
def cal_num1(image, color):
num =0
for loop in range(image.shape[0]):
for loop2 in range(image.shape[1]):
if sum(image[loop][loop2][0:3] == color) == 3:
continue
else:
#print(image[loop][loop2][0:3])
num = num+1
return num
def selection(correct_loss, loss1, loss2, loss3, loss4):
a = np.array([loss1, loss2, loss3, loss4])
a = np.abs(a-correct_loss)
sort_id = np.argmin(a)
#print("selection: ",a, sort_id)
return sort_id
def selection_str(correct_loss, loss1, loss2, loss3, loss4):
def split_str(loss):
loss_1 = loss[0:5]
loss_2 = loss[5:10]
out = np.zeros(shape=(1, 2))
out[0, 0] = int(loss_1)
out[0, 1] = int(loss_2)
return out
a = np.concatenate([split_str(loss1), split_str(loss2), split_str(loss3), split_str(loss4)], axis=0)
a = np.abs(a-split_str(correct_loss))
b = np.max(a, axis=1)
sort_id = np.argmin(b)
# print("selection: ",b, sort_id)
return sort_id
def selection_str_rValue(correct_loss, loss1, loss2, loss3, loss4):
def split_str(loss):
loss_1 = loss[0:5]
loss_2 = loss[5:10]
out = np.zeros(shape=(1, 2))
try:
out[0, 0] = int(loss_1)
out[0, 1] = int(loss_2)
except ValueError:
print(loss)
assert False, "ValueError"
return out
a = np.concatenate([split_str(loss1), split_str(loss2), split_str(loss3), split_str(loss4)], axis=0)
a = np.abs(a-split_str(correct_loss))
b = np.max(a, axis=1)
sort_id = np.argmin(b)
# print("selection: ",b, sort_id)
return [sort_id, b[sort_id]]
if __name__ == "__main__":
#img_label_green_2 = io.imread(os.path.join(PICS_DIR,"answer_1.png"))
#img_question = io.imread(os.path.join(PICS_DIR,"question_0.png"))
#img_question_2 = io.imread(os.path.join(PICS_DIR,"question_1.png"))
#img_whole_green = io.imread(os.path.join(PICS_DIR,"autojump_1.png"))
##raw grey image
img_whole_grey = io.imread(os.path.join(PICS_DIR,"autojump_0.png"))
##crop question and answer,and get descriptor
question = img_whole_grey[question_pos[0]:question_pos[1], question_pos[2]:question_pos[3],0:3]
correct_question = cal_num(question, WHITE)
## another raw image
img_whole_grey = io.imread(os.path.join(PICS_DIR,"autojump_1.png"))
##crop question and answer,and get descriptor
question_new = img_whole_grey[question_pos[0]:question_pos[1], question_pos[2]:question_pos[3],0:3]
correct_question_new = cal_num(question, WHITE)
#########
io.imshow(question-question_new)
answer_1, answer_2, answer_3, answer_4 = crop_answer(img_whole_grey)
loss1 = cal_num(answer_1, GREY)
loss2 = cal_num(answer_2, GREY)
loss3 = cal_num(answer_3, GREY)
loss4 = cal_num(answer_4, GREY)
##calculate library's key value(questions')
img_question = io.imread(os.path.join(PICS_DIR,"question_0.png"))
loss_ques = cal_num(img_question, WHITE)
correct_answer = io.imread(os.path.join(PICS_DIR,"answer_0.png"))
correct_loss = cal_num(correct_answer, GREEN)
id = selection(correct_loss, loss1, loss2, loss3, loss4)
print(id)
#i=3
#img_label_grey_first = img_whole_grey[start_top+interval_height*i:start_top+box_height+interval_height*i, start_left:start_left+box_width, 0:3]
#img_label_grey_second = img_whole_green[start_top+interval_height*i:start_top+box_height+interval_height*i, start_left:start_left+box_width, 0:3]
#io.imshow(-img_label_grey_second+img_label_grey_first)
#io.imshow(img_label_grey_second-img_label_grey_first)
#label_num_pixel = cal_num(img_label_green, GREEN)
#print("LABEL_NUM_PIXEL: ", label_num_pixel)
#
#
#label_num_pixel_2 = cal_num(img_label_green_2, GREEN)
#print("LABEL_NUM_PIXEL_2: ", label_num_pixel_2)
#
#label_num_pixel_3 = cal_num(img_label_green_3, GREEN)
#print("LABEL_NUM_PIXEL_3: ", label_num_pixel_3)
#
#Q_num_pixel = cal_num(img_question, WHITE)
#print("Q_NUM_PIXEL: ", Q_num_pixel)
#
#label_num_pixel_grey = cal_num(img_label_grey, GREY)
#print("LABEL_NUM_PIXEL_GREY: ", label_num_pixel_grey)
#
#label_num_pixel_grey_first = cal_num(img_label_grey_first, GREY)
#print("LABEL_NUM_PIXEL_GREY_F: ", label_num_pixel_grey_first)
#
#label_num_pixel_grey_second = cal_num(img_label_grey_second, GREEN)
#print("LABEL_NUM_PIXEL_GREY_S: ", label_num_pixel_grey_second)
| from skimage import io
import os
import numpy as np
DEBUG = 0
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if DEBUG:
print(BASE_DIR)
PICS_DIR = os.path.join(BASE_DIR,"..\\pics\\test_match")
if DEBUG:
print(PICS_DIR)
GREY = [247, 247, 247]
GREEN = [148, 211, 77]
WHITE = [255, 255, 255]
vertex_top = 1233
vertex_left = 174
box_width_all = 735
box_height_all = 112
start_top = 1257
start_left = 352
box_width = int(735 / 2)
box_height = int(112 * 2/3)
interval_height = int((1738 - 1233) / 3)
question_pos = [1054, 1215, 59, 1000]
def crop_answer(whole_img):
answer_1 = whole_img[start_top+interval_height*0:start_top+box_height+interval_height*0, start_left:start_left+box_width, 0:3]
answer_2 = whole_img[start_top+interval_height*1:start_top+box_height+interval_height*1, start_left:start_left+box_width, 0:3]
answer_3 = whole_img[start_top+interval_height*2:start_top+box_height+interval_height*2, start_left:start_left+box_width, 0:3]
answer_4 = whole_img[start_top+interval_height*3:start_top+box_height+interval_height*3, start_left:start_left+box_width, 0:3]
return answer_1, answer_2, answer_3, answer_4
def cal_num_scalar(image, color):
num =0
for loop in range(image.shape[0]):
for loop2 in range(image.shape[1]):
if image[loop][loop2][0] == color[0] :# and image[loop][loop2][1] == color[1] and image[loop][loop2][2] == color[2]:
continue
else:
#print(image[loop][loop2][0:3])
num = num+1
return num
def cal_num(image, color):
num = 0
image_useful = image[:, :, 0] != color[0]
num = np.sum(np.sum(image_useful))
return int(num)
def cal_num_cat(image, color):
if 0:
height_split = int(image.shape[0]/3)
num = ""
for i in range(3):
image_useful = image[height_split * i:height_split * (i+1), :, 0] != color[0]
num1 = np.sum(np.sum(image_useful))
num += str(num1)
return int(np.int(num))
else:
width_split = int(image.shape[1]/2)
data_str = ""
for i in range(2):
image_useful = image[:, width_split * i:width_split * (i+1), 0] != color[0]
num = np.sum(np.sum(image_useful))
num_str = str(num)
if num_str.__len__() == 1:
num_str = "0000" + num_str
elif num_str.__len__() == 2:
num_str = "000" + num_str
elif num_str.__len__() == 3:
num_str = "00" + num_str
elif num_str.__len__() == 4:
num_str = "0" + num_str
elif num_str.__len__() == 5:
pass
else:
assert False, "num_str length error. length: %d" % num_str.__len__()
data_str += num_str
return data_str
def cal_num1(image, color):
num =0
for loop in range(image.shape[0]):
for loop2 in range(image.shape[1]):
if sum(image[loop][loop2][0:3] == color) == 3:
continue
else:
#print(image[loop][loop2][0:3])
num = num+1
return num
def selection(correct_loss, loss1, loss2, loss3, loss4):
a = np.array([loss1, loss2, loss3, loss4])
a = np.abs(a-correct_loss)
sort_id = np.argmin(a)
#print("selection: ",a, sort_id)
return sort_id
def selection_str(correct_loss, loss1, loss2, loss3, loss4):
def split_str(loss):
loss_1 = loss[0:5]
loss_2 = loss[5:10]
out = np.zeros(shape=(1, 2))
out[0, 0] = int(loss_1)
out[0, 1] = int(loss_2)
return out
a = np.concatenate([split_str(loss1), split_str(loss2), split_str(loss3), split_str(loss4)], axis=0)
a = np.abs(a-split_str(correct_loss))
b = np.max(a, axis=1)
sort_id = np.argmin(b)
# print("selection: ",b, sort_id)
return sort_id
def selection_str_rValue(correct_loss, loss1, loss2, loss3, loss4):
def split_str(loss):
loss_1 = loss[0:5]
loss_2 = loss[5:10]
out = np.zeros(shape=(1, 2))
try:
out[0, 0] = int(loss_1)
out[0, 1] = int(loss_2)
except ValueError:
print(loss)
assert False, "ValueError"
return out
a = np.concatenate([split_str(loss1), split_str(loss2), split_str(loss3), split_str(loss4)], axis=0)
a = np.abs(a-split_str(correct_loss))
b = np.max(a, axis=1)
sort_id = np.argmin(b)
# print("selection: ",b, sort_id)
return [sort_id, b[sort_id]]
if __name__ == "__main__":
#img_label_green_2 = io.imread(os.path.join(PICS_DIR,"answer_1.png"))
#img_question = io.imread(os.path.join(PICS_DIR,"question_0.png"))
#img_question_2 = io.imread(os.path.join(PICS_DIR,"question_1.png"))
#img_whole_green = io.imread(os.path.join(PICS_DIR,"autojump_1.png"))
##raw grey image
img_whole_grey = io.imread(os.path.join(PICS_DIR,"autojump_0.png"))
##crop question and answer,and get descriptor
question = img_whole_grey[question_pos[0]:question_pos[1], question_pos[2]:question_pos[3],0:3]
correct_question = cal_num(question, WHITE)
## another raw image
img_whole_grey = io.imread(os.path.join(PICS_DIR,"autojump_1.png"))
##crop question and answer,and get descriptor
question_new = img_whole_grey[question_pos[0]:question_pos[1], question_pos[2]:question_pos[3],0:3]
correct_question_new = cal_num(question, WHITE)
#########
io.imshow(question-question_new)
answer_1, answer_2, answer_3, answer_4 = crop_answer(img_whole_grey)
loss1 = cal_num(answer_1, GREY)
loss2 = cal_num(answer_2, GREY)
loss3 = cal_num(answer_3, GREY)
loss4 = cal_num(answer_4, GREY)
##calculate library's key value(questions')
img_question = io.imread(os.path.join(PICS_DIR,"question_0.png"))
loss_ques = cal_num(img_question, WHITE)
correct_answer = io.imread(os.path.join(PICS_DIR,"answer_0.png"))
correct_loss = cal_num(correct_answer, GREEN)
id = selection(correct_loss, loss1, loss2, loss3, loss4)
print(id)
#i=3
#img_label_grey_first = img_whole_grey[start_top+interval_height*i:start_top+box_height+interval_height*i, start_left:start_left+box_width, 0:3]
#img_label_grey_second = img_whole_green[start_top+interval_height*i:start_top+box_height+interval_height*i, start_left:start_left+box_width, 0:3]
#io.imshow(-img_label_grey_second+img_label_grey_first)
#io.imshow(img_label_grey_second-img_label_grey_first)
#label_num_pixel = cal_num(img_label_green, GREEN)
#print("LABEL_NUM_PIXEL: ", label_num_pixel)
#
#
#label_num_pixel_2 = cal_num(img_label_green_2, GREEN)
#print("LABEL_NUM_PIXEL_2: ", label_num_pixel_2)
#
#label_num_pixel_3 = cal_num(img_label_green_3, GREEN)
#print("LABEL_NUM_PIXEL_3: ", label_num_pixel_3)
#
#Q_num_pixel = cal_num(img_question, WHITE)
#print("Q_NUM_PIXEL: ", Q_num_pixel)
#
#label_num_pixel_grey = cal_num(img_label_grey, GREY)
#print("LABEL_NUM_PIXEL_GREY: ", label_num_pixel_grey)
#
#label_num_pixel_grey_first = cal_num(img_label_grey_first, GREY)
#print("LABEL_NUM_PIXEL_GREY_F: ", label_num_pixel_grey_first)
#
#label_num_pixel_grey_second = cal_num(img_label_grey_second, GREEN)
#print("LABEL_NUM_PIXEL_GREY_S: ", label_num_pixel_grey_second)
| en | 0.419425 | # and image[loop][loop2][1] == color[1] and image[loop][loop2][2] == color[2]: #print(image[loop][loop2][0:3]) #print(image[loop][loop2][0:3]) #print("selection: ",a, sort_id) # print("selection: ",b, sort_id) # print("selection: ",b, sort_id) #img_label_green_2 = io.imread(os.path.join(PICS_DIR,"answer_1.png")) #img_question = io.imread(os.path.join(PICS_DIR,"question_0.png")) #img_question_2 = io.imread(os.path.join(PICS_DIR,"question_1.png")) #img_whole_green = io.imread(os.path.join(PICS_DIR,"autojump_1.png")) ##raw grey image ##crop question and answer,and get descriptor ## another raw image ##crop question and answer,and get descriptor ######### ##calculate library's key value(questions') #i=3 #img_label_grey_first = img_whole_grey[start_top+interval_height*i:start_top+box_height+interval_height*i, start_left:start_left+box_width, 0:3] #img_label_grey_second = img_whole_green[start_top+interval_height*i:start_top+box_height+interval_height*i, start_left:start_left+box_width, 0:3] #io.imshow(-img_label_grey_second+img_label_grey_first) #io.imshow(img_label_grey_second-img_label_grey_first) #label_num_pixel = cal_num(img_label_green, GREEN) #print("LABEL_NUM_PIXEL: ", label_num_pixel) # # #label_num_pixel_2 = cal_num(img_label_green_2, GREEN) #print("LABEL_NUM_PIXEL_2: ", label_num_pixel_2) # #label_num_pixel_3 = cal_num(img_label_green_3, GREEN) #print("LABEL_NUM_PIXEL_3: ", label_num_pixel_3) # #Q_num_pixel = cal_num(img_question, WHITE) #print("Q_NUM_PIXEL: ", Q_num_pixel) # #label_num_pixel_grey = cal_num(img_label_grey, GREY) #print("LABEL_NUM_PIXEL_GREY: ", label_num_pixel_grey) # #label_num_pixel_grey_first = cal_num(img_label_grey_first, GREY) #print("LABEL_NUM_PIXEL_GREY_F: ", label_num_pixel_grey_first) # #label_num_pixel_grey_second = cal_num(img_label_grey_second, GREEN) #print("LABEL_NUM_PIXEL_GREY_S: ", label_num_pixel_grey_second) | 2.530427 | 3 |
test/test_random_forest.py | upul/ML-Workbench | 1 | 6612564 | <gh_stars>1-10
import numpy as np
from indi.ensemble import RandomForestClassifier
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([0, 0, 0, 1, 1, 1])
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = np.array([0, 1, 1])
def test_random_forest_classifier():
#cls = RandomForestClassifier(max_depth=5, n_trees=120, n_trials=1)
#cls.fit(X, y)
#print(cls.predict(T))
from sklearn.datasets.samples_generator import make_blobs
import matplotlib.pylab as plt
import seaborn as sbs;
n_samples = 5000
X, y = make_blobs(n_samples=n_samples, centers=2, n_features=2,
cluster_std=0.62, random_state=125)
#X, y = make_blobs(n_samples=300, centers=4,
#random_state=0, cluster_std=1.0)
cls = RandomForestClassifier(max_depth=125, n_trees=50, n_trials=1, n_min_leaf=1)
cls.fit(X, y)
#cls.visualize('./test.png')
h = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
np.linspace(y_min, y_max, 100))
# Z = []
data = np.c_[xx.ravel(), yy.ravel()]
Z = cls.predict(data)
# Z = np.array(Z)
Z = Z.reshape(xx.shape)
_, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(14, 6))
ax1.scatter(X[:, 0], X[:, 1], c=y, alpha=0.5, edgecolors='none', s=45, cmap=plt.cm.Spectral)
ax2.contourf(xx, yy, Z, alpha=0.5, cmap=plt.cm.Spectral)
ax2.scatter(X[:, 0], X[:, 1], c=y, alpha=0.5, edgecolors='none', cmap=plt.cm.Spectral, s=45)
plt.xlim(X[:, 0].min(), X[:, 0].max())
plt.ylim(X[:, 1].min(), X[:, 1].max())
plt.show()
import sklearn.ensemble
clf = sklearn.ensemble.RandomForestClassifier()
clf.fit(X, y)
h = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
np.linspace(y_min, y_max, 100))
# Z = []
data = np.c_[xx.ravel(), yy.ravel()]
Z = clf.predict(data)
# Z = np.array(Z)
Z = Z.reshape(xx.shape)
_, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(14, 6))
ax1.scatter(X[:, 0], X[:, 1], c=y, alpha=0.5, edgecolors='none', s=45, cmap=plt.cm.Spectral)
ax2.contourf(xx, yy, Z, alpha=0.5, cmap=plt.cm.Spectral)
ax2.scatter(X[:, 0], X[:, 1], c=y, alpha=0.5, edgecolors='none', cmap=plt.cm.Spectral, s=45)
plt.xlim(X[:, 0].min(), X[:, 0].max())
plt.ylim(X[:, 1].min(), X[:, 1].max())
plt.show()
| import numpy as np
from indi.ensemble import RandomForestClassifier
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([0, 0, 0, 1, 1, 1])
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = np.array([0, 1, 1])
def test_random_forest_classifier():
#cls = RandomForestClassifier(max_depth=5, n_trees=120, n_trials=1)
#cls.fit(X, y)
#print(cls.predict(T))
from sklearn.datasets.samples_generator import make_blobs
import matplotlib.pylab as plt
import seaborn as sbs;
n_samples = 5000
X, y = make_blobs(n_samples=n_samples, centers=2, n_features=2,
cluster_std=0.62, random_state=125)
#X, y = make_blobs(n_samples=300, centers=4,
#random_state=0, cluster_std=1.0)
cls = RandomForestClassifier(max_depth=125, n_trees=50, n_trials=1, n_min_leaf=1)
cls.fit(X, y)
#cls.visualize('./test.png')
h = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
np.linspace(y_min, y_max, 100))
# Z = []
data = np.c_[xx.ravel(), yy.ravel()]
Z = cls.predict(data)
# Z = np.array(Z)
Z = Z.reshape(xx.shape)
_, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(14, 6))
ax1.scatter(X[:, 0], X[:, 1], c=y, alpha=0.5, edgecolors='none', s=45, cmap=plt.cm.Spectral)
ax2.contourf(xx, yy, Z, alpha=0.5, cmap=plt.cm.Spectral)
ax2.scatter(X[:, 0], X[:, 1], c=y, alpha=0.5, edgecolors='none', cmap=plt.cm.Spectral, s=45)
plt.xlim(X[:, 0].min(), X[:, 0].max())
plt.ylim(X[:, 1].min(), X[:, 1].max())
plt.show()
import sklearn.ensemble
clf = sklearn.ensemble.RandomForestClassifier()
clf.fit(X, y)
h = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
np.linspace(y_min, y_max, 100))
# Z = []
data = np.c_[xx.ravel(), yy.ravel()]
Z = clf.predict(data)
# Z = np.array(Z)
Z = Z.reshape(xx.shape)
_, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(14, 6))
ax1.scatter(X[:, 0], X[:, 1], c=y, alpha=0.5, edgecolors='none', s=45, cmap=plt.cm.Spectral)
ax2.contourf(xx, yy, Z, alpha=0.5, cmap=plt.cm.Spectral)
ax2.scatter(X[:, 0], X[:, 1], c=y, alpha=0.5, edgecolors='none', cmap=plt.cm.Spectral, s=45)
plt.xlim(X[:, 0].min(), X[:, 0].max())
plt.ylim(X[:, 1].min(), X[:, 1].max())
plt.show() | en | 0.206156 | #cls = RandomForestClassifier(max_depth=5, n_trees=120, n_trials=1) #cls.fit(X, y) #print(cls.predict(T)) #X, y = make_blobs(n_samples=300, centers=4, #random_state=0, cluster_std=1.0) #cls.visualize('./test.png') # Z = [] # Z = np.array(Z) # Z = [] # Z = np.array(Z) | 2.59841 | 3 |
fbchat/_core.py | googlesky/fbchat | 0 | 6612565 | <filename>fbchat/_core.py
import sys
import attr
import logging
log = logging.getLogger("fbchat")
# Enable kw_only if the python version supports it
kw_only = sys.version_info[:2] > (3, 5)
#: Default attrs settings for classes
attrs_default = attr.s(slots=True, kw_only=kw_only)
# Frozen, so that it can be used in sets
@attr.s(frozen=True, slots=True, kw_only=kw_only)
class Image:
#: URL to the image
url = attr.ib(type=str)
#: Width of the image
width = attr.ib(None, type=int)
#: Height of the image
height = attr.ib(None, type=int)
@classmethod
def _from_uri(cls, data):
return cls(
url=data["uri"],
width=int(data["width"]) if data.get("width") else None,
height=int(data["height"]) if data.get("height") else None,
)
@classmethod
def _from_url(cls, data):
return cls(
url=data["url"],
width=int(data["width"]) if data.get("width") else None,
height=int(data["height"]) if data.get("height") else None,
)
@classmethod
def _from_uri_or_none(cls, data):
if data is None:
return None
if data.get("uri") is None:
return None
return cls._from_uri(data)
@classmethod
def _from_url_or_none(cls, data):
if data is None:
return None
if data.get("url") is None:
return None
return cls._from_url(data)
| <filename>fbchat/_core.py
import sys
import attr
import logging
log = logging.getLogger("fbchat")
# Enable kw_only if the python version supports it
kw_only = sys.version_info[:2] > (3, 5)
#: Default attrs settings for classes
attrs_default = attr.s(slots=True, kw_only=kw_only)
# Frozen, so that it can be used in sets
@attr.s(frozen=True, slots=True, kw_only=kw_only)
class Image:
#: URL to the image
url = attr.ib(type=str)
#: Width of the image
width = attr.ib(None, type=int)
#: Height of the image
height = attr.ib(None, type=int)
@classmethod
def _from_uri(cls, data):
return cls(
url=data["uri"],
width=int(data["width"]) if data.get("width") else None,
height=int(data["height"]) if data.get("height") else None,
)
@classmethod
def _from_url(cls, data):
return cls(
url=data["url"],
width=int(data["width"]) if data.get("width") else None,
height=int(data["height"]) if data.get("height") else None,
)
@classmethod
def _from_uri_or_none(cls, data):
if data is None:
return None
if data.get("uri") is None:
return None
return cls._from_uri(data)
@classmethod
def _from_url_or_none(cls, data):
if data is None:
return None
if data.get("url") is None:
return None
return cls._from_url(data)
| en | 0.771881 | # Enable kw_only if the python version supports it #: Default attrs settings for classes # Frozen, so that it can be used in sets #: URL to the image #: Width of the image #: Height of the image | 2.289089 | 2 |
POP1/worksheets/on-lists/ex02/code.py | silvafj/BBK-MSCCS-2017-18 | 1 | 6612566 | <reponame>silvafj/BBK-MSCCS-2017-18
n = int(input())
a = [["." for j in range(n)] for i in range(n)]
middle = n // 2
for i in range(n):
a[i][middle] = a[middle][i] = "*"
a[i][i] = a[i][n-i-1] = "*"
for row in a:
print(' '.join(row))
| n = int(input())
a = [["." for j in range(n)] for i in range(n)]
middle = n // 2
for i in range(n):
a[i][middle] = a[middle][i] = "*"
a[i][i] = a[i][n-i-1] = "*"
for row in a:
print(' '.join(row)) | none | 1 | 3.596593 | 4 | |
pohmm_keystroke/classify.py | vmonaco/pohmm-keystroke | 6 | 6612567 | import numpy as np
import pandas as pd
from pohmm import Pohmm
from scipy import interp
from itertools import chain
from scipy.stats import wilcoxon
from sklearn.svm import OneClassSVM
from sklearn.mixture import GMM
from sklearn.metrics import auc, accuracy_score
from .io import load_data, load_results, save_results, ProgressBar
from .data import preprocess_data, MOBILE_SENSORS, DATASETS
from .plotting import *
def leave_one_out(samples_per_user):
    """Build leave-one-out validation folds for one user.

    For each sample index i, yields a triple of integer index arrays:
    (all indices except i, [i], [i]).  The triple presumably corresponds
    to (reference, genuine, impostor) sample indices, matching the fold
    structure consumed by cv_session_scores -- confirm against callers.
    """
    return [
        (np.concatenate((np.arange(held_out),
                         np.arange(held_out + 1, samples_per_user))),
         np.array([held_out]),
         np.array([held_out]))
        for held_out in range(samples_per_user)
    ]
# Per-dataset validation schemes: each value is a list of folds, and each
# fold is a triple of index arrays -- presumably (reference, genuine,
# impostor) sample indices (cf. the fold structure used by
# cv_session_scores).  All datasets except 'password' use leave-one-out
# over their fixed number of samples per user.
VALIDATION = dict(
    password=[(np.arange(150, 200), np.arange(200, 400), np.arange(200, 400))],
    keypad=leave_one_out(20),
    fixed_text=leave_one_out(4),
    free_text=leave_one_out(6),
    mobile=leave_one_out(20),
)
def pohmm_factory(df):
    """Construct and fit a two-hidden-state POHMM on one user's data.

    Timing features ('tau', 'duration') get lognormal emission
    distributions; every other feature column gets a normal emission.
    The 'event' column is excluded from the emissions.  The model is
    fitted on the groups formed by the first two index levels of *df*
    (presumably user and session -- verify against the data layout).
    """
    TIMING_FEATURES = ('tau', 'duration')
    emissions = [
        (col, 'lognormal' if col in TIMING_FEATURES else 'normal')
        for col in df.columns.difference(['event'])
    ]
    model = Pohmm(n_hidden_states=2, init_spread=2, thresh=1e-6, max_iter=1000,
                  emissions=emissions, smoothing='freq')
    # Fit on the tuple of per-group sub-frames.
    model.fit_df(list(zip(*df.groupby(level=[0, 1])))[1])
    return model
def stratified_kfold(df, nfolds):
    """Split ``df`` into ``nfolds`` stratified folds.

    Session labels are shuffled independently per user (level 0 of the index)
    and dealt out so each fold holds an equal share of every user's sessions.

    Args:
        df: DataFrame indexed by (user, session).
        nfolds: number of folds to create.

    Returns:
        List of ``nfolds`` DataFrames, each a disjoint subset of ``df``.
    """
    # Series mapping each user to an array of that user's session labels,
    # shuffled in place.
    sessions = pd.DataFrame.from_records(list(df.index.unique())).groupby(0).apply(lambda x: x[1].unique())
    sessions.apply(lambda x: np.random.shuffle(x))

    folds = []
    for i in range(nfolds):
        # Integer division for the slice bounds: the original float division
        # ('len(x) / nfolds') raises TypeError under Python 3.
        per_fold = sessions.apply(
            lambda x: pd.Series(x[i * (len(x) // nfolds):(i + 1) * (len(x) // nfolds)]))
        idx = pd.DataFrame(per_fold.stack().reset_index(level=1, drop=True)).set_index(0, append=True).index.values
        folds.append(df.loc[idx])
    return folds
def cv_session_scores(folds, model_factory):
    """
    Score every query session against every reference model, fold by fold.

    Args:
        folds: list of (reference, genuine, impostor) DataFrame triples; the
            reference frame is indexed by (user, session), the query frames
            by (reference_user, query_user, session).
        model_factory: callable taking all template samples of one user and
            returning a fitted model exposing ``score_df``.

    Returns:
        DataFrame with columns [fold, reference_user, query_user,
        query_session, score], one row per (reference model, query session).
    """
    results = []
    n_folds = len(folds)
    for i in range(n_folds):
        print('\nFold %d of %d' % (i + 1, n_folds))
        reference, genuine, impostor = folds[i]
        reference_users = reference.index.get_level_values(0).unique()

        # One unit of work per model fit plus one per query session scored.
        work_done = 0
        work = len(reference_users) + len(genuine.index.unique()) + len(impostor.index.unique())
        progress = ProgressBar(work)

        # Fit one model per reference user from that user's template sessions.
        models = {}
        for reference_user, reference_data in reference.groupby(level=[0]):
            models[reference_user] = model_factory(reference_data)
            work_done += 1
            progress.animate(work_done)

        # Score every genuine and impostor query against the claimed reference model.
        for (reference_user, query_user, query_session), query_data in chain(genuine.groupby(level=[0, 1, 2]),
                                                                             impostor.groupby(level=[0, 1, 2])):
            results.append((i, reference_user, query_user, query_session,
                            models[reference_user].score_df(query_data)))
            work_done += 1
            progress.animate(work_done)
        print()

    scores = pd.DataFrame(results, columns=['fold', 'reference_user', 'query_user', 'query_session', 'score'])
    return scores
def model_scores(df, model):
    """Fit ``model`` to every index group of ``df`` and collect the log-likelihoods.

    Returns a DataFrame with a single 'loglik' column, one row per group.
    """
    # Group on every index level (or the single level of a flat index).
    level = list(range(df.index.nlevels)) if df.index.nlevels > 1 else 0
    fitted = df.groupby(level=level).apply(lambda group: model(group).logprob_)
    result = pd.DataFrame(fitted)
    result.columns = ['loglik']
    return result
def cv_event_scores(folds, model, show_progress=True):
    """
    Score every event of every query session against every reference model.

    Args:
        folds: list of (reference, genuine, impostor) DataFrame triples.
        model: callable taking all template samples of one user and returning
            a fitted model exposing ``score_events_df`` and
            ``predict_states_df``.
        show_progress: print a progress bar while fitting/scoring.

    Returns:
        DataFrame with one row per scored event and columns [fold,
        reference_user, query_user, query_session, event_idx, event, score,
        state, rank], where ``rank`` is the 0-based rank of the reference
        model at that event (0 = highest score).
    """
    scores = []
    n_folds = len(folds)
    for i in range(n_folds):
        if show_progress:
            print('\nFold %d of %d' % (i + 1, n_folds))
        reference, genuine, impostor = folds[i]
        reference_users = reference.index.get_level_values(0).unique()

        # One unit of work per model fit plus one per query session scored.
        work_done = 0
        work = len(reference_users) + len(genuine.index.unique()) + len(impostor.index.unique())
        progress = ProgressBar(work)
        if show_progress:
            progress.animate(work_done)

        # Fit one model per reference user from that user's template sessions.
        models = {}
        for reference_user, reference_data in reference.groupby(level=[0]):
            models[reference_user] = model(reference_data)
            work_done += 1
            if show_progress:
                progress.animate(work_done)

        for (reference_user, query_user, query_session), query_data in chain(genuine.groupby(level=[0, 1, 2]),
                                                                             impostor.groupby(level=[0, 1, 2])):
            # Per-event scores and decoded hidden states for this query session.
            score = models[reference_user].score_events_df(query_data.reset_index(drop=True))
            state = models[reference_user].predict_states_df(query_data.reset_index(drop=True))

            df = pd.DataFrame({'fold': i,
                               'reference_user': reference_user,
                               'query_user': query_user,
                               'query_session': query_session,
                               'event_idx': np.arange(len(query_data)),
                               'event': query_data['event'].values,
                               'score': score['score'],
                               'state': state['state'],
                               },
                              columns=['fold', 'reference_user', 'query_user', 'query_session', 'event_idx',
                                       'event', 'score', 'state'])
            scores.append(df)
            work_done += 1
            if show_progress:
                progress.animate(work_done)

    scores = pd.concat(scores).reset_index(drop=True)
    # Rank the competing reference models at each event (0 = best score).
    scores['rank'] = scores.groupby(['fold', 'query_user',
                                     'query_session', 'event_idx'])['score'].rank(ascending=False) - 1
    return scores
def normalize_session_scores(session_scores, pivot=['fold', 'query_user', 'query_session'], method='minmax', h=2):
    """Add an 'nscore' column normalized to [0, 1] within each ``pivot`` group.

    Args:
        session_scores: DataFrame with a 'score' column.
        pivot: columns defining one normalization group.
        method: 'minmax' (scale by group min/max), 'stddev' (scale by
            mean +/- h * std), or None (copy 'score' unchanged).
        h: half-width in standard deviations for the 'stddev' method.

    Returns:
        ``session_scores`` with the 'nscore' column added.

    Raises:
        ValueError: if ``method`` is not one of the supported choices.
    """
    def _norm(df):
        if method is None:
            df['nscore'] = df['score']
            return df
        if method == 'minmax':
            lower = df['score'].min()
            upper = df['score'].max()
        elif method == 'stddev':
            lower = df['score'].mean() - h * df['score'].std()
            upper = df['score'].mean() + h * df['score'].std()
        else:
            # Fail fast: the original fell through to a NameError here.
            raise ValueError('Unknown normalization method: %r' % (method,))
        # Clip to [0, 1] so out-of-range scores saturate instead of escaping.
        df['nscore'] = np.minimum(np.maximum((df['score'] - lower) / (upper - lower), 0), 1)
        return df

    session_scores = session_scores.groupby(pivot).apply(_norm)
    return session_scores
def session_identification(session_scores):
    """Identify each query session as the reference user with the highest score."""
    def _best_match(group):
        # Row (as a one-column Series) of the highest-scoring reference user.
        return group.iloc[group['score'].values.argmax()][['reference_user']]

    ide = session_scores.groupby(['fold', 'query_user', 'query_session']).apply(_best_match)
    ide.columns = ['result']
    return ide.reset_index()
def roc_curve(y_true, y_score):
    """Wrap ``sklearn.metrics.roc_curve``, returning (FAR, FRR, thresholds).

    FAR is sklearn's FPR unchanged; FRR is the miss rate, 1 - TPR.
    """
    from sklearn.metrics import roc_curve as _roc_curve

    far, tpr, thresholds = _roc_curve(y_true, y_score, drop_intermediate=True)
    frr = 1 - tpr
    return far, frr, thresholds
def session_roc(session_scores, pivot='fold'):
    """
    Derive one FAR/FRR-vs-threshold curve per ``pivot`` group.

    Each group's curve is interpolated onto the union of all observed
    thresholds so every group is sampled at the same threshold values.

    Returns:
        DataFrame with columns [pivot, threshold, far, frr].
    """
    # Generate an ROC curve for each group; [::-1] orders rows by
    # increasing threshold for the interpolation below.
    roc = session_scores.groupby(pivot).apply(
        lambda x: pd.DataFrame(np.c_[roc_curve((x['query_user'] == x['reference_user']).values.astype(np.int32),
                                               x['nscore'].values.astype(np.float32))][::-1],
                               columns=['far', 'frr', 'threshold']))

    # Interpolate to get the same threshold values in each group.
    thresholds = np.sort(roc['threshold'].unique())
    roc = roc.groupby(level=pivot).apply(lambda x: pd.DataFrame(np.c_[thresholds,
                                                                      interp(thresholds, x['threshold'], x['far']),
                                                                      interp(thresholds, x['threshold'], x['frr'])],
                                                                columns=['threshold', 'far', 'frr']))
    # Drop the inner row counter, keeping the pivot value as a column.
    roc = roc.reset_index(level=1, drop=True).reset_index()
    return roc
def continuous_identification(scores):
    """Identify the best-scoring reference user at every individual event."""
    def _best_match(group):
        # Row (as a one-column Series) of the highest-scoring reference user.
        return group.iloc[group['score'].values.argmax()][['reference_user']]

    ide = scores.groupby(['fold', 'query_user', 'query_session', 'event_idx']).apply(_best_match)
    ide.columns = ['result']
    return ide.reset_index()
def scores_penalty(scores, penalty_fun='sum', window=25):
    """
    Add a rolling penalty computed from per-event ranks.

    Over the trailing ``window`` events the penalty is either the sum of the
    ranks ('sum') or the sum of exp(rank) - 1 ('sumexp'); within the first
    ``window`` events, where the rolling window is incomplete, a cumulative
    sum is used instead.

    Returns:
        Copy of ``scores`` with a 'penalty' column added.
    """

    def _penalty(df):
        if penalty_fun == 'sum':
            p = df['rank'].rolling(window=window, center=False).sum()
            # The rolling sum is NaN for the first window-1 events; fall
            # back to a cumulative sum there.
            p[:window] = df['rank'].values[:window].cumsum()
        elif penalty_fun == 'sumexp':
            p = (np.exp(df['rank']) - 1).rolling(window=window, center=False).sum()
            p[:window] = (np.exp(df['rank']) - 1)[:window].cumsum()
        df['penalty'] = p
        return df

    penalty = scores.copy().groupby(['fold', 'reference_user', 'query_user', 'query_session']).apply(_penalty)
    return penalty
def continuous_verification(penalty):
    """
    Determine the maximum lockout time for each impostor/query sample.

    NOTE(review): this definition is shadowed by the identical name defined
    immediately below; it is fixed here to use the computed per-session
    lockout threshold instead of the leftover hard-coded debug value (645).

    Returns:
        DataFrame with columns [query_user, query_session, amrt, threshold].
    """
    genuine_idx = penalty['reference_user'] == penalty['query_user']
    genuine = penalty[genuine_idx]

    # Per-session lockout threshold: the worst (maximum) genuine penalty.
    lockout = genuine.groupby(['query_user', 'query_session']).max()[['penalty']]
    lockout = pd.DataFrame(lockout)
    lockout.columns = ['threshold']

    impostor = penalty[~genuine_idx]

    def _mrt(df):
        # Genuine threshold of the session being attacked (was hard-coded 645).
        thresh = lockout.loc[tuple(df.iloc[0][['query_user', 'query_session']].values)].squeeze()
        reject = (df['penalty'] > thresh)
        # First event at which the impostor is rejected; never -> full length.
        return np.where(reject)[0].min() if reject.any() else len(reject)

    mrt = impostor.groupby(['reference_user', 'query_user', 'query_session']).apply(_mrt).reset_index()
    mrt.columns = ['reference_user', 'query_user', 'query_session', 'mrt']

    # Average maximum rejection time over all reference users per session.
    amrt = mrt.groupby(['query_user', 'query_session'])['mrt'].mean()
    # Rename the Series properly: assigning ``amrt.columns`` on a Series has
    # no effect, so the concat column stayed 'mrt' and downstream lookups of
    # 'amrt' would fail.
    amrt = amrt.rename('amrt')

    results = pd.concat([amrt, lockout], axis=1).reset_index()
    return results
def continuous_verification(penalty):
    """
    Determine the maximum lockout time for each impostor/query sample.

    For every query session the lockout threshold is the worst (maximum)
    penalty reached by the genuine user; an impostor's maximum rejection
    time is the first event index at which their penalty exceeds that
    threshold, or the full sample length if it never does.

    Returns:
        DataFrame with columns [query_user, query_session, amrt, threshold],
        where 'amrt' is the rejection time averaged over reference users.
    """
    genuine_idx = penalty['reference_user'] == penalty['query_user']
    genuine = penalty[genuine_idx]

    # Per-session lockout threshold: the worst (maximum) genuine penalty.
    lockout = genuine.groupby(['query_user', 'query_session']).max()[['penalty']]
    lockout = pd.DataFrame(lockout)
    lockout.columns = ['threshold']

    # Idiomatic boolean negation (was: genuine_idx == False).
    impostor = penalty[~genuine_idx]

    def _mrt(df):
        thresh = lockout.loc[tuple(df.iloc[0][['query_user', 'query_session']].values)].squeeze()
        reject = (df['penalty'] > thresh)
        # First event at which the impostor is rejected; never -> full length.
        return np.where(reject)[0].min() if reject.any() else len(reject)

    mrt = impostor.groupby(['reference_user', 'query_user', 'query_session']).apply(_mrt).reset_index()
    mrt.columns = ['reference_user', 'query_user', 'query_session', 'mrt']

    amrt = mrt.groupby(['query_user', 'query_session'])['mrt'].mean()
    # Rename the Series properly: assigning ``amrt.columns`` on a Series has
    # no effect, so the concat column stayed 'mrt' and downstream lookups of
    # 'amrt' (e.g. in dataset_classification_results) would raise KeyError.
    amrt = amrt.rename('amrt')

    results = pd.concat([amrt, lockout], axis=1).reset_index()
    return results
def ACC(ide):
    """Identification accuracy: fraction of queries whose result is the true user."""
    truth = ide['query_user'].values
    predicted = ide['result'].values
    return accuracy_score(truth, predicted)
def EER(roc):
    """
    Equal error rate: the error value where the FAR and FRR curves cross.

    The crossing is located at the first index where FAR drops to or below
    FRR, then refined by intersecting the two line segments that bracket it.
    """
    far = roc['far'].values
    frr = roc['frr'].values

    def _perpendicular(v):
        out = np.empty_like(v)
        out[0] = -v[1]
        out[1] = v[0]
        return out

    # Intersection point of segment p1->p2 with segment q1->q2.
    def _intersection(p1, p2, q1, q2):
        dp = p2 - p1
        dq = q2 - q1
        offset = p1 - q1
        normal = _perpendicular(dp)
        scale = np.dot(normal, offset) / np.dot(normal, dq)
        return scale * dq + q1

    below = far <= frr
    # Index just before the FAR/FRR ordering flips.
    crossing = np.diff(below).nonzero()[0][0]
    point = _intersection(np.array([crossing, far[crossing]]),
                          np.array([crossing + 1, far[crossing + 1]]),
                          np.array([crossing, frr[crossing]]),
                          np.array([crossing + 1, frr[crossing + 1]]))
    return point[1]
def AUC(roc):
    """Area under the ROC curve, integrated along the FRR axis."""
    x = roc['frr'].values
    y = roc['far'].values
    return auc(x, y)
def SMAPE(ground_truth, predictions):
    """Symmetric mean absolute prediction error: |truth - pred| / (truth + pred)."""
    difference = ground_truth - predictions
    total = ground_truth + predictions
    return np.abs(difference / total)
def split_dataset(df, template_reps, genuine_reps, impostor_reps):
    """
    Split ``df`` into template, genuine-query and impostor-query frames.

    Args:
        df: DataFrame indexed by (user, session).
        template_reps: session labels used as enrollment templates.
        genuine_reps: session labels scored as genuine queries.
        impostor_reps: session labels scored as impostor queries.

    Returns:
        (df_template, df_genuine, df_impostor); the template frame keeps its
        (user, session) index while the query frames are re-indexed by
        (reference_user, query_user, session).
    """
    df_template = df[df.index.get_level_values(1).isin(template_reps)]
    df_genuine = df[df.index.get_level_values(1).isin(genuine_reps)]
    df_impostor = df[df.index.get_level_values(1).isin(impostor_reps)]

    # Genuine queries: the claimed (reference) user IS the true (query) user.
    df_genuine.index.names = ['reference_user', 'session']
    df_genuine = df_genuine.reset_index()
    df_genuine['query_user'] = df_genuine['reference_user']
    df_genuine = df_genuine.set_index(['reference_user', 'query_user', 'session'])

    df_impostor.index.names = ['reference_user', 'session']
    df_impostor = df_impostor.reset_index()
    df_impostor['query_user'] = df_impostor['reference_user']
    df_impostor = df_impostor.set_index(['reference_user', 'query_user', 'session'])

    # Impostor queries: replicate every OTHER user's sessions under each
    # reference user's identity claim.
    dfs_impostor = []
    for user in df.index.get_level_values(0).unique():
        df_tmp = df_impostor.drop(user, level=0).reset_index().copy()
        df_tmp['reference_user'] = user
        dfs_impostor.append(df_tmp)
    df_impostor = pd.concat(dfs_impostor).set_index(['reference_user', 'query_user', 'session'])

    return df_template, df_genuine, df_impostor
def dataset_classification_results(dataset, event, features=['tau', 'duration'],
                                   model_factory_fn=pohmm_factory, out_name=None):
    """
    Run the full classification experiment for one dataset/model combination.

    Args:
        dataset: key into VALIDATION (e.g. 'password', 'mobile').
        event: event column the model is conditioned on ('keyname' or 'none').
        features: feature columns kept by preprocess_data.
        model_factory_fn: callable fitting a model from one user's samples.
        out_name: prefix for all saved result and figure names.
    """
    print('Running:', out_name, flush=True)

    # Load and preprocess the dataset
    df = load_data(dataset)
    df = preprocess_data(df, event, features)

    # Create the validation folds
    folds = [split_dataset(df, *sessions) for sessions in VALIDATION[dataset]]

    scores = cv_event_scores(folds, model_factory_fn)
    save_results(scores, out_name + '_event_scores')

    # Aggregate and normalize the event scores within each session
    session_scores = scores.groupby(['fold', 'reference_user',
                                     'query_user', 'query_session'])['score'].sum().reset_index()
    session_scores = normalize_session_scores(session_scores)
    save_results(session_scores, out_name + '_session_scores')

    # Session and continuous identification, verification results
    session_ide = session_identification(session_scores)
    session_ver = session_roc(session_scores)
    continuous_ide = continuous_identification(scores)  # Identification of each event
    penalty = scores_penalty(scores)
    continuous_ver = continuous_verification(penalty)  # Minimum rejection time

    # Summary of session results (fold-dependent)
    session_acc = session_ide.groupby('fold').apply(ACC).describe()
    session_eer = session_ver.groupby('fold').apply(EER).describe()
    session_auc = session_ver.groupby('fold').apply(AUC).describe()

    # User-dependent EER is obtained by deriving an ROC curve for each user
    user_eer = session_roc(session_scores, pivot='reference_user').groupby('reference_user').apply(EER).describe()
    user_acc = session_ide.groupby('query_user').apply(ACC).describe()

    # Summarize continuous results, CI by session
    continuous_acc = continuous_ide.groupby(['query_user', 'query_session']).apply(ACC).describe()

    # Maximum lockout time, averaged for each session (against all reference users), CI by session.
    # NOTE(review): relies on continuous_verification() exposing an 'amrt' column — verify.
    continuous_amrt = continuous_ver['amrt'].describe()

    summary = pd.concat([session_acc, user_acc, session_eer, user_eer, session_auc, continuous_acc, continuous_amrt],
                        axis=1)
    summary.columns = ['ACC', 'U-ACC', 'EER', 'U-EER', 'AUC', 'CIA', 'AMRT']
    save_results(summary, out_name + '_summary')
    print(summary)

    # Reload the persisted event scores and recompute the penalty for plotting.
    event_scores = load_results(out_name + '_event_scores')
    penalty = scores_penalty(event_scores)

    # Plot a penalty function example for one randomly chosen query session
    penalty = penalty.set_index(['query_user', 'query_session'])
    penalty_example = penalty.loc[np.random.choice(penalty.index.unique())].reset_index()
    plot_penalty_example(penalty_example)
    save_fig(out_name + '_penalty_example')

    plot_penalty_distribution_example(penalty_example)
    save_fig(out_name + '_penalty_distribution_example')

    # Plot the error and ROC curves
    plot_error(session_ver)
    save_fig(out_name + '_error')

    plot_roc(session_ver)
    save_fig(out_name + '_roc')

    return
def dataset_prediction_results(dataset, event, model_factory_fn=pohmm_factory,
                               min_history=90, max_history=None, out_name=None):
    """
    One-step-ahead 'tau' prediction for every (user, session) sample.

    For each history length i in [min_history, max_history], a model is fit
    on the first i events and asked to predict event i; the running mean of
    'tau' serves as the baseline prediction. SMAPE columns compare both
    predictors against the ground truth.

    Args:
        dataset: dataset name passed to load_data.
        event: event column the model is conditioned on ('keyname' or 'none').
        model_factory_fn: callable fitting a model from a sample prefix.
        min_history: smallest number of events to train on.
        max_history: largest number of events to train on (None = all but one).
        out_name: prefix for the saved predictions.
    """
    print('Running:', out_name, flush=True)

    # Load and preprocess the dataset
    df = load_data(dataset)

    # from .data import reduce_dataset
    # df = reduce_dataset(df, num_users=5, min_samples=1, max_samples=1)

    df = preprocess_data(df, event, ['tau'])

    baseline_col = 'baseline_tau'
    prediction_col = 'prediction_tau'

    # One unit of progress per (user, session) sample.
    work_done = 0
    work = len(df.index.unique())
    progress = ProgressBar(work)
    progress.animate(work_done)

    def _predictions(df):
        # Cap the history at the sample length (the last event is always
        # held out as the prediction target).
        if max_history is None:
            upper = len(df) - 1
        else:
            upper = min(max_history, len(df) - 1)

        results = []
        for i in range(min_history, upper + 1):
            # Refit on the first i events and predict event i, conditioning
            # on the next event label.
            hmm = model_factory_fn(df[:i])
            pred = hmm.predict_df(df[:i], next_pstate=df.iloc[i]['event'])[0]
            # Baseline: running mean of tau over the observed history.
            baseline_pred = df['tau'].values[:i].mean(axis=0)
            results.append([i, df.iloc[i]['event'], df.iloc[i]['tau'], pred, baseline_pred])

        nonlocal work_done
        work_done += 1
        progress.animate(work_done)

        results = pd.DataFrame(results, columns=['event_idx', 'event', 'tau', prediction_col, baseline_col])
        return results

    pred = df.groupby(level=[0, 1]).apply(_predictions)
    pred['SMAPE_tau'] = SMAPE(pred['tau'], pred[prediction_col])
    pred['SMAPE_baseline_tau'] = SMAPE(pred['tau'], pred[baseline_col])
    # Drop the inner row counter appended by groupby.apply.
    pred = pred.reset_index(level=df.index.nlevels, drop=True)
    save_results(pred, out_name + '_predictions')
    return
def manhattan_factory(df):
    """Build a Manhattan-distance scorer from the enrollment samples in ``df``."""
    class Classifier(object):
        def fit_df(self, samples):
            # Template is the per-feature mean of the enrollment samples.
            self.template = samples.mean(axis=0)

        def score_df(self, queries):
            # Negated L1 distance to the template: higher score = closer.
            deviations = (self.template - queries).abs()
            return -deviations.sum(axis=1).values.squeeze()

    classifier = Classifier()
    classifier.fit_df(df)
    return classifier
def svm_factory(df):
    """Build a one-class SVM novelty scorer from the enrollment samples in ``df``."""
    class Classifier(object):
        def fit_df(self, samples):
            self.model = OneClassSVM()
            self.model.fit(samples.values)

        def score_df(self, queries):
            # Signed distance to the decision boundary: higher = more genuine.
            return self.model.decision_function(queries.values).squeeze()

    classifier = Classifier()
    classifier.fit_df(df)
    return classifier
def gmm_factory(df):
    """Build a GMM event scorer from the enrollment samples in ``df``."""
    class Classifier(object):
        def fit_df(self, samples):
            samples = samples[samples.columns.difference(['event'])]
            # Heuristic: one component per sqrt(average sample length).
            n_components = int(round(np.sqrt(samples.groupby(level=[0, 1]).size().mean())))
            self.model = GMM(n_components=n_components, covariance_type='spherical', min_covar=0.01)
            self.model.fit(samples.values)

        def score_events_df(self, events):
            events = events[events.columns.difference(['event'])]
            events['score'] = self.model.score(events.values)
            return events

        def predict_states_df(self, events):
            # The GMM baseline has no hidden-state sequence; report a constant state.
            events['state'] = 0
            return events

    classifier = Classifier()
    classifier.fit_df(df)
    return classifier
def feature_vector_results(dataset, features, model_factory, out_name):
    """
    Run the session-level (fixed-length feature vector) experiment for one
    dataset/model combination and save the normalized scores and summary.

    Args:
        dataset: key into VALIDATION, used to build the folds.
        features: name of the stored feature-vector data to load.
        model_factory: callable fitting a scorer from one user's samples.
        out_name: prefix for all saved result names.
    """
    print('Running:', out_name, flush=True)

    df = load_data(features)

    folds = [split_dataset(df, *sessions) for sessions in VALIDATION[dataset]]
    scores = cv_session_scores(folds, model_factory)
    session_scores = normalize_session_scores(scores)
    save_results(session_scores, out_name + '_session_scores')

    # Session identification and verification results
    session_ide = session_identification(session_scores)
    session_ver = session_roc(session_scores)

    # Summary of session results (fold-dependent)
    session_acc = session_ide.groupby('fold').apply(ACC).describe()
    session_eer = session_ver.groupby('fold').apply(EER).describe()
    session_auc = session_ver.groupby('fold').apply(AUC).describe()

    # User-dependent EER is obtained by deriving an ROC curve for each user
    user_eer = session_roc(session_scores, pivot='reference_user').groupby('reference_user').apply(EER).describe()
    user_acc = session_ide.groupby('query_user').apply(ACC).describe()

    summary = pd.concat([session_acc, user_acc, session_eer, user_eer, session_auc], axis=1)
    summary.columns = ['ACC', 'U-ACC', 'EER', 'U-EER', 'AUC']
    save_results(summary, out_name + '_summary')
    print(summary)
def classification_results(seed=1234):
    """Run every classification experiment (POHMM, HMM and baselines) on all datasets."""
    np.random.seed(seed)

    for dataset in DATASETS:
        # event='keyname' gives the POHMM; event='none' reduces it to a plain HMM.
        dataset_classification_results(dataset, 'keyname', out_name='%s_pohmm' % dataset)
        dataset_classification_results(dataset, 'none', out_name='%s_hmm' % dataset)

    # Mobile dataset again, with the additional sensor feature columns.
    dataset_classification_results('mobile', 'keyname',
                                   features=['tau', 'duration'] + MOBILE_SENSORS,
                                   out_name='mobile_sensor_pohmm')
    dataset_classification_results('mobile', 'none',
                                   features=['tau', 'duration'] + MOBILE_SENSORS,
                                   out_name='mobile_sensor_hmm')

    # Fixed-length feature-vector baselines.
    for dataset in ['fixed_text', 'free_text']:  # DATASETS:
        # feature_vector_results(dataset, '%s_features' % dataset, manhattan_factory, out_name='%s_manhattan' % dataset)
        feature_vector_results(dataset, '%s_scaled_features' % dataset, manhattan_factory,
                               out_name='%s_scaled_manhattan' % dataset)
        feature_vector_results(dataset, '%s_normed_features' % dataset, svm_factory, out_name='%s_svm' % dataset)

    feature_vector_results('mobile', 'mobile_sensor_features', manhattan_factory,
                           out_name='mobile_sensor_manhattan')
    feature_vector_results('mobile', 'mobile_sensor_scaled_features', manhattan_factory,
                           out_name='mobile_sensor_scaled_manhattan')
    feature_vector_results('mobile', 'mobile_sensor_normed_features', svm_factory,
                           out_name='mobile_sensor_svm')
def prediction_results(seed=1234):
    """Run the tau-prediction experiments (POHMM and HMM) on both text datasets."""
    for dataset, min_history in [('fixed_text', 50), ('free_text', 450)]:
        # Reseed before each dataset so runs are independently reproducible.
        np.random.seed(seed)
        dataset_prediction_results(dataset, 'keyname', out_name='%s_pohmm' % dataset,
                                   min_history=min_history, max_history=None)
        dataset_prediction_results(dataset, 'none', out_name='%s_hmm' % dataset,
                                   min_history=min_history, max_history=None)
def plot_pohmm_example(dataset, seed=1234):
    """Fit a POHMM to one randomly chosen user of ``dataset`` and save an
    example model/empirical pdf figure (via plot_model_empirical_pdf)."""
    np.random.seed(seed)
    df = load_data(dataset)
    # Keep only a single randomly chosen user.
    df = df[df.index.get_level_values(0) == np.random.choice(df.index.get_level_values(0).unique())]
    df = preprocess_data(df, 'keyname', ['tau'])
    m = pohmm_factory(df)
    plot_model_empirical_pdf(df, m, 1000)
    save_fig('%s_pohmm_example' % dataset)
def plot_montecarlo_hmm_vs_pohmm(dataset):
    """Plot HMM vs POHMM Monte-Carlo p-values for ``dataset`` and save the figure."""
    pvalues = {model: load_results('%s_%s_montecarlo_pvalues' % (dataset, model))
               for model in ('hmm', 'pohmm')}
    plot_hmm_vs_pohmm_pvalues(pvalues['hmm'], pvalues['pohmm'])
    save_fig('%s_hmm_vs_pohmm_pvalues' % dataset)
def plot_roc_curves_hmm_vs_pohmm(dataset):
    """Plot session-level ROC curves for every model on ``dataset`` and save the figure."""
    # The password protocol has a single fold, so stratify by reference user instead.
    pivot = 'reference_user' if dataset == 'password' else 'fold'

    systems = [('Manhattan', 'manhattan'),
               ('Manhattan (scaled)', 'scaled_manhattan'),
               ('SVM (one-class)', 'svm'),
               ('HMM', 'hmm'),
               ('POHMM', 'pohmm')]
    curves = [(label, session_roc(load_results('%s_%s_session_scores' % (dataset, key)), pivot))
              for label, key in systems]

    plot_roc(curves, 'Model', pivot)
    save_fig(dataset + '_roc')
def summary_table(m, threshold=0.05):
    """
    Build and save a cross-dataset summary table for one measure.

    Args:
        m: measure name: 'ACC', 'EER' or 'AMRT'.
        threshold: significance level for the Wilcoxon signed-rank test
            (Bonferroni-corrected over the competing systems).

    Each cell is 'mean (std)'; the best system per dataset, and any system
    not significantly different from it, is marked with '*'.
    """
    rows = []

    if m == 'AMRT':
        # Only the sequence models have saved per-event scores/penalties.
        SYSTEMS = ['hmm', 'pohmm']
        COLUMNS = ['dataset', 'HMM', 'POHMM']
    else:
        SYSTEMS = ['manhattan', 'scaled_manhattan', 'svm', 'hmm', 'pohmm']
        COLUMNS = ['dataset', 'Manhattan', 'Manhattan (scaled)', 'SVM (one-class)', 'HMM', 'POHMM']

    for dataset in ['password', 'keypad', 'mobile', 'mobile_sensor', 'fixed_text', 'free_text']:
        row = []

        # Password has a single fold, so use the user-dependent measure there.
        if ((m == 'EER') or (m == 'ACC')) and (dataset == 'password'):
            measure = 'U-' + m
        else:
            measure = m

        means = []
        system_measures = []
        for system in SYSTEMS:
            session_scores = load_results('%s_%s_session_scores' % (dataset, system))

            if measure == 'U-ACC':
                measures = session_identification(session_scores).groupby('query_user').apply(ACC)
            elif measure == 'U-EER':
                measures = session_roc(session_scores, pivot='reference_user').groupby('reference_user').apply(EER)
            elif measure == 'ACC':
                measures = session_identification(session_scores).groupby('fold').apply(ACC)
            elif measure == 'EER':
                measures = session_roc(session_scores, pivot='fold').groupby('fold').apply(EER)
            elif measure == 'AMRT':
                scores = load_results('%s_%s_event_scores' % (dataset, system))
                penalty = scores_penalty(scores)
                continuous_ver = continuous_verification(penalty)
                # NOTE(review): relies on continuous_verification() exposing
                # an 'amrt' column — verify.
                measures = continuous_ver['amrt']

            system_measures.append(measures.values)
            means.append(measures.mean())
            row.append('%.3f (%.3f)' % (measures.mean(), measures.std()))

        means = np.array(means)
        # Higher is better for accuracy; lower is better for EER/AMRT.
        if 'ACC' in measure:
            idx = np.argmax(means)
        else:
            idx = np.argmin(means)

        row[idx] = '*' + row[idx] + '*'

        # Also mark systems statistically indistinguishable from the best
        # (Wilcoxon signed-rank test, Bonferroni-corrected p-value cutoff).
        for i in range(len(system_measures)):
            if i == idx:
                continue
            _, pvalue = wilcoxon(system_measures[idx], system_measures[i])
            if pvalue > threshold/(len(system_measures) - 1):
                row[i] = '*' + row[i] + '*'

        rows.append([dataset] + row)

    df = pd.DataFrame(rows, columns=COLUMNS)
    df = df.set_index('dataset')
    save_results(df, 'summary_%s' % m)
| import numpy as np
import pandas as pd
from pohmm import Pohmm
from scipy import interp
from itertools import chain
from scipy.stats import wilcoxon
from sklearn.svm import OneClassSVM
from sklearn.mixture import GMM
from sklearn.metrics import auc, accuracy_score
from .io import load_data, load_results, save_results, ProgressBar
from .data import preprocess_data, MOBILE_SENSORS, DATASETS
from .plotting import *
def leave_one_out(samples_per_user):
folds = []
for i in range(samples_per_user):
folds.append((np.r_[np.arange(i), np.arange(i + 1, samples_per_user)],
np.r_[i],
np.r_[i]))
return folds
VALIDATION = {
'password': [(np.arange(150, 200), np.arange(200, 400), np.arange(200, 400))],
'keypad': leave_one_out(20),
'fixed_text': leave_one_out(4),
'free_text': leave_one_out(6),
'mobile': leave_one_out(20)
}
def pohmm_factory(df):
emissions = []
for col in df.columns.difference(['event']):
if col in ['tau', 'duration']:
emissions.append((col, 'lognormal'))
else:
emissions.append((col, 'normal'))
hmm = Pohmm(n_hidden_states=2, init_spread=2, thresh=1e-6, max_iter=1000,
emissions=emissions, smoothing='freq')
hmm.fit_df(list(zip(*df.groupby(level=[0, 1])))[1])
return hmm
def stratified_kfold(df, nfolds):
"""
Create stratified k-folds
"""
sessions = pd.DataFrame.from_records(list(df.index.unique())).groupby(0).apply(lambda x: x[1].unique())
sessions.apply(lambda x: np.random.shuffle(x))
folds = []
for i in range(nfolds):
idx = sessions.apply(lambda x: pd.Series(x[i * (len(x) / nfolds):(i + 1) * (len(x) / nfolds)]))
idx = pd.DataFrame(idx.stack().reset_index(level=1, drop=True)).set_index(0, append=True).index.values
folds.append(df.loc[idx])
return folds
def cv_session_scores(folds, model_factory):
"""
Obtain identification and verification results using stratified k-fold cross validation and a model that scores a sample
fit_model_fn should be a function that takes all the samples from a single user and returns a fitted model
score_model_fn should be a function that takes a model and a single sample and scores the sample for the model
"""
results = []
n_folds = len(folds)
for i in range(n_folds):
print('\nFold %d of %d' % (i + 1, n_folds))
reference, genuine, impostor = folds[i]
reference_users = reference.index.get_level_values(0).unique()
work_done = 0
work = len(reference_users) + len(genuine.index.unique()) + len(impostor.index.unique())
progress = ProgressBar(work)
models = {}
for reference_user, reference_data in reference.groupby(level=[0]):
models[reference_user] = model_factory(reference_data)
work_done += 1
progress.animate(work_done)
for (reference_user, query_user, query_session), query_data in chain(genuine.groupby(level=[0, 1, 2]),
impostor.groupby(level=[0, 1, 2])):
results.append((i, reference_user, query_user, query_session,
models[reference_user].score_df(query_data)))
work_done += 1
progress.animate(work_done)
print()
scores = pd.DataFrame(results, columns=['fold', 'reference_user', 'query_user', 'query_session', 'score'])
# scores.set_index(['fold','reference_user','query_user','query_session'], inplace=True)
return scores
def model_scores(df, model):
if df.index.nlevels > 1:
level = np.arange(df.index.nlevels).tolist()
else:
level = 0
def loglik(x):
m = model(x)
return m.logprob_
scores = df.groupby(level=level).apply(loglik)
scores = pd.DataFrame(scores)
scores.columns = ['loglik']
return scores
def cv_event_scores(folds, model, show_progress=True):
"""
Obtain identification and verification results using stratified k-fold cross validation and a model that scores a sample
Creates a dataframe with cols: fold, reference_user, query_user, query_session, event_idx
Args:
folds: list of folds
model: function that takes all the samples from a single user and returns a fitted model
"""
scores = []
n_folds = len(folds)
for i in range(n_folds):
if show_progress:
print('\nFold %d of %d' % (i + 1, n_folds))
reference, genuine, impostor = folds[i]
reference_users = reference.index.get_level_values(0).unique()
work_done = 0
work = len(reference_users) + len(genuine.index.unique()) + len(impostor.index.unique())
progress = ProgressBar(work)
if show_progress:
progress.animate(work_done)
models = {}
for reference_user, reference_data in reference.groupby(level=[0]):
models[reference_user] = model(reference_data)
work_done += 1
if show_progress:
progress.animate(work_done)
for (reference_user, query_user, query_session), query_data in chain(genuine.groupby(level=[0, 1, 2]),
impostor.groupby(level=[0, 1, 2])):
score = models[reference_user].score_events_df(query_data.reset_index(drop=True))
state = models[reference_user].predict_states_df(query_data.reset_index(drop=True))
df = pd.DataFrame({'fold': i,
'reference_user': reference_user,
'query_user': query_user,
'query_session': query_session,
'event_idx': np.arange(len(query_data)),
'event': query_data['event'].values,
'score': score['score'],
'state': state['state'],
},
columns=['fold', 'reference_user', 'query_user', 'query_session', 'event_idx',
'event', 'score', 'state'])
scores.append(df)
work_done += 1
if show_progress:
progress.animate(work_done)
scores = pd.concat(scores).reset_index(drop=True)
scores['rank'] = scores.groupby(['fold', 'query_user',
'query_session', 'event_idx'])['score'].rank(ascending=False) - 1
return scores
def normalize_session_scores(session_scores, pivot=['fold', 'query_user', 'query_session'], method='minmax', h=2):
def _norm(df):
if method is None:
df['nscore'] = df['score']
return df
if method == 'minmax':
lower = df['score'].min()
upper = df['score'].max()
elif method == 'stddev':
lower = df['score'].mean() - h * df['score'].std()
upper = df['score'].mean() + h * df['score'].std()
df['nscore'] = np.minimum(np.maximum((df['score'] - lower) / (upper - lower), 0), 1)
return df
session_scores = session_scores.groupby(pivot).apply(_norm)
return session_scores
def session_identification(session_scores):
"""
"""
ide = session_scores.groupby(['fold', 'query_user', 'query_session']).apply(
lambda x: x.iloc[np.argmax(x['score'].values)][['reference_user']])
ide.columns = ['result']
ide = ide.reset_index()
return ide
def roc_curve(y_true, y_score):
"""
See sklearn.metrics.roc_curve
"""
from sklearn.metrics import roc_curve as _roc_curve
fpr, tpr, thresholds = _roc_curve(y_true, y_score, drop_intermediate=True)
return fpr, 1 - tpr, thresholds
def session_roc(session_scores, pivot='fold'):
"""
"""
# Generate an ROC curve for each fold, ordered by increasing threshold
roc = session_scores.groupby(pivot).apply(
lambda x: pd.DataFrame(np.c_[roc_curve((x['query_user'] == x['reference_user']).values.astype(np.int32),
x['nscore'].values.astype(np.float32))][::-1],
columns=['far', 'frr', 'threshold']))
# interpolate to get the same threshold values in each fold
thresholds = np.sort(roc['threshold'].unique())
roc = roc.groupby(level=pivot).apply(lambda x: pd.DataFrame(np.c_[thresholds,
interp(thresholds, x['threshold'], x['far']),
interp(thresholds, x['threshold'], x['frr'])],
columns=['threshold', 'far', 'frr']))
roc = roc.reset_index(level=1, drop=True).reset_index()
return roc
def continuous_identification(scores):
"""
"""
ide = scores.groupby(['fold', 'query_user', 'query_session', 'event_idx']).apply(
lambda x: x.iloc[np.argmax(x['score'].values)][['reference_user']])
ide.columns = ['result']
ide = ide.reset_index()
return ide
def scores_penalty(scores, penalty_fun='sum', window=25):
"""
"""
def _penalty(df):
if penalty_fun == 'sum':
p = df['rank'].rolling(window=window, center=False).sum()
p[:window] = df['rank'].values[:window].cumsum()
elif penalty_fun == 'sumexp':
p = (np.exp(df['rank']) - 1).rolling(window=window, center=False).sum()
p[:window] = (np.exp(df['rank']) - 1)[:window].cumsum()
df['penalty'] = p
return df
penalty = scores.copy().groupby(['fold', 'reference_user', 'query_user', 'query_session']).apply(_penalty)
return penalty
def continuous_verification(penalty):
"""
Determine the maximum lockout time for each impostor/query sample
"""
genuine_idx = penalty['reference_user'] == penalty['query_user']
genuine = penalty[genuine_idx]
lockout = genuine.groupby(['query_user', 'query_session']).max()[['penalty']]
lockout = pd.DataFrame(lockout)
lockout.columns = ['threshold']
impostor = penalty[~genuine_idx]
def _mrt(df):
# thresh = lockout.loc[tuple(df.iloc[0][['query_user', 'query_session']].values)].squeeze()
thresh = 645
reject = (df['penalty'] > thresh)
return np.where(reject)[0].min() if reject.any() else len(reject)
mrt = impostor.groupby(['reference_user', 'query_user', 'query_session']).apply(_mrt).reset_index()
mrt.columns = ['reference_user', 'query_user', 'query_session', 'mrt']
amrt = mrt.groupby(['query_user', 'query_session'])['mrt'].mean()
amrt.columns = ['amrt']
results = pd.concat([amrt, lockout], axis=1).reset_index()
return results
def continuous_verification(penalty):
"""
Determine the maximum lockout time for each impostor/query sample
"""
genuine_idx = penalty['reference_user'] == penalty['query_user']
genuine = penalty[genuine_idx]
lockout = genuine.groupby(['query_user', 'query_session']).max()[['penalty']]
lockout = pd.DataFrame(lockout)
lockout.columns = ['threshold']
impostor = penalty[genuine_idx == False]
def _mrt(df):
thresh = lockout.loc[tuple(df.iloc[0][['query_user', 'query_session']].values)].squeeze()
reject = (df['penalty'] > thresh)
return np.where(reject)[0].min() if reject.any() else len(reject)
mrt = impostor.groupby(['reference_user', 'query_user', 'query_session']).apply(_mrt).reset_index()
mrt.columns = ['reference_user', 'query_user', 'query_session', 'mrt']
amrt = mrt.groupby(['query_user', 'query_session'])['mrt'].mean()
amrt.columns = ['amrt']
results = pd.concat([amrt, lockout], axis=1).reset_index()
return results
def ACC(ide):
    """Rank-1 identification accuracy for one group.

    Compares the true user ('query_user') with the top-ranked prediction
    ('result') and returns the fraction that match.
    """
    y_true = ide['query_user'].values
    y_pred = ide['result'].values
    return accuracy_score(y_true, y_pred)
def EER(roc):
    """Equal error rate for one ROC curve.

    Finds the first index where the FAR drops to or below the FRR and
    linearly interpolates the crossing point of the two curves; the y
    coordinate of that intersection is the EER.
    """
    far = roc['far'].values
    frr = roc['frr'].values
    # First index at which the FAR/FRR ordering flips
    crossing = np.diff(far <= frr).nonzero()[0][0]

    def _intersect(p1, p2, q1, q2):
        # Intersection of segment (p1, p2) with segment (q1, q2)
        seg_a = p2 - p1
        seg_b = q2 - q1
        offset = p1 - q1
        normal = np.array([-seg_a[1], seg_a[0]])
        t = np.dot(normal, offset) / np.dot(normal, seg_b)
        return t * seg_b + q1

    point = _intersect(np.array([crossing, far[crossing]]),
                       np.array([crossing + 1, far[crossing + 1]]),
                       np.array([crossing, frr[crossing]]),
                       np.array([crossing + 1, frr[crossing + 1]]))
    return point[1]
def AUC(roc):
    """Area under the ROC curve (FAR as a function of FRR) for one fold."""
    frr = roc['frr'].values
    far = roc['far'].values
    return auc(frr, far)
def SMAPE(ground_truth, predictions):
    """Symmetric absolute prediction error, elementwise.

    Despite the name, no averaging is performed here: the caller gets one
    value per prediction, |truth - pred| / |truth + pred|.
    """
    diff = ground_truth - predictions
    total = ground_truth + predictions
    return np.abs(diff / total)
def split_dataset(df, template_reps, genuine_reps, impostor_reps):
    """Split *df*, indexed by (user, session), into three frames.

    Returns (df_template, df_genuine, df_impostor):
      - template: rows whose session is in template_reps, index unchanged;
      - genuine: rows in genuine_reps, reindexed by
        (reference_user, query_user, session) with query_user == reference_user;
      - impostor: rows in impostor_reps, replicated so every user serves as
        reference_user against every *other* user's samples.

    The genuine/impostor selections are explicitly copied before being
    mutated (index renames, column assignment) so the slices of *df* are
    never written through — avoids chained-assignment warnings and keeps
    *df* untouched.
    """
    session = df.index.get_level_values(1)
    df_template = df[session.isin(template_reps)]
    df_genuine = df[session.isin(genuine_reps)].copy()
    df_impostor = df[session.isin(impostor_reps)].copy()

    df_genuine.index.names = ['reference_user', 'session']
    df_genuine = df_genuine.reset_index()
    df_genuine['query_user'] = df_genuine['reference_user']
    df_genuine = df_genuine.set_index(['reference_user', 'query_user', 'session'])

    df_impostor.index.names = ['reference_user', 'session']
    df_impostor = df_impostor.reset_index()
    df_impostor['query_user'] = df_impostor['reference_user']
    df_impostor = df_impostor.set_index(['reference_user', 'query_user', 'session'])

    # Cross every reference user with every other user's impostor samples
    dfs_impostor = []
    for user in df.index.get_level_values(0).unique():
        df_tmp = df_impostor.drop(user, level=0).reset_index().copy()
        df_tmp['reference_user'] = user
        dfs_impostor.append(df_tmp)
    df_impostor = pd.concat(dfs_impostor).set_index(['reference_user', 'query_user', 'session'])
    return df_template, df_genuine, df_impostor
def dataset_classification_results(dataset, event, features=None,
                                   model_factory_fn=pohmm_factory, out_name=None):
    """
    Obtain results for a given dataset and features conditioned on the event column.

    Runs the full event-level pipeline: cross-validated event scores,
    session-level identification/verification, continuous identification
    and the continuous-verification lockout summary, then saves results
    and example figures under the *out_name* prefix.

    Parameters
    ----------
    dataset : str
        Name passed to ``load_data``.
    event : str
        Column used to condition the model ('keyname' for a POHMM,
        'none' for an unconditioned HMM).
    features : list of str, optional
        Feature columns to keep; defaults to ['tau', 'duration'].
    model_factory_fn : callable
        Builds a fitted model from one user's training samples.
    out_name : str
        Prefix for all saved results and figures.
    """
    if features is None:
        # None sentinel instead of a mutable default argument; the
        # historical default was ['tau', 'duration'].
        features = ['tau', 'duration']
    print('Running:', out_name, flush=True)
    # Load and preprocess the dataset
    df = load_data(dataset)
    df = preprocess_data(df, event, features)
    # Create the validation folds
    folds = [split_dataset(df, *sessions) for sessions in VALIDATION[dataset]]
    scores = cv_event_scores(folds, model_factory_fn)
    save_results(scores, out_name + '_event_scores')
    # Aggregate and normalize the event scores within each session
    session_scores = scores.groupby(['fold', 'reference_user',
                                     'query_user', 'query_session'])['score'].sum().reset_index()
    session_scores = normalize_session_scores(session_scores)
    save_results(session_scores, out_name + '_session_scores')
    # Session and continuous identification, verification results
    session_ide = session_identification(session_scores)
    session_ver = session_roc(session_scores)
    continuous_ide = continuous_identification(scores)  # Identification of each event
    penalty = scores_penalty(scores)
    continuous_ver = continuous_verification(penalty)  # Minimum rejection time
    # Summary of session results (distribution over folds)
    session_acc = session_ide.groupby('fold').apply(ACC).describe()
    session_eer = session_ver.groupby('fold').apply(EER).describe()
    session_auc = session_ver.groupby('fold').apply(AUC).describe()
    # User-dependent EER is obtained by deriving an ROC curve for each user
    user_eer = session_roc(session_scores, pivot='reference_user').groupby('reference_user').apply(EER).describe()
    user_acc = session_ide.groupby('query_user').apply(ACC).describe()
    # Summarize continuous results, CI by session
    continuous_acc = continuous_ide.groupby(['query_user', 'query_session']).apply(ACC).describe()
    # Maximum lockout time, averaged for each session (against all reference users), CI by session
    continuous_amrt = continuous_ver['amrt'].describe()
    summary = pd.concat([session_acc, user_acc, session_eer, user_eer, session_auc, continuous_acc, continuous_amrt],
                        axis=1)
    summary.columns = ['ACC', 'U-ACC', 'EER', 'U-EER', 'AUC', 'CIA', 'AMRT']
    save_results(summary, out_name + '_summary')
    print(summary)
    # Reload the saved event scores and plot a penalty function example
    event_scores = load_results(out_name + '_event_scores')
    penalty = scores_penalty(event_scores)
    penalty = penalty.set_index(['query_user', 'query_session'])
    penalty_example = penalty.loc[np.random.choice(penalty.index.unique())].reset_index()
    plot_penalty_example(penalty_example)
    save_fig(out_name + '_penalty_example')
    plot_penalty_distribution_example(penalty_example)
    save_fig(out_name + '_penalty_distribution_example')
    # Plot the error and ROC curves
    plot_error(session_ver)
    save_fig(out_name + '_error')
    plot_roc(session_ver)
    save_fig(out_name + '_roc')
    return
def dataset_prediction_results(dataset, event, model_factory_fn=pohmm_factory,
                               min_history=90, max_history=None, out_name=None):
    """
    Obtain predictions for each model.
    Create stratified folds
    Train on 1-n_folds. Use the last fold to make predictions for each event

    For every (user, session) sample, a model is refit on the first i
    events and asked to predict the tau of event i, for each i from
    min_history up to max_history (or the end of the sample).  A
    history-mean baseline is recorded alongside each model prediction,
    and per-event SMAPE values for both are saved under *out_name*.
    """
    print('Running:', out_name, flush=True)
    # Load and preprocess the dataset
    df = load_data(dataset)
    # from .data import reduce_dataset
    # df = reduce_dataset(df, num_users=5, min_samples=1, max_samples=1)
    df = preprocess_data(df, event, ['tau'])
    # fold, ref user, query user, query session, into future, event, ground truth, prediction
    baseline_col = 'baseline_tau'
    prediction_col = 'prediction_tau'
    # Progress is reported per (user, session) group
    work_done = 0
    work = len(df.index.unique())
    progress = ProgressBar(work)
    progress.animate(work_done)

    def _predictions(df):
        # Upper bound on the prediction index: never past the last event
        if max_history is None:
            upper = len(df) - 1
        else:
            upper = min(max_history, len(df) - 1)
        results = []
        for i in range(min_history, upper + 1):
            # Refit the model on the first i events only (growing history)
            hmm = model_factory_fn(df[:i])
            # Predict tau for event i, conditioned on its observed event type
            pred = hmm.predict_df(df[:i], next_pstate=df.iloc[i]['event'])[0]
            # pred = hmm.predict_df(df[:i])[0]
            # Baseline prediction: mean of all previously observed tau values
            baseline_pred = df['tau'].values[:i].mean(axis=0)
            results.append([i, df.iloc[i]['event'], df.iloc[i]['tau'], pred, baseline_pred])
        # One unit of progress per (user, session) group
        nonlocal work_done
        work_done += 1
        progress.animate(work_done)
        results = pd.DataFrame(results, columns=['event_idx', 'event', 'tau', prediction_col, baseline_col])
        return results

    pred = df.groupby(level=[0, 1]).apply(_predictions)
    # Per-event symmetric error for the model and for the baseline
    pred['SMAPE_tau'] = SMAPE(pred['tau'], pred[prediction_col])
    pred['SMAPE_baseline_tau'] = SMAPE(pred['tau'], pred[baseline_col])
    # Drop the extra index level introduced by the groupby apply
    pred = pred.reset_index(level=df.index.nlevels, drop=True)
    save_results(pred, out_name + '_predictions')
    return
def manhattan_factory(df):
    """Build a verifier that scores samples by negative Manhattan distance
    to the column-wise mean (the "template") of the enrollment data."""
    class _ManhattanClassifier(object):
        def fit_df(self, df):
            # The template is the mean feature vector of the enrollment set
            self.template = df.mean(axis=0)

        def score_df(self, df):
            # Higher (closer to zero) scores mean closer to the template
            return -(df - self.template).abs().sum(axis=1).values.squeeze()

    clf = _ManhattanClassifier()
    clf.fit_df(df)
    return clf
def svm_factory(df):
    """Build a one-class SVM verifier trained on the enrollment samples.

    score_df returns the (squeezed) decision-function values; larger means
    more similar to the enrollment distribution.
    """
    class _OneClassSvmClassifier(object):
        def fit_df(self, df):
            self.model = OneClassSVM()
            self.model.fit(df.values)

        def score_df(self, df):
            return self.model.decision_function(df.values).squeeze()

    clf = _OneClassSvmClassifier()
    clf.fit_df(df)
    return clf
def gmm_factory(df):
    """Build a generative Gaussian-mixture user model.

    Exposes score_events_df / predict_states_df so it can presumably stand
    in for the event-level model factories (verify against the cv_* callers);
    the 'event' column is ignored and there is a single dummy state.
    """
    class Classifier(object):
        def fit_df(self, df):
            # Drop the categorical event column; the GMM models features only
            df = df[df.columns.difference(['event'])]
            # Heuristic component count: sqrt of the mean (user, session)
            # group size, rounded to the nearest integer
            n_components = int(round(np.sqrt(df.groupby(level=[0, 1]).size().mean())))
            self.model = GMM(n_components=n_components, covariance_type='spherical', min_covar=0.01)
            self.model.fit(df.values)

        def score_events_df(self, df):
            df = df[df.columns.difference(['event'])]
            # NOTE(review): relies on the legacy sklearn GMM API where
            # score() returns per-sample log-likelihoods -- confirm against
            # the pinned scikit-learn version.
            df['score'] = self.model.score(df.values)
            return df

        def predict_states_df(self, df):
            # Single dummy state: a GMM has no hidden-state sequence
            df['state'] = 0
            return df

    clf = Classifier()
    clf.fit_df(df)
    return clf
def feature_vector_results(dataset, features, model_factory, out_name):
    """Evaluate a fixed-length feature-vector classifier.

    Parameters
    ----------
    dataset : str
        Dataset name, used to look up the validation folds in VALIDATION.
    features : str
        Name of the pre-extracted feature table loaded via ``load_data``.
    model_factory : callable
        e.g. ``manhattan_factory`` or ``svm_factory``.
    out_name : str
        Prefix for saved session scores and the summary table.

    Scores are produced per session (not per event), so no continuous
    identification/verification results are computed here.
    """
    print('Running:', out_name, flush=True)
    df = load_data(features)
    # Same validation folds as the event-level models, keyed by dataset name
    folds = [split_dataset(df, *sessions) for sessions in VALIDATION[dataset]]
    scores = cv_session_scores(folds, model_factory)
    session_scores = normalize_session_scores(scores)
    save_results(session_scores, out_name + '_session_scores')
    # Session identification and verification results
    session_ide = session_identification(session_scores)
    session_ver = session_roc(session_scores)
    # Summary of session results (distribution over folds)
    session_acc = session_ide.groupby('fold').apply(ACC).describe()
    session_eer = session_ver.groupby('fold').apply(EER).describe()
    session_auc = session_ver.groupby('fold').apply(AUC).describe()
    # User-dependent EER is obtained by deriving an ROC curve for each user
    user_eer = session_roc(session_scores, pivot='reference_user').groupby('reference_user').apply(EER).describe()
    user_acc = session_ide.groupby('query_user').apply(ACC).describe()
    summary = pd.concat([session_acc, user_acc, session_eer, user_eer, session_auc], axis=1)
    summary.columns = ['ACC', 'U-ACC', 'EER', 'U-EER', 'AUC']
    save_results(summary, out_name + '_summary')
    print(summary)
def classification_results(seed=1234):
    """Run every classification experiment.

    Event-level POHMM/HMM on each dataset, the mobile dataset again with
    sensor channels, and the fixed-length feature-vector baselines
    (scaled Manhattan, one-class SVM).
    """
    np.random.seed(seed)
    # Event-level models: POHMM (conditioned on keyname) and HMM (unconditioned)
    for dataset in DATASETS:
        for event, model in [('keyname', 'pohmm'), ('none', 'hmm')]:
            dataset_classification_results(dataset, event,
                                           out_name='%s_%s' % (dataset, model))
    # Mobile dataset again, with the sensor channels added to the features
    for event, model in [('keyname', 'pohmm'), ('none', 'hmm')]:
        dataset_classification_results('mobile', event,
                                       features=['tau', 'duration'] + MOBILE_SENSORS,
                                       out_name='mobile_sensor_%s' % model)
    # Fixed-length feature-vector baselines on the text datasets
    for dataset in ['fixed_text', 'free_text']:
        feature_vector_results(dataset, '%s_scaled_features' % dataset, manhattan_factory,
                               out_name='%s_scaled_manhattan' % dataset)
        feature_vector_results(dataset, '%s_normed_features' % dataset, svm_factory,
                               out_name='%s_svm' % dataset)
    # ... and on the mobile sensor features
    feature_vector_results('mobile', 'mobile_sensor_features', manhattan_factory,
                           out_name='mobile_sensor_manhattan')
    feature_vector_results('mobile', 'mobile_sensor_scaled_features', manhattan_factory,
                           out_name='mobile_sensor_scaled_manhattan')
    feature_vector_results('mobile', 'mobile_sensor_normed_features', svm_factory,
                           out_name='mobile_sensor_svm')
def prediction_results(seed=1234):
    """Run the tau-prediction experiments for both text datasets.

    The RNG is reseeded before each dataset so the two runs are
    independently reproducible.
    """
    for dataset, min_history in [('fixed_text', 50), ('free_text', 450)]:
        np.random.seed(seed)
        for event, model in [('keyname', 'pohmm'), ('none', 'hmm')]:
            dataset_prediction_results(dataset, event,
                                       out_name='%s_%s' % (dataset, model),
                                       min_history=min_history, max_history=None)
def plot_pohmm_example(dataset, seed=1234):
    """Fit a POHMM to one randomly chosen user and plot its density
    against the empirical distribution."""
    np.random.seed(seed)
    df = load_data(dataset)
    users = df.index.get_level_values(0)
    chosen = np.random.choice(users.unique())
    df = df[users == chosen]
    df = preprocess_data(df, 'keyname', ['tau'])
    model = pohmm_factory(df)
    plot_model_empirical_pdf(df, model, 1000)
    save_fig('%s_pohmm_example' % dataset)
def plot_montecarlo_hmm_vs_pohmm(dataset):
    """Plot Monte Carlo goodness-of-fit p-values: HMM vs. POHMM."""
    hmm_p = load_results('%s_hmm_montecarlo_pvalues' % dataset)
    pohmm_p = load_results('%s_pohmm_montecarlo_pvalues' % dataset)
    plot_hmm_vs_pohmm_pvalues(hmm_p, pohmm_p)
    save_fig('%s_hmm_vs_pohmm_pvalues' % dataset)
def plot_roc_curves_hmm_vs_pohmm(dataset):
    """Overlay the session ROC curves of all classifiers on one dataset.

    The password dataset has per-user ROC curves; everything else is
    pivoted by fold.
    """
    pivot = 'reference_user' if dataset == 'password' else 'fold'
    systems = [('Manhattan', 'manhattan'),
               ('Manhattan (scaled)', 'scaled_manhattan'),
               ('SVM (one-class)', 'svm'),
               ('HMM', 'hmm'),
               ('POHMM', 'pohmm')]
    curves = [(label, session_roc(load_results('%s_%s_session_scores' % (dataset, key)), pivot))
              for label, key in systems]
    plot_roc(curves, 'Model', pivot)
    save_fig(dataset + '_roc')
def summary_table(m, threshold=0.05):
    """Build and save the cross-dataset summary table for one measure.

    *m* must be one of 'ACC', 'EER' or 'AMRT' (other values leave
    ``measures`` unset in the inner loop and would raise).  Each cell is
    'mean (std)'; the best system per dataset is starred, as is any system
    whose difference from the best is not significant under a Wilcoxon
    signed-rank test at the Bonferroni-corrected *threshold*.
    """
    rows = []
    if m == 'AMRT':
        # Continuous verification only exists for the event-level models
        SYSTEMS = ['hmm', 'pohmm']
        COLUMNS = ['dataset', 'HMM', 'POHMM']
    else:
        SYSTEMS = ['manhattan', 'scaled_manhattan', 'svm', 'hmm', 'pohmm']
        COLUMNS = ['dataset', 'Manhattan', 'Manhattan (scaled)', 'SVM (one-class)', 'HMM', 'POHMM']
    for dataset in ['password', 'keypad', 'mobile', 'mobile_sensor', 'fixed_text', 'free_text']:
        row = []
        # The password dataset uses the user-dependent variants of ACC/EER
        if ((m == 'EER') or (m == 'ACC')) and (dataset == 'password'):
            measure = 'U-' + m
        else:
            measure = m
        means = []
        system_measures = []
        for system in SYSTEMS:
            # NOTE(review): for AMRT these session scores are loaded but unused
            session_scores = load_results('%s_%s_session_scores' % (dataset, system))
            if measure == 'U-ACC':
                measures = session_identification(session_scores).groupby('query_user').apply(ACC)
            elif measure == 'U-EER':
                measures = session_roc(session_scores, pivot='reference_user').groupby('reference_user').apply(EER)
            elif measure == 'ACC':
                measures = session_identification(session_scores).groupby('fold').apply(ACC)
            elif measure == 'EER':
                measures = session_roc(session_scores, pivot='fold').groupby('fold').apply(EER)
            elif measure == 'AMRT':
                # Recompute the lockout summary from the saved event scores
                scores = load_results('%s_%s_event_scores' % (dataset, system))
                penalty = scores_penalty(scores)
                continuous_ver = continuous_verification(penalty)
                measures = continuous_ver['amrt']
            system_measures.append(measures.values)
            means.append(measures.mean())
            row.append('%.3f (%.3f)' % (measures.mean(), measures.std()))
        means = np.array(means)
        # Best system: highest mean for accuracy measures, lowest otherwise
        if 'ACC' in measure:
            idx = np.argmax(means)
        else:
            idx = np.argmin(means)
        row[idx] = '*' + row[idx] + '*'
        # Star any system not significantly different from the best
        # (Bonferroni correction over the remaining comparisons)
        for i in range(len(system_measures)):
            if i == idx:
                continue
            _, pvalue = wilcoxon(system_measures[idx], system_measures[i])
            if pvalue > threshold/(len(system_measures) - 1):
                row[i] = '*' + row[i] + '*'
        rows.append([dataset] + row)
    df = pd.DataFrame(rows, columns=COLUMNS)
    df = df.set_index('dataset')
    save_results(df, 'summary_%s' % m)
| en | 0.799447 | Create stratified k-folds Obtain identification and verification results using stratified k-fold cross validation and a model that scores a sample fit_model_fn should be a function that takes all the samples from a single user and returns a fitted model score_model_fn should be a function that takes a model and a single sample and scores the sample for the model # scores.set_index(['fold','reference_user','query_user','query_session'], inplace=True) Obtain identification and verification results using stratified k-fold cross validation and a model that scores a sample Creates a dataframe with cols: fold, reference_user, query_user, query_session, event_idx Args: folds: list of folds model: function that takes all the samples from a single user and returns a fitted model See sklearn.metrics.roc_curve # Generate an ROC curve for each fold, ordered by increasing threshold # interpolate to get the same threshold values in each fold Determine the maximum lockout time for each impostor/query sample # thresh = lockout.loc[tuple(df.iloc[0][['query_user', 'query_session']].values)].squeeze() Determine the maximum lockout time for each impostor/query sample Obtain rank-n classification accuracy for each fold Obtain the EER for one fold # line segment a given by endpoints a1, a2 # line segment b given by endpoints b1, b2 Area under the ROC curve Symmetric mean absolute prediction error Obtain results for a given dataset and features conditioned on the event column. 
# Load and preprocess the dataset # Create the validation folds # Aggregate and normalize the event scores within each session # Session and continuous identification, verification results # Identification of each event # Minimum rejection time # Summarize of session results # User-dependent EER is obtained by deriving an ROC curve for each user # Summarize continuous results, CI by session # Maximum lockout time, averaged for each session (against all reference users), CI by session # Plot a penalty function example # plot the error and ROC curves Obtain predictions for each model. Create stratified folds Train on 1-n_folds. Use the last fold to make predictions for each event # Load and preprocess the dataset # from .data import reduce_dataset # df = reduce_dataset(df, num_users=5, min_samples=1, max_samples=1) # fold, ref user, query user, query session, into future, event, ground truth, prediction # pred = hmm.predict_df(df[:i])[0] # Session and continuous identification, verification results # Summarize of session results # User-dependent EER is obtained by deriving an ROC curve for each user #DATASETS: # feature_vector_results(dataset, '%s_features' % dataset, manhattan_factory, out_name='%s_manhattan' % dataset) | 2.22557 | 2 |
tv_shows_api/admin.py | ataryihia/tv-shows-recommendations | 0 | 6612568 | <reponame>ataryihia/tv-shows-recommendations<gh_stars>0
from django.contrib import admin
from tv_shows_api import models
# Register your models here.
admin.site.register(models.UseProfileInfo)
| from django.contrib import admin
from tv_shows_api import models
# Register your models here.
admin.site.register(models.UseProfileInfo) | en | 0.968259 | # Register your models here. | 1.401469 | 1 |
python/dungeon_crawler/monster.py | matheuskiser/pdx_code_guild | 0 | 6612569 | <filename>python/dungeon_crawler/monster.py
class Monster(object):
def __init__(self):
self.name = "Fluffy"
self.health = 100
self.hit_points = 10
def get_name(self):
return self.name
def get_health(self):
return self.health
def get_hit_points(self):
return self.hit_points
def take_hit(self, hit):
self.health = self.health - hit
def get_status(self):
print "Monster's health is: " + str(self.get_health())
class Fluffy(Monster):
def __init__(self, player_health, player_hit_points):
Monster.__init__(self)
self.name = "Fluffy"
self.health = player_health * 1.2
self.hit_points = player_hit_points * .4
class Ghost(Monster):
def __init__(self, player_health, player_hit_points):
Monster.__init__(self)
self.name = "Ghost"
self.health = player_health * 1.4
self.hit_points = player_hit_points * .6
class Clown(Monster):
def __init__(self, player_health, player_hit_points):
Monster.__init__(self)
self.name = "Clown"
self.health = player_health * 1.6
self.hit_points = player_hit_points * .8
| <filename>python/dungeon_crawler/monster.py
class Monster(object):
def __init__(self):
self.name = "Fluffy"
self.health = 100
self.hit_points = 10
def get_name(self):
return self.name
def get_health(self):
return self.health
def get_hit_points(self):
return self.hit_points
def take_hit(self, hit):
self.health = self.health - hit
def get_status(self):
print "Monster's health is: " + str(self.get_health())
class Fluffy(Monster):
def __init__(self, player_health, player_hit_points):
Monster.__init__(self)
self.name = "Fluffy"
self.health = player_health * 1.2
self.hit_points = player_hit_points * .4
class Ghost(Monster):
def __init__(self, player_health, player_hit_points):
Monster.__init__(self)
self.name = "Ghost"
self.health = player_health * 1.4
self.hit_points = player_hit_points * .6
class Clown(Monster):
def __init__(self, player_health, player_hit_points):
Monster.__init__(self)
self.name = "Clown"
self.health = player_health * 1.6
self.hit_points = player_hit_points * .8
| none | 1 | 3.337032 | 3 | |
version.py | kmggh/python-simple-machine | 0 | 6612570 | # coding: utf-8
# © 2018 by <NAME>. All rights reserved.
"""The semantic version number."""
MAJOR = 1
MINOR = 2
PATCH = 1
VERSION = '{0}.{1}.{2}'.format(MAJOR, MINOR, PATCH)
| # coding: utf-8
# © 2018 by <NAME>. All rights reserved.
"""The semantic version number."""
MAJOR = 1
MINOR = 2
PATCH = 1
VERSION = '{0}.{1}.{2}'.format(MAJOR, MINOR, PATCH)
| en | 0.911758 | # coding: utf-8 # © 2018 by <NAME>. All rights reserved. The semantic version number. | 1.595458 | 2 |
article-subj-from-chebi.py | rwst/wikidata-molbio | 2 | 6612571 | <gh_stars>1-10
import os, json, argparse, sys, datetime, time
import pronto, six
"""
bzcat latest-all.json.bz2 |wikibase-dump-filter --simplify --claim 'P698&P921' |jq '[.id,.claims.P698,.claims.P921]' -c >PMID.ndjson
"""
# Initiate the parser
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--output_qs", help="output to QS",
action="store_true")
parser.add_argument("-q", "--query", help="perform SPARQL query",
action="store_true")
# Read arguments from the command line
args = parser.parse_args()
# Check for --version or -V
QS = args.output_qs
dontquery = not args.query
script = os.path.basename(sys.argv[0])[:-3]
print('Reading ChEBI')
ont = pronto.Ontology('chebi.obo')
if dontquery is False:
print('performing query...')
ret = os.popen('wd sparql {}.rq >{}.json'.format(script, script))
if ret.close() is not None:
raise
file = open('{}.json'.format(script))
s = file.read()
jol = json.loads(s)
dups_with_pmid = False
for d in jol:
chebid = 'CHEBI:' + d.get('value').get('value')
items = d.get('items')
lab = d.get('itemLabels')
term = ont.get(chebid)
if any(xref.id.startswith('PMID') for xref in term.xrefs):
dups_with_pmid = True
print('{} items:{} |{}|'.format(chebid, items, lab))
if dups_with_pmid:
print('!!!')
if dontquery is False:
print('performing query...')
ret = os.popen('wd sparql {}.rq1 >{}1.json'.format(script, script))
if ret.close() is not None:
raise
file = open('{}1.json'.format(script))
s = file.read()
jol = json.loads(s)
chebits = {}
for d in jol:
item = d.get('item')
chebid = 'CHEBI:' + d.get('chebi')
chebits[chebid] = item
pmids = {}
print('reading dump data...')
file = open('PMID.ndjson')
for line in file.readlines():
arr = json.loads(line.strip())
qit = arr[0]
pma = arr[1]
if len(pma) == 0:
continue
pmid = pma[0]
subj = arr[2]
if subj is None:
subj = []
p = pmids.get(pmid)
if p is None:
pmids[pmid] = ([qit], subj)
else:
p[0].append(qit)
p[1].extend(subj)
blacklist = []
for chebid in chebits.keys():
if chebid in blacklist:
continue
term = ont.get(chebid)
if term is None or term.obsolete:
print("CAN'T HAPPEN: {}".format(chebid))
continue
chebit = chebits.get(chebid)
pms = []
if term.definition is not None and term.xrefs is not None:
for xref in term.xrefs:
if xref.id.startswith('PMID'):
pms.append(xref.id[5:])
for pmid in pms:
p = pmids.get(pmid)
if p is None:
print('PMID {} is missing'.format(pmid))
continue
pmits,pmsbj = p
if chebit in pmsbj:
continue
if QS:
print('{}|P921|{}|S248|Q95689128|S683|"{}"'.format(min(pmits), chebit, chebid[6:]))
else:
j = {"id": min(pmits),
"claims": {
"P921": { "value": chebit,
"references": { "P248": "Q95689128", "P683": chebid[6:]} },
}
}
f = open('t.json', 'w')
f.write(json.dumps(j))
f.close()
print(json.dumps(j), flush=True)
ret = os.popen('wd ee t.json --summary article-subj-from-chebi')
print(ret.read())
if ret.close() is not None:
print('ERROR')
| import os, json, argparse, sys, datetime, time
import pronto, six
"""
bzcat latest-all.json.bz2 |wikibase-dump-filter --simplify --claim 'P698&P921' |jq '[.id,.claims.P698,.claims.P921]' -c >PMID.ndjson
"""
# Initiate the parser
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--output_qs", help="output to QS",
action="store_true")
parser.add_argument("-q", "--query", help="perform SPARQL query",
action="store_true")
# Read arguments from the command line
args = parser.parse_args()
# Check for --version or -V
QS = args.output_qs
dontquery = not args.query
script = os.path.basename(sys.argv[0])[:-3]
print('Reading ChEBI')
ont = pronto.Ontology('chebi.obo')
if dontquery is False:
print('performing query...')
ret = os.popen('wd sparql {}.rq >{}.json'.format(script, script))
if ret.close() is not None:
raise
file = open('{}.json'.format(script))
s = file.read()
jol = json.loads(s)
dups_with_pmid = False
for d in jol:
chebid = 'CHEBI:' + d.get('value').get('value')
items = d.get('items')
lab = d.get('itemLabels')
term = ont.get(chebid)
if any(xref.id.startswith('PMID') for xref in term.xrefs):
dups_with_pmid = True
print('{} items:{} |{}|'.format(chebid, items, lab))
if dups_with_pmid:
print('!!!')
if dontquery is False:
print('performing query...')
ret = os.popen('wd sparql {}.rq1 >{}1.json'.format(script, script))
if ret.close() is not None:
raise
file = open('{}1.json'.format(script))
s = file.read()
jol = json.loads(s)
chebits = {}
for d in jol:
item = d.get('item')
chebid = 'CHEBI:' + d.get('chebi')
chebits[chebid] = item
pmids = {}
print('reading dump data...')
file = open('PMID.ndjson')
for line in file.readlines():
arr = json.loads(line.strip())
qit = arr[0]
pma = arr[1]
if len(pma) == 0:
continue
pmid = pma[0]
subj = arr[2]
if subj is None:
subj = []
p = pmids.get(pmid)
if p is None:
pmids[pmid] = ([qit], subj)
else:
p[0].append(qit)
p[1].extend(subj)
blacklist = []
for chebid in chebits.keys():
if chebid in blacklist:
continue
term = ont.get(chebid)
if term is None or term.obsolete:
print("CAN'T HAPPEN: {}".format(chebid))
continue
chebit = chebits.get(chebid)
pms = []
if term.definition is not None and term.xrefs is not None:
for xref in term.xrefs:
if xref.id.startswith('PMID'):
pms.append(xref.id[5:])
for pmid in pms:
p = pmids.get(pmid)
if p is None:
print('PMID {} is missing'.format(pmid))
continue
pmits,pmsbj = p
if chebit in pmsbj:
continue
if QS:
print('{}|P921|{}|S248|Q95689128|S683|"{}"'.format(min(pmits), chebit, chebid[6:]))
else:
j = {"id": min(pmits),
"claims": {
"P921": { "value": chebit,
"references": { "P248": "Q95689128", "P683": chebid[6:]} },
}
}
f = open('t.json', 'w')
f.write(json.dumps(j))
f.close()
print(json.dumps(j), flush=True)
ret = os.popen('wd ee t.json --summary article-subj-from-chebi')
print(ret.read())
if ret.close() is not None:
print('ERROR') | en | 0.188222 | bzcat latest-all.json.bz2 |wikibase-dump-filter --simplify --claim 'P698&P921' |jq '[.id,.claims.P698,.claims.P921]' -c >PMID.ndjson # Initiate the parser # Read arguments from the command line # Check for --version or -V | 2.197506 | 2 |
Stack/540.Zigzag Iterator/Solution.py | Zhenye-Na/LxxxCode | 12 | 6612572 | <filename>Stack/540.Zigzag Iterator/Solution.py<gh_stars>10-100
from collections import deque
class ZigzagIterator:
"""
@param: v1: A 1d vector
@param: v2: A 1d vector
"""
def __init__(self, v1, v2):
# do intialization if necessary
self.v1 = deque(v1)
self.v2 = deque(v2)
self.flag = 0
"""
@return: An integer
"""
def next(self):
# write your code here
if self.flag % 2 == 1:
if self.v1:
return self.v1.popleft()
else:
return self.v2.popleft()
else:
if self.v2:
return self.v2.popleft()
else:
return self.v1.popleft()
"""
@return: True if has next
"""
def hasNext(self):
# write your code here
if len(self.v1) + len(self.v2) > 0:
self.flag += 1
return True
else:
return False
# Your ZigzagIterator object will be instantiated and called as such:
# solution, result = ZigzagIterator(v1, v2), []
# while solution.hasNext(): result.append(solution.next())
# Output result | <filename>Stack/540.Zigzag Iterator/Solution.py<gh_stars>10-100
from collections import deque
class ZigzagIterator:
"""
@param: v1: A 1d vector
@param: v2: A 1d vector
"""
def __init__(self, v1, v2):
# do intialization if necessary
self.v1 = deque(v1)
self.v2 = deque(v2)
self.flag = 0
"""
@return: An integer
"""
def next(self):
# write your code here
if self.flag % 2 == 1:
if self.v1:
return self.v1.popleft()
else:
return self.v2.popleft()
else:
if self.v2:
return self.v2.popleft()
else:
return self.v1.popleft()
"""
@return: True if has next
"""
def hasNext(self):
# write your code here
if len(self.v1) + len(self.v2) > 0:
self.flag += 1
return True
else:
return False
# Your ZigzagIterator object will be instantiated and called as such:
# solution, result = ZigzagIterator(v1, v2), []
# while solution.hasNext(): result.append(solution.next())
# Output result | en | 0.713403 | @param: v1: A 1d vector @param: v2: A 1d vector # do intialization if necessary @return: An integer # write your code here @return: True if has next # write your code here # Your ZigzagIterator object will be instantiated and called as such: # solution, result = ZigzagIterator(v1, v2), [] # while solution.hasNext(): result.append(solution.next()) # Output result | 3.5932 | 4 |
scripts/mrms/make_mrms_rasters.py | trentford/iem | 1 | 6612573 | <reponame>trentford/iem
"""
Generate a raster of XXhour precipitation totals from MRMS
run from RUN_10_AFTER.sh
"""
from __future__ import print_function
import datetime
import os
import sys
import tempfile
import subprocess
import json
import gzip
import unittest
import numpy as np
from PIL import Image
import pyiem.mrms as mrms
import pygrib
TMP = "/mesonet/tmp"
PQI = "/home/ldm/bin/pqinsert"
MISSED_FILES = []
DOWNLOADED_FILES = []
def convert_to_image(data):
"""Convert data with units of mm into image space
255 levels... wanna do 0 to 20 inches
index 255 is missing, index 0 is 0
0-1 -> 100 - 0.01 res || 0 - 25 -> 100 - 0.25 mm 0
1-5 -> 80 - 0.05 res || 25 - 125 -> 80 - 1.25 mm 100
5-20 -> 75 - 0.20 res || 125 - 500 -> 75 - 5 mm 180
000 -> 099 0.25mm 000.00 to 024.75
100 -> 179 1.25mm 025.00 to 123.75
180 -> 254 5.00mm 125.00 to 495.00
254 500.00+
255 MISSING/BAD DATA
"""
# Values above 500 mm are set to 254
imgdata = np.where(data >= 500, 254, 0)
imgdata = np.where(np.logical_and(data >= 125, data < 500),
180 + ((data - 125.) / 5.0), imgdata)
imgdata = np.where(np.logical_and(data >= 25, data < 125),
100 + ((data - 25.) / 1.25), imgdata)
imgdata = np.where(np.logical_and(data >= 0, data < 25),
data / 0.25, imgdata)
# -3 is no coverage -> 255
# -1 is missing, so zero
# Index 255 is missing
imgdata = np.where(data < 0, 0, imgdata)
imgdata = np.where(data < -1, 255, imgdata)
return imgdata
def cleanup():
"""Remove tmp downloaded files"""
for fn in DOWNLOADED_FILES:
if os.path.isfile(fn):
os.unlink(fn)
def is_realtime(gts):
"""Is this timestamp a realtime product"""
utcnow = datetime.datetime.utcnow()
return utcnow.strftime("%Y%m%d%H") == gts.strftime("%Y%m%d%H")
def doit(gts, hr):
"""
Actually generate a PNG file from the 8 NMQ tiles
"""
irealtime = is_realtime(gts)
routes = "ac" if irealtime else "a"
sts = gts - datetime.timedelta(hours=hr)
times = [gts]
if hr > 24:
times.append(gts - datetime.timedelta(hours=24))
if hr == 72:
times.append(gts - datetime.timedelta(hours=48))
metadata = {'start_valid': sts.strftime("%Y-%m-%dT%H:%M:%SZ"),
'end_valid': gts.strftime("%Y-%m-%dT%H:%M:%SZ"),
'units': 'mm'}
total = None
mproduct = "RadarOnly_QPE_24H" if hr >= 24 else "RadarOnly_QPE_01H"
for now in times:
gribfn = mrms.fetch(mproduct, now)
if gribfn is None:
print(("make_mrms_rasters.py[%s] MISSING %s\n %s\n"
) % (hr, now.strftime("%Y-%m-%dT%H:%MZ"), gribfn))
MISSED_FILES.append(gribfn)
return
DOWNLOADED_FILES.append(gribfn)
fp = gzip.GzipFile(gribfn, 'rb')
(tmpfp, tmpfn) = tempfile.mkstemp()
tmpfp = open(tmpfn, 'wb')
tmpfp.write(fp.read())
tmpfp.close()
grbs = pygrib.open(tmpfn)
grb = grbs[1]
os.unlink(tmpfn)
# careful here, how we deal with the two missing values!
if total is None:
total = grb['values']
else:
maxgrid = np.maximum(grb['values'], total)
total = np.where(np.logical_and(grb['values'] >= 0, total >= 0),
grb['values'] + total, maxgrid)
imgdata = convert_to_image(total)
(tmpfp, tmpfn) = tempfile.mkstemp()
# Create Image
png = Image.fromarray(imgdata.astype('u1'))
png.putpalette(mrms.make_colorramp())
png.save('%s.png' % (tmpfn,))
if irealtime:
# create a second PNG with null values set to black
imgdata = np.where(imgdata == 255, 0, imgdata)
png = Image.fromarray(imgdata.astype('u1'))
png.putpalette(mrms.make_colorramp())
png.save('%s_nn.png' % (tmpfn,))
# Now we need to generate the world file
mrms.write_worldfile('%s.wld' % (tmpfn,))
if irealtime:
mrms.write_worldfile('%s_nn.wld' % (tmpfn,))
# Inject WLD file
pqstr = ("%s -i -p 'plot %s %s "
"gis/images/4326/mrms/p%ih.wld GIS/mrms/p%ih_%s.wld wld' "
"%s.wld"
"") % (PQI, routes, gts.strftime("%Y%m%d%H%M"), hr, hr,
gts.strftime("%Y%m%d%H%M"), tmpfn)
subprocess.call(pqstr, shell=True)
if irealtime:
pqstr = ("%s -i -p 'plot c %s "
"gis/images/4326/mrms/p%ih_nn.wld "
"GIS/mrms/p%ih_%s.wld wld' "
"%s_nn.wld"
"") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
gts.strftime("%Y%m%d%H%M"), tmpfn)
subprocess.call(pqstr, shell=True)
# Now we inject into LDM
pqstr = ("%s -i -p 'plot %s %s "
"gis/images/4326/mrms/p%ih.png GIS/mrms/p%ih_%s.png png' "
"%s.png"
"") % (PQI, routes, gts.strftime("%Y%m%d%H%M"), hr, hr,
gts.strftime("%Y%m%d%H%M"), tmpfn)
subprocess.call(pqstr, shell=True)
if irealtime:
# Now we inject into LDM
pqstr = ("%s -i -p 'plot c %s "
"gis/images/4326/mrms/p%ih_nn.png "
"GIS/mrms/p%ih_%s.png png' "
"%s_nn.png"
"") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
gts.strftime("%Y%m%d%H%M"), tmpfn)
subprocess.call(pqstr, shell=True)
if irealtime:
# Create 900913 image
cmd = ("gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff "
"-tr 1000.0 1000.0 %s.png %s.tif") % (tmpfn, tmpfn)
subprocess.call(cmd, shell=True)
cmd = ("gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff "
"-tr 1000.0 1000.0 %s_nn.png %s_nn.tif") % (tmpfn, tmpfn)
subprocess.call(cmd, shell=True)
# Insert into LDM
pqstr = ("%s -i -p 'plot c %s "
"gis/images/900913/mrms/p%ih.tif "
"GIS/mrms/p%ih_%s.tif tif' "
"%s.tif"
"") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
gts.strftime("%Y%m%d%H%M"), tmpfn)
subprocess.call(pqstr, shell=True)
pqstr = ("%s -i -p 'plot c %s "
"gis/images/900913/mrms/p%ih_nn.tif "
"GIS/mrms/p%ih_%s.tif tif' "
"%s_nn.tif"
"") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
gts.strftime("%Y%m%d%H%M"), tmpfn)
subprocess.call(pqstr, shell=True)
j = open("%s.json" % (tmpfn,), 'w')
j.write(json.dumps(dict(meta=metadata)))
j.close()
# Insert into LDM
pqstr = ("%s -i -p 'plot c %s "
"gis/images/4326/mrms/p%ih.json "
"GIS/mrms/p%ih_%s.json json'"
" %s.json") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
gts.strftime("%Y%m%d%H%M"), tmpfn)
subprocess.call(pqstr, shell=True)
pqstr = ("%s -i -p 'plot c %s "
"gis/images/4326/mrms/p%ih_nn.json "
"GIS/mrms/p%ih_%s.json json'"
" %s.json") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
gts.strftime("%Y%m%d%H%M"), tmpfn)
subprocess.call(pqstr, shell=True)
for suffix in ['tif', 'json', 'png', 'wld']:
fn = '%s.%s' % (tmpfn, suffix)
if os.path.isfile(fn):
os.unlink(fn)
if irealtime:
for suffix in ['tif', 'png', 'wld']:
fn = '%s_nn.%s' % (tmpfn, suffix)
if os.path.isfile(fn):
os.unlink(fn)
os.close(tmpfp)
os.unlink(tmpfn)
def main(argv):
    """Build the valid UTC timestamp from CLI args and process each interval.

    argv layout: [script, year, month, day, hour].
    """
    gts = datetime.datetime(int(argv[1]), int(argv[2]),
                            int(argv[3]), int(argv[4]), 0)
    # Accumulation intervals (hours) to rasterize for this timestamp.
    for hr in [1, 24, 48, 72]:
        doit(gts, hr)
    # Remove any grib files fetched during this run.
    cleanup()
if __name__ == "__main__":
main(sys.argv)
class test(unittest.TestCase):
    """Spot checks for the precipitation color-ramp conversion."""

    def test_ramp(self):
        """Known mm values map to their documented ramp indices."""
        img = convert_to_image(np.array([25, ]))
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual(img[0], 100)
        # Boundaries of the other ramp segments (see convert_to_image docs).
        self.assertEqual(convert_to_image(np.array([0.0]))[0], 0)
        self.assertEqual(convert_to_image(np.array([500.0]))[0], 254)
        self.assertEqual(convert_to_image(np.array([-3.0]))[0], 255)
| """
Generate a raster of XXhour precipitation totals from MRMS
run from RUN_10_AFTER.sh
"""
from __future__ import print_function
import datetime
import os
import sys
import tempfile
import subprocess
import json
import gzip
import unittest
import numpy as np
from PIL import Image
import pyiem.mrms as mrms
import pygrib
TMP = "/mesonet/tmp"
PQI = "/home/ldm/bin/pqinsert"
MISSED_FILES = []
DOWNLOADED_FILES = []
def convert_to_image(data):
    """Map precipitation in mm onto the 0-255 palette index space.

    Ramp (piecewise linear, 254 index values for 0-500 mm):
      000-099  0.25 mm/step   covers   0.00 - 24.75 mm
      100-179  1.25 mm/step   covers  25.00 - 123.75 mm
      180-254  5.00 mm/step   covers 125.00 - 495.00 mm (254 => 500+ mm)
      255      missing / no coverage (input < -1)
    Inputs in [-1, 0) are treated as zero precipitation.
    """
    segments = [
        data >= 500,
        np.logical_and(data >= 125, data < 500),
        np.logical_and(data >= 25, data < 125),
        np.logical_and(data >= 0, data < 25),
        data < -1,
    ]
    levels = [
        254,
        180 + ((data - 125.) / 5.0),
        100 + ((data - 25.) / 1.25),
        data / 0.25,
        255,
    ]
    # Anything left over is the [-1, 0) "missing but assume zero" band.
    return np.select(segments, levels, default=0)
def cleanup():
    """Delete every grib file this run fetched; already-gone files are skipped."""
    for path in filter(os.path.isfile, DOWNLOADED_FILES):
        os.unlink(path)
def is_realtime(gts):
    """Return True when ``gts`` falls within the current UTC hour."""
    hour_key = "%Y%m%d%H"
    return gts.strftime(hour_key) == datetime.datetime.utcnow().strftime(hour_key)
def doit(gts, hr):
    """Build and publish MRMS precipitation rasters for one interval.

    Sums RadarOnly QPE grids covering the ``hr`` hours ending at ``gts``,
    renders a paletted PNG (plus, for realtime runs, a second "_nn" PNG with
    missing data painted black), writes world files, warps realtime output
    to EPSG:3857 GeoTIFFs, and inserts everything into LDM via pqinsert.

    Parameters
    ----------
    gts : datetime.datetime
        End of the accumulation window (UTC).
    hr : int
        Accumulation interval in hours (1, 24, 48 or 72).
    """
    irealtime = is_realtime(gts)
    # Realtime products go to archive + current ("ac"); reruns archive only.
    routes = "ac" if irealtime else "a"
    sts = gts - datetime.timedelta(hours=hr)
    # 24 hour grids are summed to cover the 48/72 hour windows.
    times = [gts]
    if hr > 24:
        times.append(gts - datetime.timedelta(hours=24))
    if hr == 72:
        times.append(gts - datetime.timedelta(hours=48))
    metadata = {'start_valid': sts.strftime("%Y-%m-%dT%H:%M:%SZ"),
                'end_valid': gts.strftime("%Y-%m-%dT%H:%M:%SZ"),
                'units': 'mm'}
    total = None
    mproduct = "RadarOnly_QPE_24H" if hr >= 24 else "RadarOnly_QPE_01H"
    for now in times:
        gribfn = mrms.fetch(mproduct, now)
        if gribfn is None:
            print(("make_mrms_rasters.py[%s] MISSING %s\n %s\n"
                   ) % (hr, now.strftime("%Y-%m-%dT%H:%MZ"), gribfn))
            MISSED_FILES.append(gribfn)
            return
        DOWNLOADED_FILES.append(gribfn)
        # Decompress the grib into a temp file that pygrib can open.
        (tmpfd, tmpfn) = tempfile.mkstemp()
        # mkstemp hands back a raw OS descriptor; close it before re-opening
        # the path, otherwise it leaks once per grid (previous behavior).
        os.close(tmpfd)
        with gzip.GzipFile(gribfn, 'rb') as fp:
            with open(tmpfn, 'wb') as tmpfp:
                tmpfp.write(fp.read())
        grbs = pygrib.open(tmpfn)
        values = grbs[1]['values']
        grbs.close()
        os.unlink(tmpfn)
        # careful here, how we deal with the two missing values!
        if total is None:
            total = values
        else:
            # Where either grid is negative (missing sentinel), keep the
            # larger of the two instead of summing in the sentinel.
            maxgrid = np.maximum(values, total)
            total = np.where(np.logical_and(values >= 0, total >= 0),
                             values + total, maxgrid)
    imgdata = convert_to_image(total)
    (tmpfd, tmpfn) = tempfile.mkstemp()
    os.close(tmpfd)  # only the path is used below; PIL/gdal write by name
    # Create Image
    png = Image.fromarray(imgdata.astype('u1'))
    png.putpalette(mrms.make_colorramp())
    png.save('%s.png' % (tmpfn,))
    if irealtime:
        # create a second PNG with null values set to black
        imgdata = np.where(imgdata == 255, 0, imgdata)
        png = Image.fromarray(imgdata.astype('u1'))
        png.putpalette(mrms.make_colorramp())
        png.save('%s_nn.png' % (tmpfn,))
    # Now we need to generate the world file
    mrms.write_worldfile('%s.wld' % (tmpfn,))
    if irealtime:
        mrms.write_worldfile('%s_nn.wld' % (tmpfn,))
    # Inject WLD file.  NOTE: pqinsert command lines are run through the
    # shell; all interpolated values are locally generated, not user input.
    pqstr = ("%s -i -p 'plot %s %s "
             "gis/images/4326/mrms/p%ih.wld GIS/mrms/p%ih_%s.wld wld' "
             "%s.wld"
             "") % (PQI, routes, gts.strftime("%Y%m%d%H%M"), hr, hr,
                    gts.strftime("%Y%m%d%H%M"), tmpfn)
    subprocess.call(pqstr, shell=True)
    if irealtime:
        pqstr = ("%s -i -p 'plot c %s "
                 "gis/images/4326/mrms/p%ih_nn.wld "
                 "GIS/mrms/p%ih_%s.wld wld' "
                 "%s_nn.wld"
                 "") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
                        gts.strftime("%Y%m%d%H%M"), tmpfn)
        subprocess.call(pqstr, shell=True)
    # Now we inject into LDM
    pqstr = ("%s -i -p 'plot %s %s "
             "gis/images/4326/mrms/p%ih.png GIS/mrms/p%ih_%s.png png' "
             "%s.png"
             "") % (PQI, routes, gts.strftime("%Y%m%d%H%M"), hr, hr,
                    gts.strftime("%Y%m%d%H%M"), tmpfn)
    subprocess.call(pqstr, shell=True)
    if irealtime:
        # Now we inject into LDM
        pqstr = ("%s -i -p 'plot c %s "
                 "gis/images/4326/mrms/p%ih_nn.png "
                 "GIS/mrms/p%ih_%s.png png' "
                 "%s_nn.png"
                 "") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
                        gts.strftime("%Y%m%d%H%M"), tmpfn)
        subprocess.call(pqstr, shell=True)
    if irealtime:
        # Create 900913 (web mercator) image
        cmd = ("gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff "
               "-tr 1000.0 1000.0 %s.png %s.tif") % (tmpfn, tmpfn)
        subprocess.call(cmd, shell=True)
        cmd = ("gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff "
               "-tr 1000.0 1000.0 %s_nn.png %s_nn.tif") % (tmpfn, tmpfn)
        subprocess.call(cmd, shell=True)
        # Insert into LDM
        pqstr = ("%s -i -p 'plot c %s "
                 "gis/images/900913/mrms/p%ih.tif "
                 "GIS/mrms/p%ih_%s.tif tif' "
                 "%s.tif"
                 "") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
                        gts.strftime("%Y%m%d%H%M"), tmpfn)
        subprocess.call(pqstr, shell=True)
        pqstr = ("%s -i -p 'plot c %s "
                 "gis/images/900913/mrms/p%ih_nn.tif "
                 "GIS/mrms/p%ih_%s.tif tif' "
                 "%s_nn.tif"
                 "") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
                        gts.strftime("%Y%m%d%H%M"), tmpfn)
        subprocess.call(pqstr, shell=True)
    # Sidecar metadata describing the accumulation window.
    with open("%s.json" % (tmpfn,), 'w') as j:
        j.write(json.dumps(dict(meta=metadata)))
    # Insert into LDM
    pqstr = ("%s -i -p 'plot c %s "
             "gis/images/4326/mrms/p%ih.json "
             "GIS/mrms/p%ih_%s.json json'"
             " %s.json") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
                            gts.strftime("%Y%m%d%H%M"), tmpfn)
    subprocess.call(pqstr, shell=True)
    # NOTE(review): this second insert sends the same %s.json file (not a
    # _nn variant) to a p%ih_nn.json product -- looks like copy/paste from
    # the PNG section; confirm whether a distinct _nn json was intended.
    pqstr = ("%s -i -p 'plot c %s "
             "gis/images/4326/mrms/p%ih_nn.json "
             "GIS/mrms/p%ih_%s.json json'"
             " %s.json") % (PQI, gts.strftime("%Y%m%d%H%M"), hr, hr,
                            gts.strftime("%Y%m%d%H%M"), tmpfn)
    subprocess.call(pqstr, shell=True)
    # Remove the local working files now that they are queued in LDM.
    for suffix in ['tif', 'json', 'png', 'wld']:
        fn = '%s.%s' % (tmpfn, suffix)
        if os.path.isfile(fn):
            os.unlink(fn)
    if irealtime:
        for suffix in ['tif', 'png', 'wld']:
            fn = '%s_nn.%s' % (tmpfn, suffix)
            if os.path.isfile(fn):
                os.unlink(fn)
    # The bare mkstemp file itself (no suffix); its fd was closed above.
    os.unlink(tmpfn)
def main(argv):
    """Drive raster generation for every accumulation interval.

    argv layout: [script, year, month, day, hour] -- the valid UTC time.
    """
    year, month, day, hour = (int(v) for v in argv[1:5])
    gts = datetime.datetime(year, month, day, hour, 0)
    for interval in (1, 24, 48, 72):
        doit(gts, interval)
    cleanup()
# Script entry point; expects year, month, day and hour on the command line.
if __name__ == "__main__":
    main(sys.argv)
class test(unittest.TestCase):
"""What, test code, Shirely you jest"""
def test_ramp(self):
""" Check our work """
img = convert_to_image(np.array([25, ]))
self.assertEquals(img[0], 100) | en | 0.736262 | Generate a raster of XXhour precipitation totals from MRMS run from RUN_10_AFTER.sh Convert data with units of mm into image space 255 levels... wanna do 0 to 20 inches index 255 is missing, index 0 is 0 0-1 -> 100 - 0.01 res || 0 - 25 -> 100 - 0.25 mm 0 1-5 -> 80 - 0.05 res || 25 - 125 -> 80 - 1.25 mm 100 5-20 -> 75 - 0.20 res || 125 - 500 -> 75 - 5 mm 180 000 -> 099 0.25mm 000.00 to 024.75 100 -> 179 1.25mm 025.00 to 123.75 180 -> 254 5.00mm 125.00 to 495.00 254 500.00+ 255 MISSING/BAD DATA # Values above 500 mm are set to 254 # -3 is no coverage -> 255 # -1 is missing, so zero # Index 255 is missing Remove tmp downloaded files Is this timestamp a realtime product Actually generate a PNG file from the 8 NMQ tiles # careful here, how we deal with the two missing values! # Create Image # create a second PNG with null values set to black # Now we need to generate the world file # Inject WLD file # Now we inject into LDM # Now we inject into LDM # Create 900913 image # Insert into LDM # Insert into LDM We are always explicitly called What, test code, Shirely you jest Check our work | 2.608603 | 3 |
mrtopo/mutator/mutator.py | FaizChishtie/MrTopo | 1 | 6612574 | <reponame>FaizChishtie/MrTopo
"""
MrTopo - Mutator - handles mutation of networks
"""
from mrtopo.logger import log
from mrtopo.structures.mutantnetwork import MutantNetwork
from mrtopo.mutator.operators import Operations, do
from shutil import copyfile
from math import floor
import random
GENERATIONS = 30
def mutate(network, number_of_mutations=30):
    """Produce up to ``number_of_mutations`` mutants of ``network``.

    Each round applies one randomly chosen operation to a deep copy of the
    network; rounds whose operation yields nothing are silently skipped, so
    the returned list may be shorter than ``number_of_mutations``.
    """
    log("Mutator - mutating network " + str(number_of_mutations) + " times")
    survivors = []  # collected MutantNetwork results
    for generation in range(number_of_mutations):
        chosen_op = random.choice(list(Operations))
        # Mutate a deep copy so the source network is never modified.
        candidate = do(chosen_op, network.deep_copy(), generation)
        if candidate:
            survivors.append(candidate)
    return survivors
def get_var_names(coll):
    """Extract the variable name from each item's first field.

    Each ``item[0]`` is expected to look like ``"name = value"``; the text
    before the first ``=`` is returned, stripped of whitespace.  Fields with
    no ``=`` are returned whole (stripped).  The original implementation
    scanned character by character; ``str.partition`` does the same in one
    call.
    """
    return [item[0].partition("=")[0].strip() for item in coll]
def mutated_lines(n_remove, network_arr):
    """Pick ``n_remove`` entries from ``network_arr`` (links to be removed).

    Sampling is with replacement, so the result may contain duplicates.
    """
    return [random.choice(network_arr) for _ in range(n_remove)]
| """
MrTopo - Mutator - handles mutation of networks
"""
from mrtopo.logger import log
from mrtopo.structures.mutantnetwork import MutantNetwork
from mrtopo.mutator.operators import Operations, do
from shutil import copyfile
from math import floor
import random
GENERATIONS = 30
def mutate(network, number_of_mutations = 30):
log("Mutator - mutating network " + str(number_of_mutations) + " times")
mutant_networks = [] # type MutantNetwork
for i in range(number_of_mutations):
operation = random.choice(list(Operations))
mn = do(operation, network.deep_copy(), i) # mutate deep copy of network
if mn:
mutant_networks.append(mn)
return mutant_networks
def get_var_names(coll):
names = []
for item in coll:
name = ""
for c in item[0]:
if c == "=":
break
else:
name += str(c)
names.append(name.strip())
return names
def mutated_lines(n_remove, network_arr):
deleted = [] # links to be removed
for i in range(n_remove):
deleted.append(random.choice(network_arr))
return deleted | en | 0.821204 | MrTopo - Mutator - handles mutation of networks # type MutantNetwork # mutate deep copy of network # links to be removed | 2.739584 | 3 |
src/pyroe/ProcessedQuant.py | COMBINE-lab/pyroe | 0 | 6612575 | from .pyroe_utils import say
import pandas as pd
import os
import shutil
import urllib.request
import tarfile
from .load_fry import load_fry
class ProcessedQuant:
    """Metadata and quantification artifacts for one pre-processed dataset.

    Wraps one row of the bundled ``available_datasets.tsv`` sheet and knows
    how to fetch the quantification tarball, decompress it, and load it as
    an ``AnnData`` object (via ``load_fry``).
    """

    @staticmethod
    def get_available_dataset_df():
        """Return the available-dataset sheet as a pandas DataFrame.

        Each row describes one dataset that can be fetched.
        """
        # The sheet ships inside the package next to this module.
        location = os.path.dirname(os.path.realpath(__file__))
        my_file = os.path.join(location, "data", "available_datasets.tsv")
        available_datasets = pd.read_csv(my_file, sep="\t")
        return available_datasets

    @staticmethod
    def print_available_datasets():
        """Print the 1-based index and name of each available dataset."""
        available_datasets = ProcessedQuant.get_available_dataset_df()
        epilog = "\n".join(
            [
                "".join([f"{idx+1}", ". ", dataset_name])
                for (idx, dataset_name) in zip(
                    range(available_datasets.shape[0]),
                    available_datasets["dataset_name"].tolist(),
                )
            ]
        )
        epilog = " \n".join(["Index of the available datasets:", epilog])
        print(epilog)

    def __init__(self, dataset_id: int):
        """Instantiate from a 1-based dataset id.

        Run ``ProcessedQuant.print_available_datasets()`` to list valid ids.
        """
        available_datasets = ProcessedQuant.get_available_dataset_df()
        # dataset ids are 1-based (matching print_available_datasets), so
        # the valid range is [1, n_rows].  The previous check accepted 0
        # (which silently selected the last row via iloc[-1]) and rejected
        # the last valid id.
        if dataset_id < 1 or dataset_id > available_datasets.shape[0]:
            raise ValueError(
                "Invalid dataset_id, run "
                "ProcessedQuant.print_available_datasets() "
                "to get available dataset ids."
            )
        # Translate the 1-based id into a 0-based row index.
        available_dataset = available_datasets.iloc[dataset_id - 1, :]
        self.dataset_id = available_dataset["dataset_id"]
        self.chemistry = available_dataset["chemistry"]
        self.reference = available_dataset["reference"]
        self.dataset_name = available_dataset["dataset_name"]
        self.dataset_url = available_dataset["dataset_url"]
        self.fastq_url = available_dataset["fastq_url"]
        self.fastq_MD5sum = available_dataset["fastq_MD5sum"]
        self.delete_fastq = available_dataset["delete_fastq"]
        self.feature_barcode_csv_url = available_dataset["feature_barcode_csv_url"]
        self.multiplexing_library_csv_url = available_dataset[
            "multiplexing_library_csv_url"
        ]
        self.quant_tar_url = available_dataset["quant_tar_url"]
        # Filled in by fetch_quant / decompress_quant / load_quant.
        self.quant_path = None
        self.tar_path = None
        self.anndata = None

    def fetch_quant(
        self, tar_dir="quant_tar", file_name=None, force=False, quiet=False
    ):
        """
        Fetch the processed quantification tarball to a local directory.\\
        The path to the fetched tar file is stored
        as the `ProcessedQuant.tar_path` attribute.

        Parameters
        ----------
        tar_dir: `str` (default: `quant_tar`)
            The directory for saving the fetched tar file.
        file_name: `str` (default: dataset id)
            Customized file name of the fetched tar file.
            Default is the dataset id.
        force: `bool` (default: `False`)
            If `True`, any existing tar file will be overwritten.
        quiet: `bool` (default: `False`)
            If `True`, progress messages are suppressed.
        """
        self.check_validity()
        say(quiet, f"Fetching the quant result of dataset #{self.dataset_id}")
        # Reuse an already-fetched tar file unless force is requested.
        if self.tar_path is not None:
            if os.path.exists(self.tar_path) and (not force):
                say(
                    quiet,
                    "  - The tar_path attribute is not None and the path exists:",
                )
                say(quiet, f"    {self.tar_path}")
                say(quiet, "  - Pass force=True to fetch it again\n")
                return

        # folder for (temporarily) storing tar files.
        if not os.path.exists(tar_dir):
            os.makedirs(tar_dir)

        # Normalize the target file name, defaulting to "<dataset_id>.tar".
        if file_name is None:
            file_name = "".join([f"{self.dataset_id}", ".tar"])
        elif not file_name.endswith(".tar"):
            file_name = "".join([f"{file_name}", ".tar"])

        tar_path = os.path.join(tar_dir, file_name)
        if os.path.exists(tar_path):
            if force:
                say(quiet, "  - Overwriting the existing tar file:")
                say(quiet, f"    {tar_path}")
            else:
                say(quiet, "  - Use the existing file as tar_path:")
                say(quiet, f"    {tar_path}")
                say(quiet, "  - Pass force=True to overwrite it")
                self.tar_path = tar_path
                return

        # download tar file
        urllib.request.urlretrieve(self.quant_tar_url, tar_path)
        self.tar_path = tar_path
        say(quiet, "  - Fetched quant tar is saved as:")
        say(quiet, f"    {self.tar_path}")

    def decompress_quant(
        self,
        quant_dir="processed_quant",
        quant_path_name=None,
        force=False,
        quiet=False,
    ):
        """
        Decompress the fetched quantification to a local directory.\\
        The path to the decompressed quantification result is stored
        as the `ProcessedQuant.quant_path` attribute.

        Parameters
        ----------
        quant_dir: `str` (default: `processed_quant`)
            The directory for saving decompressed quantification result folder.
        quant_path_name: `str` (default: dataset id)
            Customized folder name of the quantification result folder.
            Default is the dataset id.
        force: `bool` (default: `False`)
            If `True`, any existing decompressed folder will be overwritten.
        quiet: `bool` (default: `False`)
            If `True`, progress messages are suppressed.
        """
        # make sure class is valid
        self.check_validity()

        # make sure tar file is valid
        if self.tar_path is None:
            raise ValueError(
                "tar_path attribute is None, run ProcessedQuant.fetch_quant() method to fetch the tar file."
            )

        say(
            quiet,
            f"Decompressing the quant result of dataset #{self.dataset_id} using:\n    {self.tar_path}",
        )

        # Reuse an already-decompressed folder unless force is requested.
        # (Previously this tested tar_path's existence -- a copy/paste slip;
        # the message and the attribute both concern quant_path.)
        if self.quant_path is not None:
            if os.path.exists(self.quant_path) and (not force):
                say(
                    quiet,
                    "  - The quant_path attribute is not None and the path exists:",
                )
                say(quiet, f"    {self.quant_path}")
                say(quiet, "  - pass force=True to decompress it again")
                return

        # check expected output dir
        if quant_path_name is None:
            quant_path_name = self.dataset_id

        quant_parent_dir = os.path.join(quant_dir, f"{quant_path_name}")

        if os.path.exists(quant_parent_dir):
            if force:
                say(quiet, "  - Removing existing quant folder:")
                say(quiet, f"    {quant_parent_dir}")
                shutil.rmtree(quant_parent_dir)
            else:
                say(quiet, "  - Use the existing directory as quant_path:")
                say(quiet, f"    {quant_parent_dir}")
                say(quiet, "  - pass force=True to overwrite it")
                self.quant_path = os.path.join(
                    quant_parent_dir, next(os.walk(quant_parent_dir))[1][0]
                )
                return

        # Decompress the tar file; close the handle when done (it was
        # previously left open).
        with tarfile.open(self.tar_path) as tf:
            tf.extractall(quant_parent_dir)
        # The tarball contains a single top-level folder; record its path.
        self.quant_path = os.path.join(
            quant_parent_dir, next(os.walk(quant_parent_dir))[1][0]
        )
        say(quiet, "  - Decompressed quant result is saved as:")
        say(quiet, f"    {self.quant_path}")

    def load_quant(
        self, output_format="scRNA", force=False, nonzero=False, quiet=False
    ):
        """
        Load the quantification result as the `ProcessedQuant.anndata` attribute.\\

        Parameters
        ----------
        output_format: `str` or `dict` (default: `scRNA`)
            A string represents one of the pre-defined output formats, which are "scRNA", "snRNA" and "velocity". \\
            If a customized format of the returned `AnnData` is needed, one can pass a dictionary.\\
            See [load_fry](https://github.com/COMBINE-lab/pyroe/blob/main/src/pyroe/load_fry.py) for details.
        force: `bool` (default: `False`)
            If `True`, reload even when `anndata` is already populated.
        nonzero: `bool` (default: `False`)
            If `True`, the genes that have zero expression across all cells will be removed.
        quiet: `bool` (default: `False`)
            If `True`, progress messages are suppressed.
        """
        self.check_validity()

        # make sure quant dir is valid
        if self.quant_path is None:
            raise ValueError(
                "The quant_path attribute is None, run ProcessedQuant.fetch_quant() and then ProcessedQuant.decompress_quant() to generate it."
            )
        if not os.path.exists(self.quant_path):
            raise ValueError(
                "The quant_path attribute is invalid, run ProcessedQuant.fetch_quant() and then ProcessedQuant.decompress_quant() to regenerate it."
            )
        if (self.anndata is not None) and (not force):
            say(quiet, "  - The anndata attribute is not None.")
            say(quiet, "  - pass force=True to update it")
            return

        say(quiet, f"Loading dataset #{self.dataset_id} from:")
        say(quiet, f"    {self.quant_path}")
        self.anndata = load_fry(
            frydir=self.quant_path,
            output_format=output_format,
            nonzero=nonzero,
            quiet=quiet,
        )

    @staticmethod
    def FDL(
        dataset_id: int,
        tar_dir="quant_tar",
        tar_file_name=None,
        quant_dir="processed_quant",
        quant_path_name=None,
        output_format="scRNA",
        nonzero=False,
        force=False,
        quiet=False,
    ):
        """
        Call `ProcessedQuant.fetch_quant()`, ProcessedQuant.decompress_quant() and ProcessedQuant.load_quant() in turn
        for a dataset to generate a complete ProcessedQuant object.

        Parameters
        -----------------------
        dataset_id: `int`
            The id of an available dataset
        tar_dir: `str` (default: `quant_tar`)
            The directory for saving the fetched tar file.
        tar_file_name: `str` (default: dataset id)
            Customized file name of the fetched tar file.
            Default is the dataset id.
        quant_dir: `str` (default: `processed_quant`)
            The directory for saving decompressed quantification result folder.
        quant_path_name: `str` (default: dataset id)
            Customized folder name of the quantification result folder.
            Default is the dataset id.
        output_format: `str` or `dict` (default: `scRNA`)
            A string represents one of the pre-defined output formats, which are "scRNA", "snRNA" and "velocity". \\
            If a customized format of the returned `AnnData` is needed, one can pass a Dictionary.\\
            See [load_fry](https://github.com/COMBINE-lab/pyroe/blob/main/src/pyroe/load_fry.py) for details.
        nonzero: `bool` (default: `False`)
            If `True`, genes with zero expression across all cells are removed.
        force: `bool` (default: `False`)
            If `True`, existing files will be overwritten.
        quiet: `bool` (default: `False`)
            If `True`, progress messages are suppressed.
        """
        processed_quant = ProcessedQuant(dataset_id)

        # fetch it
        processed_quant.fetch_quant(
            tar_dir=tar_dir, file_name=tar_file_name, force=force, quiet=quiet
        )

        # decompress it
        processed_quant.decompress_quant(
            quant_dir=quant_dir,
            quant_path_name=quant_path_name,
            force=force,
            quiet=quiet,
        )

        # load it
        processed_quant.load_quant(
            output_format=output_format, force=force, nonzero=nonzero, quiet=quiet
        )
        return processed_quant

    def check_validity(self):
        """Raise ValueError unless every metadata attribute is populated."""
        required = (
            self.dataset_id,
            self.chemistry,
            self.reference,
            self.dataset_name,
            self.dataset_url,
            self.fastq_url,
            self.fastq_MD5sum,
            self.delete_fastq,
            self.feature_barcode_csv_url,
            self.multiplexing_library_csv_url,
            self.quant_tar_url,
        )
        if any(attr is None for attr in required):
            raise ValueError(
                "Incomplete class object, use "
                "ProcessedQuant(dataset_id) "
                "to instantiate it."
            )
| from .pyroe_utils import say
import pandas as pd
import os
import shutil
import urllib.request
import tarfile
from .load_fry import load_fry
class ProcessedQuant:
"""
A class stores the information of the quantification
result of a processed dataset
"""
def get_available_dataset_df():
"""
get the dataframe in which each row contains
the information of an available dataset that
can be fetched.
"""
# load available dataset sheet
location = os.path.dirname(os.path.realpath(__file__))
my_file = os.path.join(location, "data", "available_datasets.tsv")
available_datasets = pd.read_csv(my_file, sep="\t")
return available_datasets
def print_available_datasets():
"""
Print the index and name of the available datasets.
"""
available_datasets = ProcessedQuant.get_available_dataset_df()
epilog = "\n".join(
[
"".join([f"{idx+1}", ". ", dataset_name])
for (idx, dataset_name) in zip(
range(available_datasets.shape[0]),
available_datasets["dataset_name"].tolist(),
)
]
)
epilog = " \n".join(["Index of the available datasets:", epilog])
print(epilog)
def __init__(self, dataset_id: int):
available_datasets = ProcessedQuant.get_available_dataset_df()
if dataset_id < 0 or dataset_id >= available_datasets.shape[0]:
raise ValueError(
"Invalid dataset_id, run",
"ProcessedQuant.print_available_datasets()",
"to get available dataset ids.",
)
# get the info of the queried dataset id, python is zero based.
available_dataset = available_datasets.iloc[dataset_id - 1, :]
self.dataset_id = available_dataset["dataset_id"]
self.chemistry = available_dataset["chemistry"]
self.reference = available_dataset["reference"]
self.dataset_name = available_dataset["dataset_name"]
self.dataset_url = available_dataset["dataset_url"]
self.fastq_url = available_dataset["fastq_url"]
self.fastq_MD5sum = available_dataset["fastq_MD5sum"]
self.delete_fastq = available_dataset["delete_fastq"]
self.feature_barcode_csv_url = available_dataset["feature_barcode_csv_url"]
self.multiplexing_library_csv_url = available_dataset[
"multiplexing_library_csv_url"
]
self.quant_tar_url = available_dataset["quant_tar_url"]
self.quant_path = None
self.tar_path = None
self.anndata = None
def fetch_quant(
self, tar_dir="quant_tar", file_name=None, force=False, quiet=False
):
"""
Fetch processed quantification to a local directory.\\
The path to the fetched tar file will be sotred
as the `ProcessedQuant.tar_path` attribute.
Parameters
----------
tar_dir: `str` (default: `quant_tar`)
The directory for saving the fetched tar file.
file_name: `str` (default: dataset id)
Customized file name of the fetched tar file.
Default is the dataset id.
force: `bool` (default: `False`)
If `True`, any existing tar file will be overwritten.
quiet: `bool` (default: `False`)
If `True`, help messaged will be printed out.
"""
self.check_validity()
say(quiet, f"Fetching the quant result of dataset #{self.dataset_id}")
# check whether tar file exist,
# download it if needed
if self.tar_path is not None:
if os.path.exists(self.tar_path) and (not force):
say(
quiet,
" - The tar_path attribute is not None and the path exists:",
)
say(quiet, f" {self.tar_path}")
say(quiet, " - Pass force=True to fetch it again\n")
return
# folder for (temporarily) storing tar files.
if not os.path.exists(tar_dir):
os.makedirs(tar_dir)
# process file_name
if file_name is None:
file_name = "".join([f"{self.dataset_id}", ".tar"])
elif not file_name.endswith(".tar"):
file_name = "".join([f"{file_name}", ".tar"])
# update tar_path
tar_path = os.path.join(tar_dir, file_name)
if os.path.exists(tar_path):
if force:
say(quiet, " - Overwriting the existing tar file:")
say(quiet, f" {tar_path}")
else:
say(quiet, " - Use the existing file as tar_path:")
say(quiet, f" {tar_path}")
say(quiet, " - Pass force=True to overwrite it")
self.tar_path = tar_path
return
# download tar file
urllib.request.urlretrieve(self.quant_tar_url, tar_path)
self.tar_path = tar_path
say(quiet, " - Fetched quant tar is saved as:")
say(quiet, f" {self.tar_path}")
def decompress_quant(
self,
quant_dir="processed_quant",
quant_path_name=None,
force=False,
quiet=False,
):
"""
Decompress the fetched quantification to a local directory.\\
The path to the decompressed quantification result will be sotred
as the `ProcessedQuant.quant_path` attribute.
Parameters
----------
quant_dir: `str` (default: `processed_quant`)
The directory for saving decompressed quantification result folder.
quant_path_name: `str` (default: dataset id)
Customized folder name of the quantification result folder.
Default is the dataset id.
force: `bool` (default: `False`)
If `True`, existing tar file will be overwritten.
quiet: `bool` (default: `False`)
If `True`, help messaged will be printed out.
"""
# make sure class is valid
self.check_validity()
# make sure tar file is valid
if self.tar_path is None:
raise ValueError(
"tar_path attribute is None, run ProcessedQuant.fetch_quant() method to fetch the tar file."
)
say(
quiet,
f"Decompressing the quant result of dataset #{self.dataset_id} using:\n {self.tar_path}",
)
# if quant_path is not None, return unless force=TRUE
if self.quant_path is not None:
if os.path.exists(self.tar_path) and (not force):
say(
quiet,
" - The quant_path attribute is not None and the path exists:",
)
say(quiet, f" {self.quant_path}")
say(quiet, " - pass force=True to decompress it again")
return
# check expected output dir
if quant_path_name is None:
quant_path_name = self.dataset_id
quant_parent_dir = os.path.join(quant_dir, f"{quant_path_name}")
if os.path.exists(quant_parent_dir):
if force:
say(quiet, " - Removing existing quant folder:")
say(quiet, f" {quant_parent_dir}")
shutil.rmtree(quant_parent_dir)
else:
say(quiet, " - Use the existing directory as quant_path:")
say(quiet, f" {quant_parent_dir}")
say(quiet, " - pass force=True to overwrite it")
self.quant_path = os.path.join(
quant_parent_dir, next(os.walk(quant_parent_dir))[1][0]
)
return
# decompress the tar file
tf = tarfile.open(self.tar_path)
tf.extractall(quant_parent_dir)
self.quant_path = os.path.join(
quant_parent_dir, next(os.walk(quant_parent_dir))[1][0]
)
say(quiet, " - Decompressed quant result is saved as:")
say(quiet, f" {self.quant_path}")
def load_quant(
self, output_format="scRNA", force=False, nonzero=False, quiet=False
):
"""
Load the quantification result as the `ProcessedQuant.anndata` attribute.\\
Parameters
----------
output_format: `str` or `dict` (default: `scRNA`)
A string represents one of the pre-defined output formats, which are "scRNA", "snRNA" and "velocity". \\
If a customized format of the returned `AnnData` is needed, one can pass a dictionary.\\
See [load_fry](https://github.com/COMBINE-lab/pyroe/blob/main/src/pyroe/load_fry.py) for details.
nonzero: `bool` (default: `False`)
If `True`, the genes that have zero expression across all cells will be removed.
quiet: `bool` (default: `False`)
If `True`, help messaged will not be printed out.
"""
self.check_validity()
# make sure quant dir is valid
if self.quant_path is None:
raise ValueError(
"The quant_path attribute is None, run ProcessedQuant.fetch_quant() and then ProcessedQuant.decompress_quant() to generate it."
)
if not os.path.exists(self.quant_path):
raise ValueError(
"The quant_path attribute is invalid, run ProcessedQuant.fetch_quant() and then ProcessedQuant.decompress_quant() to regenerate it."
)
if (self.anndata is not None) and (not force):
say(quiet, " - The anndata attribute is not None.")
say(quiet, " - pass force=True to update it")
return
say(quiet, f"Loading dataset #{self.dataset_id} from:")
say(quiet, f" {self.quant_path}")
self.anndata = load_fry(
frydir=self.quant_path,
output_format=output_format,
nonzero=nonzero,
quiet=quiet,
)
def FDL(
dataset_id: int,
tar_dir="quant_tar",
tar_file_name=None,
quant_dir="processed_quant",
quant_path_name=None,
output_format="scRNA",
nonzero=False,
force=False,
quiet=False,
):
"""
Call `ProcessedQuant.fetch_quant()`, ProcessedQuant.decompress_quant() and ProcessedQuant.load_quant() in turn
for a dataset to generate a complete ProcessedQuant object.
Parameters
-----------------------
dataset_id: `int`
The id of an available dataset
tar_dir: `str` (default: `quant_tar`)
The directory for saving the fetched tar file.
tar_file_name: `str` (default: dataset id)
Customized file name of the fetched tar file.
Default is the dataset id.
quant_dir: `str` (default: `processed_quant`)
The directory for saving decompressed quantification result folder.
quant_path_name: `str` (default: dataset id)
Customized folder name of the quantification result folder.
Default is the dataset id.
output_format: `str` or `dict` (default: `scRNA`)
A string represents one of the pre-defined output formats, which are "scRNA", "snRNA" and "velocity". \\
If a customized format of the returned `AnnData` is needed, one can pass a Dictionary.\\
See [load_fry](https://github.com/COMBINE-lab/pyroe/blob/main/src/pyroe/load_fry.py) for details.
nonzero: `bool` (default: `False`)
If `True`, existing tar file will be overwritten.
force: `bool` (default: `False`)
If `True`, existing tar file will be overwritten.
quiet: `bool` (default: `False`)
If `True`, help messaged will be printed out.
"""
processed_quant = ProcessedQuant(dataset_id)
# fetch it
processed_quant.fetch_quant(
tar_dir=tar_dir, file_name=tar_file_name, force=force, quiet=quiet
)
# decompress it
processed_quant.decompress_quant(
quant_dir=quant_dir,
quant_path_name=quant_path_name,
force=force,
quiet=quiet,
)
# load it
processed_quant.load_quant(
output_format=output_format, force=force, nonzero=nonzero, quiet=quiet
)
return processed_quant
def check_validity(self):
if (
self.quant_tar_url is None
or self.dataset_id is None
or self.chemistry is None
or self.reference is None
or self.dataset_name is None
or self.dataset_url is None
or self.fastq_url is None
or self.fastq_MD5sum is None
or self.delete_fastq is None
or self.feature_barcode_csv_url is None
or self.multiplexing_library_csv_url is None
or self.quant_tar_url is None
):
raise ValueError(
"Incomplete class object, use",
"ProcessedQuant(dataset_id)",
"to instantiate it.",
)
| en | 0.536597 | A class stores the information of the quantification result of a processed dataset get the dataframe in which each row contains the information of an available dataset that can be fetched. # load available dataset sheet Print the index and name of the available datasets. # get the info of the queried dataset id, python is zero based. Fetch processed quantification to a local directory.\\ The path to the fetched tar file will be sotred as the `ProcessedQuant.tar_path` attribute. Parameters ---------- tar_dir: `str` (default: `quant_tar`) The directory for saving the fetched tar file. file_name: `str` (default: dataset id) Customized file name of the fetched tar file. Default is the dataset id. force: `bool` (default: `False`) If `True`, any existing tar file will be overwritten. quiet: `bool` (default: `False`) If `True`, help messaged will be printed out. #{self.dataset_id}") # check whether tar file exist, # download it if needed # folder for (temporarily) storing tar files. # process file_name # update tar_path # download tar file Decompress the fetched quantification to a local directory.\\ The path to the decompressed quantification result will be sotred as the `ProcessedQuant.quant_path` attribute. Parameters ---------- quant_dir: `str` (default: `processed_quant`) The directory for saving decompressed quantification result folder. quant_path_name: `str` (default: dataset id) Customized folder name of the quantification result folder. Default is the dataset id. force: `bool` (default: `False`) If `True`, existing tar file will be overwritten. quiet: `bool` (default: `False`) If `True`, help messaged will be printed out. 
# make sure class is valid # make sure tar file is valid #{self.dataset_id} using:\n {self.tar_path}", # if quant_path is not None, return unless force=TRUE # check expected output dir # decompress the tar file Load the quantification result as the `ProcessedQuant.anndata` attribute.\\ Parameters ---------- output_format: `str` or `dict` (default: `scRNA`) A string represents one of the pre-defined output formats, which are "scRNA", "snRNA" and "velocity". \\ If a customized format of the returned `AnnData` is needed, one can pass a dictionary.\\ See [load_fry](https://github.com/COMBINE-lab/pyroe/blob/main/src/pyroe/load_fry.py) for details. nonzero: `bool` (default: `False`) If `True`, the genes that have zero expression across all cells will be removed. quiet: `bool` (default: `False`) If `True`, help messaged will not be printed out. # make sure quant dir is valid #{self.dataset_id} from:") Call `ProcessedQuant.fetch_quant()`, ProcessedQuant.decompress_quant() and ProcessedQuant.load_quant() in turn for a dataset to generate a complete ProcessedQuant object. Parameters ----------------------- dataset_id: `int` The id of an available dataset tar_dir: `str` (default: `quant_tar`) The directory for saving the fetched tar file. tar_file_name: `str` (default: dataset id) Customized file name of the fetched tar file. Default is the dataset id. quant_dir: `str` (default: `processed_quant`) The directory for saving decompressed quantification result folder. quant_path_name: `str` (default: dataset id) Customized folder name of the quantification result folder. Default is the dataset id. output_format: `str` or `dict` (default: `scRNA`) A string represents one of the pre-defined output formats, which are "scRNA", "snRNA" and "velocity". \\ If a customized format of the returned `AnnData` is needed, one can pass a Dictionary.\\ See [load_fry](https://github.com/COMBINE-lab/pyroe/blob/main/src/pyroe/load_fry.py) for details. 
nonzero: `bool` (default: `False`) If `True`, existing tar file will be overwritten. force: `bool` (default: `False`) If `True`, existing tar file will be overwritten. quiet: `bool` (default: `False`) If `True`, help messaged will be printed out. # fetch it # decompress it # load it | 3.206758 | 3 |
tao1/libs/shop/shop.py | MortalViews/tao1 | 25 | 6612576 | import json, cgi, os, sys, hashlib, time
from urllib.parse import *
from pymongo import *
from urllib import *
# from app.report.report import *
from datetime import datetime, timedelta
from libs.perm.perm import *
from libs.table.table import create_empty_row_
from libs.contents.contents import get_doc, get_mt
from core.core import *
def add_basket_post():
    """AJAX endpoint: add the posted ware to the basket and return its new state."""
    ware_id = get_post('ware_id')
    qty = int(get_post('quantity'))
    add_basket(ware_id, qty)
    return {
        "result": "ok",
        "quantity": basket_count(),
        "basket": basket_show(),
    }
def add_basket(ware, quantity):
    """Add *quantity* units of ware *ware* (a document id) to the session basket.

    The ware document is looked up once and its title/price/description are
    cached in the session entry so the basket can render without DB hits.
    """
    s = session()
    doc = get_doc(ware)
    basket_check()
    if not ware in s['basket']:
        # first time this ware is added: create the cached entry
        s['basket'][ware] = {'title': ct(doc['doc']['title']), 'price': doc['doc']['price'],
                             "amount": 0, 'quantity': 0, 'descr': doc['doc']['descr'],
                             "_id":doc['_id']
                             }
    s['basket'][ware]['quantity'] += quantity
    # die(doc['doc']['count_opt'])
    if 'count_opt' in doc['doc'] and doc['doc']['count_opt'] and int(quantity) >= int(ct(doc['doc']['count_opt'])):
        # wholesale threshold reached: whole batch priced at the wholesale price.
        # NOTE(review): this branch OVERWRITES the accumulated amount while the
        # retail branch below ADDS to it — confirm that mixing retail and
        # wholesale additions of the same ware is meant to reset the amount.
        amount = float(quantity * doc['doc']['price_opt'])
        s['basket'][ware]['amount'] = amount
        s.save()
        # die( s['basket'][ware]['amount'] )
    else:
        amount = float(quantity * doc['doc']['price'])
        s['basket'][ware]['amount'] += amount
        s.save()
def list_basket(request):
    """Render the basket page: item list, item count and running total."""
    quantity = basket_count()
    items = basket_show()
    total = sum(float(entry['amount']) for entry in items.values())
    return templ('libs.shop:list_basket', request,
                 dict(quantity=quantity, basket=items, amount=total))
def basket_context(request):
    """Template-context helper: expose the basket url and the order form field map."""
    # NOTE(review): the result of this lookup is never used — dead read unless
    # get_const_value has side effects; confirm before removing.
    basket = get_const_value("is_basket")
    u = urlparse(request.url)
    basket_url = u.scheme + '://' + u.netloc + '/basket'
    meta_doc = get_mt('des:client_order'); basket_map=None
    if meta_doc:
        # keep only the des:order fields the current user may see
        meta_table = check_map_perm('des:order', meta_doc['field_map'])
        basket_map = rec_data_t(meta_table)
    return {'basket_url':basket_url, 'basket_map':basket_map, }
def clean_basket_post():
    """AJAX endpoint: remove one ware from the basket, return the new state as JSON."""
    basket_clean(get_post('ware_id'))
    payload = {"result": "ok", "quantity": basket_count(), "basket": basket_show()}
    return json.dumps(payload)
def show_basket_post():
    """AJAX endpoint: return the current basket state as a JSON string."""
    state = {"result": "ok", "quantity": basket_count(), "basket": basket_show()}
    return json.dumps(state)
def make_order_post():
    """Handle the order-submission POST: notify by mail, then persist the order."""
    callback(get_post('phone'), get_settings('domain'), get_settings('basket', ''))
    # NOTE(review): add_order() is declared as add_order(request, data) but is
    # called here with one argument — this raises TypeError at runtime; the
    # request presumably needs to be threaded through. TODO confirm and fix.
    add_order(json.loads(get_post('data')))
    return {"result":"ok"}
def add_order(request, data):
    """Create a des:order document from *data* and copy the session basket
    into its 'ware' sub-table, one row per basket entry."""
    db = request.db
    proc_id = 'des:order'; table_id = 'ware'
    sub_data = basket_show()
    doc_id = create_empty_row_(proc_id, data)
    doc = get_doc(doc_id)
    for i in sub_data:
        # allocate the next sequential row id inside the order document
        new_id = doc['seq_id']
        doc["seq_id"] = new_id+1
        new_id = str(new_id)
        doc['tables'][table_id][new_id] = sub_data[i]
    db.doc.save(doc)
    return {"result":"ok"}
def add_order_web_post():
    """Create a web order from the posted phone number and the session basket.

    Creates one des:web_order row plus a des:web_order_ware row per basket
    entry, stores the computed total and empties the basket.  Trigger wiring
    (from the original notes): web orders -> on create -> init_web_order;
    sub-table create/update -> update_sum(owner, new_row); order-ware
    create/update -> update_price_column(...) / price_changed(...).
    """
    import html  # stdlib replacement for cgi.escape, which was removed in Python 3.8
    phone = get_post('phone')
    basket = get_post('basket', '')
    callback(phone, get_settings('domain'), basket)
    s = session()
    basket_check()
    if len(s['basket']):
        # BUG FIX: the original read get_post('owner') and immediately
        # overwrote it with the created row id — the dead read was dropped.
        owner = create_row('des:web_order', None, defaults={'phone': phone})
        amount = 0
        for _id in s['basket']:
            ware = s['basket'][_id]
            doc_id = create_row('des:web_order_ware', owner,
                                defaults={"title": ware['_id'],
                                          "quantity": ware['quantity'],
                                          "price": ware['price']})
            amount += ware['quantity'] * float(ware['price'])
            # abort on the first order line that failed to persist
            if not doc_id:
                return '{"result":"fail", "error":"%s"}' % html.escape('updated', True)
        update_row_('des:web_order', owner, {'amount': amount}, '_', no_synh=True)
        wares_clean()
    return {"result":"ok"}
def get_shop_filter(request):
    """Return all producers as {id, title} pairs for the shop filter widget."""
    producers = [{"id": row['_id'], "title": ct(row['doc']["title"])}
                 for row in request.db.doc.find({"doc_type": "des:producer"})]
    return {'produced': producers}
def basket_clean(ware):
    """Remove a single ware from the session basket, if it is present."""
    basket_check()
    sess = session()
    if ware in sess['basket']:
        sess['basket'].pop(ware)
        sess.save()
def wares_clean():
    """Empty the session basket entirely (e.g. after an order is placed)."""
    basket_check()
    sess = session()
    sess.pop('basket')  # basket_check() guarantees the key exists
    sess.save()
    return {"result": "ok"}
def basket_show():
    """Return the basket mapping stored in the session (created on demand)."""
    basket_check()
    return session()['basket']
def basket_count():
    """Return the total quantity of items currently in the basket."""
    basket_check()
    entries = session()['basket']
    return sum(item['quantity'] for item in entries.values())
def basket_amount():
    """Return the basket total: sum of quantity × retail price per entry."""
    basket_check()
    entries = session()['basket']
    return sum(item['quantity'] * item['price'] for item in entries.values())
def basket_check():
    """Ensure the session contains a 'basket' dict; create it on first use."""
    sess = session()
    if 'basket' not in sess:
        sess['basket'] = {}
        sess.save()
# =====================================================================================================================================
# ====================================== ADVANCED FILTER ===========================================================================
# =====================================================================================================================================
def ware_filter(filter):
    """Build a Mongo condition that filters wares by selected attributes.

    Each entry of *filter* is a string whose first 32 characters are the
    category id; the remainder (after a one-character separator) is the
    attribute id.  Returns {'attr.<cat>': {'$in': [attr, ...]}, ...}.
    """
    if not isinstance(filter, list):
        filter = [filter]
    by_cat = {}
    for item in filter:
        by_cat.setdefault(item[:32], []).append(item[33:])
    return {'attr.' + cat: {'$in': attrs} for cat, attrs in by_cat.items()}
def get_ware_cls(request, cls):
    """Collect the filter categories (and their attributes) shown for a ware class.

    Returns [{'id', 'title', 'attr': [{'id', 'title'}, ...]}, ...].
    """
    db = request.db
    # category ids attached to this class (des:ware_class_cat links class -> cat)
    cat_ids = [row['doc']['cat'] for row in
               db.doc.find({'doc_type': 'des:ware_class_cat', 'owner': cls})]
    categories = []
    for row in db.doc.find({'doc_type': 'des:ware_cat', '_id': {'$in': cat_ids}}):
        # attributes belonging to this filter category
        attrs = [{'id': a['_id'], 'title': ct(a['doc']['title'])}
                 for a in db.doc.find({'doc_type': 'des:ware_attr', 'owner': row['_id']})]
        categories.append({'id': row['_id'], 'title': ct(row['doc']['title']), 'attr': attrs})
    return categories
def list_ware(request, cls):
    """Render the ware list page for class *cls*, honouring POSTed attribute filters."""
    cond = {'doc_type': 'des:ware', 'doc.class': cls, 'doc.pub': 'true'}
    if request.method == 'POST':
        # narrow by the selected category attributes, e.g. {'attr.diagonal': {'$in': [...]}}
        cond.update(ware_filter(get_post('cat', [])))
    from libs.sites.sites import get_pagination, get_full_docs
    pages, req = get_pagination(cond)
    req.sort('doc.date', -1)  # newest first (the 'sort' flag was always truthy)
    dv = get_full_docs(req)
    # BUG FIX: get_ware_cls() takes the request as its first argument;
    # the original call raised TypeError.
    filter = get_ware_cls(request, cls)
    return templ('libs.shop:list_ware', request,
                 dict(cls=cls, docs=dv, proc_id='des:ware', pages=pages, filter=filter))
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
def list_class_post(cls):
    """Placeholder POST handler for class listings (not implemented)."""
    pass
def list_ware_post(cls):
    """Placeholder POST handler for ware listings (not implemented)."""
    pass
def ware_page(request, doc_id):
    """Render a single ware (product) page: attributes, similar wares and SEO data."""
    from libs.sites.sites import get_full_doc, get_full_docs  # dropped unused get_pagination
    u = urlparse(request.url)
    url = u.scheme + '://' + u.hostname + u.path
    data_tree = []
    db = request.db
    doc = get_full_doc(doc_id, img_ctr=4)
    # attributes attached to this ware
    # (FIX: the original built this cursor twice and discarded the first one)
    ware_attr = get_full_docs(db.doc.find({'doc_type': 'des:ware_attr', 'owner': doc['_id']}))
    proc_id = doc['proc_id']
    title = ct(doc['doc']['title']) if 'title' in doc['doc'] else ''
    cls = doc['doc']['class']
    # up to six other wares of the same class, shown as "similar"
    req = db.doc.find({'doc_type': 'des:ware', '_id': {'$ne': doc['_id']}, 'doc.class': cls}).limit(6)
    similar = get_full_docs(req)
    seo = db.doc.find_one({'doc.alias': 'ware_page_seo'},
                          {'doc.description': 1, 'doc.tags': 1, 'doc.body': 1,
                           'doc.footer': 1, 'doc.add_title': 1})
    return templ('ware_page', request,
                 dict(doc=doc, url=url, doc_id=doc_id, proc_id=proc_id, similar=similar,
                      seo=seo, tree=data_tree, page_title=title, ware_attr=ware_attr))
def count_ware_(request, cls):
    """Count wares in class *cls* and, recursively, in its child classes (legacy variant)."""
    db = request.db
    ctr = db.doc.find({'doc_type': 'des:ware', 'doc.class': cls}).count()
    childs = db.doc.find_one({'_id': cls})
    # guard against a missing class document (find_one -> None)
    if not childs or 'child' not in childs:
        return ctr
    for res in childs['child']:
        # BUG FIX: the recursive call dropped the request argument (TypeError)
        ctr += count_ware_(request, res)
    return ctr
def count_ware(request, cls):
    """Count wares in class *cls*, including wares of all descendant classes."""
    db = request.db
    ctr = db.doc.find({'doc_type': 'des:ware', 'doc.class': cls}).count()
    childs = db.doc.find_one({'_id': cls})
    if childs:
        # BUG FIX: the recursion dropped the request argument (TypeError);
        # also guard against a missing class document (find_one -> None).
        ctr += sum(count_ware(request, child) for child in childs.get('child', []))
    return ctr
def get_navigate_(request, doc_id):
    """Build the alias breadcrumb path from *doc_id* up to the root (legacy variant)."""
    db = request.db
    parent = db.doc.find_one({'child': {'$in': [doc_id]}}, {'parent': 1, 'doc.alias': 1})
    if not parent:
        return []
    # BUG FIX: the recursive call dropped the request argument (TypeError)
    return [parent['doc']['alias']] + get_navigate_(request, parent['_id'])
def get_navigate(request, doc_id):
    """Build the breadcrumb path [(alias, title), ...] from *doc_id* up to the root."""
    db = request.db
    node = db.doc.find_one({'_id': doc_id}, {'parent': 1, 'doc.alias': 1, 'doc.title': 1})
    if not node:
        return []
    # BUG FIX: the recursive call dropped the request argument (TypeError).
    # Recursion stops when the parent id ('_' at the root) matches no document.
    crumb = (node['doc']['alias'], ct(node['doc']['title']))
    return [crumb] + get_navigate(request, node['parent'])
def get_filters(request, cls):
    """Aggregate the distinct attribute values per attribute title for a ware class."""
    pipeline = [
        {'$match': {'doc_type': "des:ware_attr", 'doc.class': cls}},
        {'$project': {'title': "$doc.title.ru", 'value': "$doc.attr_val.ru",
                      'class': "$doc.class", '_id': 0}},
        # distinct values per (class, title)
        {'$group': {'_id': {'class': "$class", 'title': "$title"},
                    'filters': {'$addToSet': "$value"}}},
        # fold the titles back into one document per class
        {'$group': {'_id': "$_id.class",
                    'title': {'$addToSet': {'title': "$_id.title", 'filters': "$filters"}}}},
    ]
    return list(request.db.doc.aggregate(pipeline))
def list_class(request, cls):
    """Render a category page: the child classes of *cls* plus its wares."""
    from libs.sites.sites import get_full_docs, get_curr_img, get_full_doc
    from libs.files.files import get_nf
    db = request.db
    clss = []
    parent_id = db.doc.find_one({'doc_type': 'des:ware_class', 'doc.alias': cls})
    for doc in db.doc.find({'doc_type': 'des:ware_class', 'parent': parent_id['_id']}).sort('doc.date', -1):
        proc_id = doc['doc_type']
        d_img = doc['default_img'] if 'default_img' in doc and doc['default_img'] else None
        attachment = get_nf(proc_id, doc['_id'], 1)
        try:
            # BUG FIX: count_ware() takes the request first; without it the
            # bare except always fired and every count showed '1'.
            count = count_ware(request, doc['_id'])
        except Exception:
            count = '1'  # best-effort fallback, preserved from the original
        clss.append({"_id": doc['_id'], "id": doc['_id'], 'count': count,
                     "doc": doc['doc'], "att": attachment,
                     "img": get_curr_img(doc, attachment),
                     'default_img': d_img, 'proc_id': proc_id})
    docs = get_full_docs(db.doc.find({'doc_type': 'des:ware',
                                      'doc.class': parent_id['_id']}).sort('doc.date', -1))
    # BUG FIX: get_filters() and get_navigate() also take the request first.
    filter = get_filters(request, parent_id['_id'])
    parent_doc = get_full_doc(parent_id['_id'])
    seo = db.doc.find_one({'_id': parent_id['_id']},
                          {'doc.description': 1, 'doc.tags': 1, 'doc.footer': 1})
    return templ('list_class', request,
                 dict(cls_docs=clss, cls=cls, docs=docs, proc_id='des:ware', pages='',
                      path=get_navigate(request, parent_id['_id']), parent_doc=parent_doc,
                      filter=filter, seo=seo))
def set_filters(request, cls, filters):
    """Resolve a filter query string into the matching ware documents.

    *filters* looks like "/title1=val1;title2=val2;title1=val3": the leading
    character is stripped, entries are split on ';' and '=' and grouped by
    title.  A ware matches when it carries ALL of the selected titles, each
    with one of the selected values.  Returns None when nothing matches.
    """
    db = request.db
    url = filters[1:]
    url = url.split(';')
    docs=[]; cond=[]; ds = {}; attr = []; data = []
    for res in url:
        res = res.replace('%20', ' ')
        # NOTE(review): entries without '=' raise IndexError here — assumes the
        # client always sends well-formed pairs; confirm against the UI code.
        aaa = res.split('=');
        key = aaa[0]; val = aaa[1]
        # collect repeated titles into a list of values
        if key in ds:
            if type(ds[key]) == list: ds[key].append(val)
            else: ds[key] = [ds[key], val]
        else: ds.update({key:val})
    for res in ds:
        attr.append(res)
    # one $or branch per selected title
    for res in ds.items():
        if type(res[1]) == list: pr = {'doc.title.ru':res[0], 'doc.attr_val.ru':{'$in':res[1]}}
        else: pr = {'doc.title.ru':res[0], 'doc.attr_val.ru':res[1]}
        docs.append(pr)
    # group matching attribute docs by owning ware; keep wares carrying all titles
    cursor = db.doc.aggregate([
        { '$match' : { 'doc_type' : "des:ware_attr", 'doc.class':cls, '$or': docs} },
        { '$group' : { '_id': "$owner", "attr": { '$push': "$doc.title.ru" } } },
        { '$match' : { "attr": { '$all': attr } } },
        { '$project': {"_id":1 } }
    ])
    for res in cursor:
        cond.append(res)
    if not len(cond): return None
    from libs.sites.sites import get_full_docs
    docs = get_full_docs(db.doc.find({ '$or':cond }).sort('doc.date', -1))
    return docs
def list_filters(request, cls, filters):
    """Render a category page when attribute filters were selected in the UI."""
    from libs.sites.sites import get_full_docs, get_curr_img, get_full_doc
    from libs.files.files import get_nf
    db = request.db
    clss = []
    parent_id = db.doc.find_one({'doc_type': 'des:ware_class', 'doc.alias': cls})
    for doc in db.doc.find({'doc_type': 'des:ware_class', 'parent': parent_id['_id']}).sort('doc.date', -1):
        proc_id = doc['doc_type']
        attachment = get_nf(proc_id, doc['_id'], 1)
        try:
            # BUG FIX: count_ware() takes the request first; without it the
            # bare except always fired and every count showed '1'.
            count = count_ware(request, doc['_id'])
        except Exception:
            count = '1'
        clss.append({"_id": doc['_id'], "id": doc['_id'], 'count': count,
                     "doc": doc['doc'], "att": attachment,
                     "img": get_curr_img(doc, attachment), 'proc_id': proc_id})
    # BUG FIX: set_filters/get_filters/get_navigate take the request first.
    docs = set_filters(request, parent_id['_id'], filters)
    filter = get_filters(request, parent_id['_id'])
    seo = db.doc.find_one({'doc.alias': 'class_seo'},
                          {'doc.description': 1, 'doc.tags': 1, 'doc.body': 1,
                           'doc.footer': 1, 'doc.add_title': 1})
    # BUG FIX: guard seo against None ('doc' in None raised TypeError)
    seo = seo if seo and 'doc' in seo else ''
    return templ('list_class', request,
                 {'result': 'ok', 'cls_docs': clss, 'cls': cls, 'docs': docs,
                  'proc_id': 'des:ware', 'pages': '',
                  'path': get_navigate(request, parent_id['_id']),
                  'parent_doc': get_full_doc(parent_id['_id']),
                  'filter': filter, 'seo': seo})
def get_list_filter(request, cls):
    """Return the filter descriptors for a ware class.

    Produced structure: [{'id_class': <class id>, 'title': <localized title>}, ...]
    """
    db = request.db
    filters = []
    # NOTE(review): the second '$or' branch {} matches every des:ware_filter
    # document; preserved as-is — confirm the class restriction is intended.
    for res in db.doc.find({'doc_type': 'des:ware_filter', '$or': [{'doc.ware_class': cls}, {}]}):
        filters.append({'id_class': res['doc']['ware_class'], 'title': ct(res['doc']['title'])})
    # Removed two dead queries (users/articles) whose results were never used;
    # the 'users' comprehension also crashed with AttributeError (doc._id on a dict).
    return filters
def first_cls(request):
    """Return the root ware categories (mainly for the front page)."""
    from libs.sites.sites import get_curr_img  # dropped unused get_full_docs
    from libs.files.files import get_nf
    db = request.db
    docs = []
    for doc in db.doc.find({'doc_type': 'des:ware_class', 'parent': '_'}).sort('doc.date', -1):
        proc_id = doc['doc_type']
        attachment = get_nf(proc_id, doc['_id'], 1)
        try:
            # BUG FIX: count_ware() takes the request first; without it the
            # bare except always fired and every count showed '1'.
            count = count_ware(request, doc['_id'])
        except Exception:
            count = '1'
        docs.append({"_id": doc['_id'], "id": doc['_id'], 'count': count,
                     "doc": doc['doc'], "att": attachment,
                     "img": get_curr_img(doc, attachment), 'proc_id': proc_id})
    return docs
def list_ware_cls(request, full=False):
    """Return the ware-class hierarchy as a tree.

    :param full: when True, fetch full documents (images etc., front-page use);
                 otherwise only title/alias/parent/child fields are projected.
    """
    db = request.db
    if full:
        from libs.sites.sites import get_full_docs
        docs = get_full_docs(
            [res for res in db.doc.find({'doc_type': 'des:ware_class'}).sort('doc.date', -1)])
    else:
        # FIX: the original always ran this projected query and then, when
        # full=True, discarded the result and queried again.
        docs = [res for res in
                db.doc.find({'doc_type': 'des:ware_class'},
                            {'doc.title.ru': 1, 'doc.alias': 1, 'parent': 1, 'child': 1}
                            ).sort('doc.date', -1)]
    return form_tree_(docs)
# return docs
# def form_tree_(docs):
# tree = {doc['_id']: doc for doc in docs}
# for doc in docs:
# if "child" in doc and doc['child'] != '_':
# doc['child'] = [tree[id] for id in doc['child']]
# docss = {"_id": "_", "child": [doc for doc in docs if "parent" not in doc or doc['parent']=='_']}
# return docss
def form_tree_(docs):
    """Link flat class documents into a parent/child tree.

    Mutates each doc in place: its 'child' field is rebuilt as a list of the
    child documents.  Returns a synthetic root {'_id': '_', 'child': [...]}
    whose children are the docs with no parent (or parent == '_').
    """
    by_id = {d['_id']: d for d in docs}
    for d in docs:
        d['child'] = []
    for d in docs:
        pid = d.get("parent", None)
        if pid and pid != '_':
            by_id[pid]['child'].append(d)
    roots = [d for d in docs if "parent" not in d or d['parent'] == '_']
    return {"_id": "_", "child": roots}
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
def list_orders(request):
    """Render the back-office page listing recent web orders, order lines and wares."""
    from libs.sites.sites import get_full_docs
    db = request.db

    def recent(doc_type):
        # last 60 documents of the given type, newest first
        return get_full_docs(db.doc.find({'doc_type': doc_type}).limit(60).sort('doc.date', -1))

    return templ('libs.shop:list_orders', request,
                 dict(web_order=recent('des:web_order'),
                      web_order_ware=recent('des:web_order_ware'),
                      ware=recent('des:ware')))
def callback_post():
    """AJAX endpoint: forward the posted phone number as a call-back request."""
    phone = get_post('phone')
    basket_flag = get_post('basket', '')
    domain = get_settings('domain')
    return callback(phone, domain, basket_flag)
def callback(phone, dom, basket):
    """E-mail a call-back request (SMS via mail gateway) with the visitor's phone.

    *basket* == 'true' marks the request as coming from the basket page and
    only changes the mail subject.  *dom* is currently unused in the message.
    """
    mail = get_settings('callback_mail')
    # keep a record of the phone number
    create_row('des:phone', '_', defaults={'phone': phone})
    text = u""" {0} """.format(phone)
    if basket == 'true':
        subject = u'Cайт корзина '
    else:
        subject = u'Запрос на сайте '
    route_mail(mail, subject, text)
    return {"result": "ok"}
| import json, cgi, os, sys, hashlib, time
from urllib.parse import *
from pymongo import *
from urllib import *
# from app.report.report import *
from datetime import datetime, timedelta
from libs.perm.perm import *
from libs.table.table import create_empty_row_
from libs.contents.contents import get_doc, get_mt
from core.core import *
def add_basket_post():
add_basket(get_post('ware_id'), int(get_post('quantity')))
return {"result": "ok", "quantity":basket_count(), "basket": basket_show()}
def add_basket(ware, quantity):
"""получает id товара и количество берет подробности о нем и заносит в сесии"""
s = session()
doc = get_doc(ware)
basket_check()
if not ware in s['basket']:
s['basket'][ware] = {'title': ct(doc['doc']['title']), 'price': doc['doc']['price'],
"amount": 0, 'quantity': 0, 'descr': doc['doc']['descr'],
"_id":doc['_id']
}
s['basket'][ware]['quantity'] += quantity
# die(doc['doc']['count_opt'])
if 'count_opt' in doc['doc'] and doc['doc']['count_opt'] and int(quantity) >= int(ct(doc['doc']['count_opt'])):
amount = float(quantity * doc['doc']['price_opt'])
s['basket'][ware]['amount'] = amount
s.save()
# die( s['basket'][ware]['amount'] )
else:
amount = float(quantity * doc['doc']['price'])
s['basket'][ware]['amount'] += amount
s.save()
def list_basket(request):
quantity = basket_count()
basket = basket_show()
amount = 0
# basket = {'1':'1'}
for i in basket:
# amount += float(basket[i]['quantity']) * float(basket[i]['price'])
amount += float(basket[i]['amount'])
# return templ('app.shop:list_basket', quantity = quantity, basket = basket, amount = amount )
return templ('libs.shop:list_basket', request, dict(quantity = quantity, basket = basket, amount = amount) )
def basket_context(request):
basket = get_const_value("is_basket")
u = urlparse(request.url)
basket_url = u.scheme + '://' + u.netloc + '/basket'
meta_doc = get_mt('des:client_order'); basket_map=None
if meta_doc:
meta_table = check_map_perm('des:order', meta_doc['field_map'])
basket_map = rec_data_t(meta_table)
return {'basket_url':basket_url, 'basket_map':basket_map, }
def clean_basket_post():
basket_clean(get_post('ware_id'))
return json.dumps({"result": "ok", "quantity":basket_count(), "basket": basket_show()})
def show_basket_post():
return json.dumps({"result": "ok", "quantity":basket_count(), "basket": basket_show()})
def make_order_post():
callback(get_post('phone'), get_settings('domain'), get_settings('basket', ''))
add_order(json.loads(get_post('data')))
return {"result":"ok"}
def add_order(request, data):
db = request.db
proc_id = 'des:order'; table_id = 'ware'
sub_data = basket_show()
doc_id = create_empty_row_(proc_id, data)
doc = get_doc(doc_id)
for i in sub_data:
new_id = doc['seq_id']
doc["seq_id"] = new_id+1
new_id = str(new_id)
doc['tables'][table_id][new_id] = sub_data[i]
db.doc.save(doc)
return {"result":"ok"}
def add_order_web_post():
""" web заказы -> на создание -> init_web_order(new_row)
web заказы -> на создание подтаблицы -> update_sum( owner, new_row)
web заказы -> на обновление подтаблицы -> update_sum( owner, new_row)
web заказы товары -> на создание -> update_price_column({}, new_row, doc['owner'])
price_changed( doc['owner'], {}, new_row, False)
web заказы товары -> на обновление -> update_price_column(old_row, new_row, doc['owner'])
price_changed(doc['owner'], old_row, new_row, False)
"""
phone = get_post('phone')
basket = get_post('basket', '')
callback(phone, get_settings('domain'), basket)
s = session()
basket_check()
if len(s['basket']):
owner = get_post('owner')
owner = create_row('des:web_order', None, defaults={'phone':phone})
amount = 0
for _id in s['basket']:
ware = s['basket'][_id]
doc_id = create_row('des:web_order_ware', owner, defaults={"title":ware['_id'], "quantity":ware['quantity'],
"price":ware['price']})
amount += ware['quantity'] * float(ware['price'])
if not doc_id: return '{"result":"fail", "error":"%s"}' %cgi.escape('updated', True)
update_row_( 'des:web_order', owner, {'amount':amount}, '_', no_synh=True)
wares_clean()
return {"result":"ok"}
def get_shop_filter(request):
db = request.db
aaa = []
for res in db.doc.find({"doc_type":"des:producer"}):
aaa.append({"id":res['_id'], "title":ct( res['doc']["title"]) })
return {'produced':aaa}
def basket_clean(ware):
basket_check()
s = session()
if ware in s['basket']:
del s['basket'][ware]
s.save()
def wares_clean():
basket_check()
s = session()
del s['basket']
s.save()
return {"result":"ok"}
def basket_show():
basket_check()
s = session()
return s['basket']
def basket_count():
"""щитает кол-во товаров в корзине"""
basket_check()
s = session(); summ = 0
for i in s['basket']:
summ += s['basket'][i]['quantity']
return summ
def basket_amount():
basket_check()
s = session(); summ = 0
for i in s['basket']:
summ += s['basket'][i]['quantity']*s['basket'][i]['price']
return summ
def basket_check():
s = session()
if not 'basket' in s:
s['basket'] = {}
s.save()
# =====================================================================================================================================
# ====================================== ADVANCED FILTER ===========================================================================
# =====================================================================================================================================
def ware_filter(filter):
# отфильтровует сами товары указаному списку атрибутов
if not isinstance(filter, list): filter = [filter]
categ = {}
for i in filter:
cat = i[:32]
attr = i[33:]
if not cat in categ: categ[cat] = []
categ[cat].append(attr)
cond = dict([('attr.'+i, {'$in': v}) for i, v in categ.items()])
#текущий вариант
# aaa = {'attr':{'diagonal':'17', 'korpus': 'metall'}}
# cond = {'attr.diagonal: {$in: [15, 17]}}
# cond = {'docs: {$in: [15, 17]}}
#текущий для агрегации
#db.test.aggregate({$unwind: "$likes"})
# {'docs':[{'id':1, 'cat': 'diagonal', 'attr':'17'}, {id:2, 'cat':'korpus', 'attr': 'metall'}] }
return cond
def get_ware_cls(request, cls):
""" получаем список для фильтра который справа показывается """
# получаем список категорий которые принадлежат например смартфон на выходе диагональ и тд.
# $cat =
# select c.* from ware_cat as c inner join on c.id = cc.owner ware_class_cat as cc where cc.owner = $cls
# {'doc_type':'ware_class_cat', 'owner':cls}{'doc_type':'ware_cat', '_id':{'$in':cat}}
# select a.* from ware_attr as a where owner in $cat
db = request.db; categ = []; list_cat = []
# собираем нужные данные, собираем фильтры принадлежащии классу
for res in db.doc.find({'doc_type':'des:ware_class_cat', 'owner':cls}):
list_cat.append(res['doc']['cat'])
# собираем фильтры атрибутов
for res in db.doc.find({'doc_type':'des:ware_cat', '_id':{'$in':list_cat}}):
cat = {'id':res['_id'], 'title':ct(res['doc']['title']), 'attr':[]}
categ.append(cat)
# идем по полученым фильтрам и собиарем атрибуты
for rs in db.doc.find({'doc_type':'des:ware_attr', 'owner': cat['id']}):
attr = {'id':rs['_id'], 'title':ct(rs['doc']['title'])}
cat['attr'].append(attr)
return categ
def list_ware(request, cls):
""" вызывается для показа списка товаров """
#ware_class_cat-справочник где хранятся категории которые относятся к классу ( класс-смартфон у него категория диагональ экрана )
# cats = [res['_id'] for res in db.doc.find({'doc_type':'ware_class_cat'})]
cond = {'doc_type':'des:ware', 'doc.class': cls, 'doc.pub':'true'}
if request.method == 'POST':
cond.update(ware_filter(get_post('cat', []))) # cond = {'attr.diagonal: {$in: [15, 17]}}
from libs.sites.sites import get_pagination, get_full_docs
pages, req = get_pagination(cond)
sort = ('doc.date', -1)
if sort: req.sort(*sort)
dv = get_full_docs(req)
filter = get_ware_cls(cls)
return templ('libs.shop:list_ware', request, dict(cls = cls, docs = dv, proc_id='des:ware', pages = pages, filter=filter) )
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
def list_class_post(cls):
pass
def list_ware_post(cls):
pass
def ware_page(request, doc_id):
u = urlparse(request.url)
url = u.scheme + '://' + u.hostname + u.path
data_tree = []
from libs.sites.sites import get_pagination, get_full_doc, get_full_docs
db = request.db
doc = get_full_doc(doc_id, img_ctr=4)
req_attr = db.doc.find({'doc_type':'des:ware_attr', 'owner':doc['_id']})
ware_attr = get_full_docs( db.doc.find({'doc_type':'des:ware_attr', 'owner':doc['_id']}) )
proc_id = doc['proc_id']
title = ct(doc['doc']['title']) if 'title' in doc['doc'] else ''
cls = doc['doc']['class']
req = db.doc.find( {'doc_type':'des:ware', '_id':{'$ne':doc['_id']}, 'doc.class':cls} ).limit(6)
similar = get_full_docs( req )
url1 = url
seo = db.doc.find_one({'doc.alias':'ware_page_seo'}, {'doc.description':1, 'doc.tags':1, 'doc.body':1, 'doc.footer':1, 'doc.add_title':1})
# if seo:
# seo = seo
# else: seo = ''
return templ('ware_page', request, dict(doc = doc, url = url1, doc_id=doc_id, proc_id=proc_id, similar = similar, seo=seo,
tree = data_tree, page_title=title, ware_attr=ware_attr)) #news_map=news_map, captcha=raw, hash=hash,
def count_ware_(request, cls):
db = request.db
ctr = db.doc.find({'doc_type':'des:ware', 'doc.class':cls}).count()
childs = db.doc.find_one({'_id':cls})
if not 'child' in childs: return ctr
for res in childs['child']:
ctr += count_ware(res)
return ctr
def count_ware(request, cls):
db = request.db
ctr = db.doc.find({'doc_type': 'des:ware', 'doc.class': cls}).count()
childs = db.doc.find_one({'_id': cls})
ctr += sum(count_ware(res) for res in childs.get('child', []))
return ctr
def get_navigate_(request, doc_id):
db = request.db; path = []
parent = db.doc.find_one({'child':{'$in':[doc_id]}}, {'parent':1, 'doc.alias':1})
if not parent: return []
else:
path.append(parent['doc']['alias'])
path = path + get_navigate_(parent['_id'])
return path
def get_navigate(request, doc_id):
db = request.db; path = []
parent = db.doc.find_one({'_id': doc_id}, {'parent':1, 'doc.alias':1, 'doc.title':1})
if not parent: return []
else:
path.append((parent['doc']['alias'], ct(parent['doc']['title'])))
path = path + get_navigate(parent['parent'])
return path
def get_filters(request, cls):
db = request.db
docs=[]
cursor = db.doc.aggregate([
# { '$match' : { 'doc_type' : "des:ware_attr", 'doc.class': { '$exists': True } } },
{ '$match' : { 'doc_type' : "des:ware_attr", 'doc.class': cls } },
{ '$project' : { 'title' : "$doc.title.ru", 'value':"$doc.attr_val.ru", 'class':"$doc.class", '_id':0 } },
{ '$group' : {'_id': {'class' :"$class", 'title': "$title"} , 'filters': { '$addToSet': "$value" } } },
{ '$group' : {'_id' :"$_id.class", 'title':{ '$addToSet': { 'title': "$_id.title", 'filters': "$filters" } } } }
])
for res in cursor:
docs.append(res)
return docs
def list_class(request, cls):
""" показывает список вложеных категорий и товаров для категорий
"""
from libs.sites.sites import get_pagination, get_full_docs, get_curr_img, get_full_doc
from libs.files.files import get_nf
db = request.db; clss = []
parent_id = db.doc.find_one({'doc_type':'des:ware_class', 'doc.alias':cls})
for doc in db.doc.find({'doc_type':'des:ware_class', 'parent':parent_id['_id']}).sort('doc.date', -1):
proc_id = doc['doc_type']
d_img = doc['default_img'] if 'default_img' in doc and doc['default_img'] else None
attachment = get_nf(proc_id, doc['_id'], 1)
data = doc['doc']
try:
count = count_ware(doc['_id'])
except: count='1'
full_doc = {"_id":doc['_id'], "id": doc['_id'],
'count':count,
"doc": data,
"att": attachment, "img":get_curr_img(doc, attachment), 'default_img':d_img, 'proc_id':proc_id}
clss.append(full_doc)
pages= ''
docs = get_full_docs(db.doc.find({'doc_type':'des:ware', 'doc.class':parent_id['_id']}).sort('doc.date', -1))
# docs = get_full_docs(req).sort('doc.date', -1)
filter = get_filters(parent_id['_id'])
# filter = get_ware_cls(cls)
parent_doc = get_full_doc(parent_id['_id'])
# seo = db.doc.find_one({'doc.alias':'class_seo'}, {'doc.title':1, 'doc.tags':1, 'doc.body':1, 'doc.footer':1, 'doc.add_title':1 })
seo = db.doc.find_one({'_id':parent_id['_id']}, {'doc.description':1, 'doc.tags':1, 'doc.footer':1 })
# seo = seo if 'doc' in seo else ''
return templ('list_class', request, dict(cls_docs = clss, cls=cls, docs = docs, proc_id='des:ware', pages = pages,
path=get_navigate(parent_id['_id']), parent_doc=parent_doc, filter=filter, seo=seo) )
def set_filters(request, cls, filters):
db = request.db
url = filters[1:]
url = url.split(';')
docs=[]; cond=[]; ds = {}; attr = []; data = []
for res in url:
res = res.replace('%20', ' ')
aaa = res.split('=');
key = aaa[0]; val = aaa[1]
if key in ds:
if type(ds[key]) == list: ds[key].append(val)
else: ds[key] = [ds[key], val]
else: ds.update({key:val})
for res in ds:
attr.append(res)
for res in ds.items():
if type(res[1]) == list: pr = {'doc.title.ru':res[0], 'doc.attr_val.ru':{'$in':res[1]}}
else: pr = {'doc.title.ru':res[0], 'doc.attr_val.ru':res[1]}
docs.append(pr)
cursor = db.doc.aggregate([
{ '$match' : { 'doc_type' : "des:ware_attr", 'doc.class':cls, '$or': docs} },
{ '$group' : { '_id': "$owner", "attr": { '$push': "$doc.title.ru" } } },
{ '$match' : { "attr": { '$all': attr } } },
{ '$project': {"_id":1 } }
])
for res in cursor:
cond.append(res)
if not len(cond): return None
from libs.sites.sites import get_full_docs
docs = get_full_docs(db.doc.find({ '$or':cond }).sort('doc.date', -1))
return docs
def list_filters(request, cls, filters):
""" если чтото выбрали для фильтров
"""
from libs.sites.sites import get_pagination, get_full_docs, get_curr_img, get_full_doc
from libs.files.files import get_nf
db = request.db; clss = []
parent_id = db.doc.find_one({'doc_type':'des:ware_class', 'doc.alias':cls})
for doc in db.doc.find({'doc_type':'des:ware_class', 'parent':parent_id['_id']}).sort('doc.date', -1):
proc_id = doc['doc_type']
attachment = get_nf(proc_id, doc['_id'], 1)
data = doc['doc']
try:
count = count_ware(doc['_id'])
except: count='1'
full_doc = {"_id":doc['_id'], "id": doc['_id'],
'count':count,
"doc": data,
"att": attachment, "img":get_curr_img(doc, attachment), 'proc_id':proc_id}
clss.append(full_doc)
pages= ''
docs = set_filters( parent_id['_id'], filters )
filter = get_filters(parent_id['_id'])
seo = db.doc.find_one({'doc.alias':'class_seo'}, {'doc.description':1, 'doc.tags':1, 'doc.body':1, 'doc.footer':1, 'doc.add_title':1 })
seo = seo if 'doc' in seo else ''
return templ('list_class', request, {'result':'ok', 'cls_docs':clss, 'cls':cls, 'docs':docs, 'proc_id':'des:ware', 'pages':pages,
'path':get_navigate(parent_id['_id']), 'parent_doc':get_full_doc(parent_id['_id']), 'filter':filter, 'seo':seo})
def get_list_filter(request, cls):
""" формируемая структура [{'id_class':'123', "filter_name":"name", attr:{'id_class':'123', 'title':'title'}]
"""
db = request.db; filters = []
for res in db.doc.find({ 'doc_type':'des:ware_filter', '$or':[{'doc.ware_class':cls}, {} ]}):
filters.append({'id_class':res['doc']['ware_class'], 'title':ct(res['doc']['title'])})
# users = [doc._id for doc in db.doc.find({"doc_type":'des:ware_filter', 'group': {'$all': ['administrator']}})]
users = [doc._id for doc in db.doc.find({"doc_type":'des:ware_filter', 'group': {'$all': ['administrator']}})]
articles = db.doc.find({"doc_type":'blogs', 'user': {'$in': users}})
return filters
def first_cls(request):
""" выводит корневые категории, в основном для главной страницы """
from libs.sites.sites import get_full_docs, get_curr_img
from libs.files.files import get_nf
db = request.db; docs = []
for doc in db.doc.find({'doc_type':'des:ware_class', 'parent':'_'}).sort('doc.date', -1):
proc_id = doc['doc_type']
attachment = get_nf(proc_id, doc['_id'], 1)
data = doc['doc']
try:
count = count_ware(doc['_id'])
except: count = '1'
full_doc = {"_id":doc['_id'], "id": doc['_id'],
'count':count,
"doc": data,
"att": attachment, "img":get_curr_img(doc, attachment), 'proc_id':proc_id}
docs.append(full_doc)
return docs
def list_ware_cls(request, full=False):
"""
получение колва докуентов
Для каждого класса находим сколько в нем документов
Назначаем их кол-во всем его родителям приплюсовыванием
:param выводить с дополнительной информацией типа картинок или просто названия, с доп. информацией выводится олько для главной
"""
db = request.db
docs = [res for res in db.doc.find({'doc_type':'des:ware_class'}, {'doc.title.ru':1, 'doc.alias':1, 'parent':1, 'child':1 }).sort('doc.date', -1) ]
# docs = [res for res in db.doc.find({'doc_type':'des:ware_class'}).sort('doc.date', -1) ]
if full:
docs = [res for res in db.doc.find({'doc_type':'des:ware_class'}).sort('doc.date', -1) ]
from libs.sites.sites import get_full_docs
docs = get_full_docs(docs)
return form_tree_( docs )
# return docs
# def form_tree_(docs):
# tree = {doc['_id']: doc for doc in docs}
# for doc in docs:
# if "child" in doc and doc['child'] != '_':
# doc['child'] = [tree[id] for id in doc['child']]
# docss = {"_id": "_", "child": [doc for doc in docs if "parent" not in doc or doc['parent']=='_']}
# return docss
def form_tree_(docs):
""" формирует из документов дерево
"""
tree = {doc['_id']: doc for doc in docs}
for doc in docs:
doc['child'] = []
for doc in docs:
parent = doc.get("parent", None)
if parent and parent != '_':
tree[parent]['child'].append(doc)
docss = {"_id": "_", "child": [doc for doc in docs if "parent" not in doc or doc['parent'] == '_']}
return docss
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
def list_orders(request):
from libs.sites.sites import get_full_docs
db = request.db
# web_order = db.doc.find({'doc_type':'web_order'})
# web_order_ware = db.doc.find({'doc_type':'web_order_ware'})
web_order = get_full_docs(db.doc.find({'doc_type':'des:web_order'}).limit(60).sort('doc.date', -1))
web_order_ware = get_full_docs(db.doc.find({'doc_type':'des:web_order_ware'}).limit(60).sort('doc.date', -1))
ware = get_full_docs(db.doc.find({'doc_type':'des:ware'}).limit(60).sort('doc.date', -1))
return templ('libs.shop:list_orders', request, dict(web_order = web_order, web_order_ware = web_order_ware, ware=ware))
def callback_post():
phone = get_post('phone')
basket = get_post('basket', '')
dom = get_settings('domain')
return callback(phone, dom, basket)
def callback(phone, dom, basket):
""" отправка sms с почты на телефон
"""
# phone = get_post('phone')
# dom = get_settings('domain')
# mail = '<EMAIL>'
# mail = '<EMAIL>'
# mail = '<EMAIL>'
# mail = get_const_value('callback_mail')
mail = get_settings('callback_mail')
create_row('des:phone', '_', defaults={'phone':phone})
text = u""" {0} """.format( phone )
if basket == 'true':
route_mail(mail, u'Cайт корзина ', text)
else:
route_mail(mail, u'Запрос на сайте ', text)
# text = u""" {0} -> {1}""".format( dom, phone )
# route_mail(mail, u'Запрос на сайте '+dom, text)
return {"result":"ok"}
| ru | 0.300706 | # from app.report.report import * получает id товара и количество берет подробности о нем и заносит в сесии # die(doc['doc']['count_opt']) # die( s['basket'][ware]['amount'] ) # basket = {'1':'1'} # amount += float(basket[i]['quantity']) * float(basket[i]['price']) # return templ('app.shop:list_basket', quantity = quantity, basket = basket, amount = amount ) web заказы -> на создание -> init_web_order(new_row) web заказы -> на создание подтаблицы -> update_sum( owner, new_row) web заказы -> на обновление подтаблицы -> update_sum( owner, new_row) web заказы товары -> на создание -> update_price_column({}, new_row, doc['owner']) price_changed( doc['owner'], {}, new_row, False) web заказы товары -> на обновление -> update_price_column(old_row, new_row, doc['owner']) price_changed(doc['owner'], old_row, new_row, False) щитает кол-во товаров в корзине # ===================================================================================================================================== # ====================================== ADVANCED FILTER =========================================================================== # ===================================================================================================================================== # отфильтровует сами товары указаному списку атрибутов #текущий вариант # aaa = {'attr':{'diagonal':'17', 'korpus': 'metall'}} # cond = {'attr.diagonal: {$in: [15, 17]}} # cond = {'docs: {$in: [15, 17]}} #текущий для агрегации #db.test.aggregate({$unwind: "$likes"}) # {'docs':[{'id':1, 'cat': 'diagonal', 'attr':'17'}, {id:2, 'cat':'korpus', 'attr': 'metall'}] } получаем список для фильтра который справа показывается # получаем список категорий которые принадлежат например смартфон на выходе диагональ и тд. 
# $cat = # select c.* from ware_cat as c inner join on c.id = cc.owner ware_class_cat as cc where cc.owner = $cls # {'doc_type':'ware_class_cat', 'owner':cls}{'doc_type':'ware_cat', '_id':{'$in':cat}} # select a.* from ware_attr as a where owner in $cat # собираем нужные данные, собираем фильтры принадлежащии классу # собираем фильтры атрибутов # идем по полученым фильтрам и собиарем атрибуты вызывается для показа списка товаров #ware_class_cat-справочник где хранятся категории которые относятся к классу ( класс-смартфон у него категория диагональ экрана ) # cats = [res['_id'] for res in db.doc.find({'doc_type':'ware_class_cat'})] # cond = {'attr.diagonal: {$in: [15, 17]}} # ====================================================================================================================== # ====================================================================================================================== # ====================================================================================================================== # if seo: # seo = seo # else: seo = '' #news_map=news_map, captcha=raw, hash=hash, # { '$match' : { 'doc_type' : "des:ware_attr", 'doc.class': { '$exists': True } } }, показывает список вложеных категорий и товаров для категорий # docs = get_full_docs(req).sort('doc.date', -1) # filter = get_ware_cls(cls) # seo = db.doc.find_one({'doc.alias':'class_seo'}, {'doc.title':1, 'doc.tags':1, 'doc.body':1, 'doc.footer':1, 'doc.add_title':1 }) # seo = seo if 'doc' in seo else '' если чтото выбрали для фильтров формируемая структура [{'id_class':'123', "filter_name":"name", attr:{'id_class':'123', 'title':'title'}] # users = [doc._id for doc in db.doc.find({"doc_type":'des:ware_filter', 'group': {'$all': ['administrator']}})] выводит корневые категории, в основном для главной страницы получение колва докуентов Для каждого класса находим сколько в нем документов Назначаем их кол-во всем его родителям приплюсовыванием :param выводить с дополнительной 
информацией типа картинок или просто названия, с доп. информацией выводится олько для главной # docs = [res for res in db.doc.find({'doc_type':'des:ware_class'}).sort('doc.date', -1) ] # return docs # def form_tree_(docs): # tree = {doc['_id']: doc for doc in docs} # for doc in docs: # if "child" in doc and doc['child'] != '_': # doc['child'] = [tree[id] for id in doc['child']] # docss = {"_id": "_", "child": [doc for doc in docs if "parent" not in doc or doc['parent']=='_']} # return docss формирует из документов дерево # ====================================================================================================================== # ====================================================================================================================== # ====================================================================================================================== # web_order = db.doc.find({'doc_type':'web_order'}) # web_order_ware = db.doc.find({'doc_type':'web_order_ware'}) отправка sms с почты на телефон # phone = get_post('phone') # dom = get_settings('domain') # mail = '<EMAIL>' # mail = '<EMAIL>' # mail = '<EMAIL>' # mail = get_const_value('callback_mail') {0} # text = u""" {0} -> {1}""".format( dom, phone ) # route_mail(mail, u'Запрос на сайте '+dom, text) | 2.308893 | 2 |
STS-AssumeRole-cnRegion-toPublish.py | hawkey999/Custom-Federation-Broker-access-AWS-Console | 0 | 6612577 | '''
该示例是在AWS中国区临时委派一个Role给临时用户,不需要为该用户建IAM User,也不用登录
可以直接通过以下代码生成的URL link直接访问console
参考官方文档:
https://docs.aws.amazon.com/zh_cn/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html#STSConsoleLink_programPython
原文档是针对AWS Global区的,以下示例修改为针对AWS 北京区,endpoint和console/signin的URL不同
(如果是宁夏区,则把37行enpoint_url修改为sts.cn-nortwest-1.amazonaws.com.cn)
原文档是Python2+老的boto,现在修改为Python3.6+boto3
注意:运行example的本机需要配置有credential和默认region,可以通过AWS CLI配置:
aws configure
或者配置~/.aws下的config和credentials文件
注意:assume的role不要是Role里面那个默认的Admin,要Admin也自己建一个,因为信任实体不同
'''
import boto3
import urllib
import json
import requests # 'pip install requests'
# # AWS SDK for Python (Boto) 'pip install boto'
# from boto3.sts import STSConnection
# # Step 1: Authenticate user in your own identity system.
# # Step 2: Using the access keys for an IAM user in your AWS account,
# # call "AssumeRole" to get temporary access keys for the federated user
# # Note: Calls to AWS STS AssumeRole must be signed using the access key ID
# # and secret access key of an IAM user or using existing temporary credentials.
# # The credentials can be in EC2 instance metadata, in environment variables,
# # or in a configuration file, and will be discovered automatically by the
# # STSConnection() function. For more information, see the Python SDK docs:
# # http://boto.readthedocs.org/en/latest/boto_config_tut.html
# sts_connection = STSConnection()
sts = boto3.client(
'sts',
endpoint_url="https://sts.cn-north-1.amazonaws.com.cn",
)
# assumed_role_object = sts.get_federation_token(
# Name='<PASSWORD>'
# )
assumed_role_object = sts.assume_role(
RoleArn="<Your Role ARN>",
RoleSessionName="AssumeRoleSession1"
)
print(assumed_role_object)
# Step 3: Format resulting temporary credentials into JSON
json_string_with_temp_credentials = '{'
json_string_with_temp_credentials += '"sessionId":"' + \
assumed_role_object['Credentials']['AccessKeyId'] + '",'
json_string_with_temp_credentials += '"sessionKey":"' + \
assumed_role_object['Credentials']['SecretAccessKey'] + '",'
json_string_with_temp_credentials += '"sessionToken":"' + \
assumed_role_object['Credentials']['SessionToken'] + '"'
json_string_with_temp_credentials += '}'
# Step 4. Make request to AWS federation endpoint to get sign-in token. Construct the parameter string with
# the sign-in action request, a 12-hour session duration, and the JSON document with temporary credentials
# as parameters.
request_parameters = "?Action=getSigninToken"
request_parameters += "&SessionDuration=43200"
request_parameters += "&Session=" + \
urllib.parse.quote_plus(json_string_with_temp_credentials)
request_url = "https://signin.amazonaws.cn/federation" + request_parameters
r = requests.get(request_url)
# Returns a JSON document with a single element named SigninToken.
signin_token = json.loads(r.text)
# Step 5: Create URL where users can use the sign-in token to sign in to
# the console. This URL must be used within 15 minutes after the
# sign-in token was issued.
request_parameters = "?Action=login"
request_parameters += "&Issuer=Example.org"
request_parameters += "&Destination=" + \
urllib.parse.quote_plus("https://console.amazonaws.cn/")
request_parameters += "&SigninToken=" + signin_token["SigninToken"]
request_url = "https://signin.amazonaws.cn/federation" + request_parameters
# Send final URL to stdout
print (request_url)
| '''
该示例是在AWS中国区临时委派一个Role给临时用户,不需要为该用户建IAM User,也不用登录
可以直接通过以下代码生成的URL link直接访问console
参考官方文档:
https://docs.aws.amazon.com/zh_cn/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html#STSConsoleLink_programPython
原文档是针对AWS Global区的,以下示例修改为针对AWS 北京区,endpoint和console/signin的URL不同
(如果是宁夏区,则把37行enpoint_url修改为sts.cn-nortwest-1.amazonaws.com.cn)
原文档是Python2+老的boto,现在修改为Python3.6+boto3
注意:运行example的本机需要配置有credential和默认region,可以通过AWS CLI配置:
aws configure
或者配置~/.aws下的config和credentials文件
注意:assume的role不要是Role里面那个默认的Admin,要Admin也自己建一个,因为信任实体不同
'''
import boto3
import urllib
import json
import requests # 'pip install requests'
# # AWS SDK for Python (Boto) 'pip install boto'
# from boto3.sts import STSConnection
# # Step 1: Authenticate user in your own identity system.
# # Step 2: Using the access keys for an IAM user in your AWS account,
# # call "AssumeRole" to get temporary access keys for the federated user
# # Note: Calls to AWS STS AssumeRole must be signed using the access key ID
# # and secret access key of an IAM user or using existing temporary credentials.
# # The credentials can be in EC2 instance metadata, in environment variables,
# # or in a configuration file, and will be discovered automatically by the
# # STSConnection() function. For more information, see the Python SDK docs:
# # http://boto.readthedocs.org/en/latest/boto_config_tut.html
# sts_connection = STSConnection()
sts = boto3.client(
'sts',
endpoint_url="https://sts.cn-north-1.amazonaws.com.cn",
)
# assumed_role_object = sts.get_federation_token(
# Name='<PASSWORD>'
# )
assumed_role_object = sts.assume_role(
RoleArn="<Your Role ARN>",
RoleSessionName="AssumeRoleSession1"
)
print(assumed_role_object)
# Step 3: Format resulting temporary credentials into JSON
json_string_with_temp_credentials = '{'
json_string_with_temp_credentials += '"sessionId":"' + \
assumed_role_object['Credentials']['AccessKeyId'] + '",'
json_string_with_temp_credentials += '"sessionKey":"' + \
assumed_role_object['Credentials']['SecretAccessKey'] + '",'
json_string_with_temp_credentials += '"sessionToken":"' + \
assumed_role_object['Credentials']['SessionToken'] + '"'
json_string_with_temp_credentials += '}'
# Step 4. Make request to AWS federation endpoint to get sign-in token. Construct the parameter string with
# the sign-in action request, a 12-hour session duration, and the JSON document with temporary credentials
# as parameters.
request_parameters = "?Action=getSigninToken"
request_parameters += "&SessionDuration=43200"
request_parameters += "&Session=" + \
urllib.parse.quote_plus(json_string_with_temp_credentials)
request_url = "https://signin.amazonaws.cn/federation" + request_parameters
r = requests.get(request_url)
# Returns a JSON document with a single element named SigninToken.
signin_token = json.loads(r.text)
# Step 5: Create URL where users can use the sign-in token to sign in to
# the console. This URL must be used within 15 minutes after the
# sign-in token was issued.
request_parameters = "?Action=login"
request_parameters += "&Issuer=Example.org"
request_parameters += "&Destination=" + \
urllib.parse.quote_plus("https://console.amazonaws.cn/")
request_parameters += "&SigninToken=" + signin_token["SigninToken"]
request_url = "https://signin.amazonaws.cn/federation" + request_parameters
# Send final URL to stdout
print (request_url)
| en | 0.531034 | 该示例是在AWS中国区临时委派一个Role给临时用户,不需要为该用户建IAM User,也不用登录 可以直接通过以下代码生成的URL link直接访问console 参考官方文档: https://docs.aws.amazon.com/zh_cn/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html#STSConsoleLink_programPython 原文档是针对AWS Global区的,以下示例修改为针对AWS 北京区,endpoint和console/signin的URL不同 (如果是宁夏区,则把37行enpoint_url修改为sts.cn-nortwest-1.amazonaws.com.cn) 原文档是Python2+老的boto,现在修改为Python3.6+boto3 注意:运行example的本机需要配置有credential和默认region,可以通过AWS CLI配置: aws configure 或者配置~/.aws下的config和credentials文件 注意:assume的role不要是Role里面那个默认的Admin,要Admin也自己建一个,因为信任实体不同 # 'pip install requests' # # AWS SDK for Python (Boto) 'pip install boto' # from boto3.sts import STSConnection # # Step 1: Authenticate user in your own identity system. # # Step 2: Using the access keys for an IAM user in your AWS account, # # call "AssumeRole" to get temporary access keys for the federated user # # Note: Calls to AWS STS AssumeRole must be signed using the access key ID # # and secret access key of an IAM user or using existing temporary credentials. # # The credentials can be in EC2 instance metadata, in environment variables, # # or in a configuration file, and will be discovered automatically by the # # STSConnection() function. For more information, see the Python SDK docs: # # http://boto.readthedocs.org/en/latest/boto_config_tut.html # sts_connection = STSConnection() # assumed_role_object = sts.get_federation_token( # Name='<PASSWORD>' # ) # Step 3: Format resulting temporary credentials into JSON # Step 4. Make request to AWS federation endpoint to get sign-in token. Construct the parameter string with # the sign-in action request, a 12-hour session duration, and the JSON document with temporary credentials # as parameters. # Returns a JSON document with a single element named SigninToken. # Step 5: Create URL where users can use the sign-in token to sign in to # the console. This URL must be used within 15 minutes after the # sign-in token was issued. 
# Send final URL to stdout | 2.888957 | 3 |
getconfig.py | coffiasd/code_realease | 0 | 6612578 | <reponame>coffiasd/code_realease<gh_stars>0
import os
from configparser import ConfigParser
# 项目路径
#rootDir = os.path.split(os.path.realpath(__file__))[0]
# config.ini文件路径
#configFilePath = os.path.join(rootDir, 'config.ini')
configFilePath = 'config.ini'
def get_config_values(section, option):
"""
根据传入的section获取对应的value
:param section: ini配置文件中用[]标识的内容
:return:
"""
config = ConfigParser()
config.read(configFilePath, encoding="utf-8-sig")
# return config.items(section=section)
return config.get(section=section, option=option)
def set_config_values(section,option,val):
config = ConfigParser()
config.read(configFilePath, encoding="utf-8-sig")
config.set(section=section,option=option,value=val)
config.write(open(configFilePath, "w"))
| import os
from configparser import ConfigParser
# 项目路径
#rootDir = os.path.split(os.path.realpath(__file__))[0]
# config.ini文件路径
#configFilePath = os.path.join(rootDir, 'config.ini')
configFilePath = 'config.ini'
def get_config_values(section, option):
"""
根据传入的section获取对应的value
:param section: ini配置文件中用[]标识的内容
:return:
"""
config = ConfigParser()
config.read(configFilePath, encoding="utf-8-sig")
# return config.items(section=section)
return config.get(section=section, option=option)
def set_config_values(section,option,val):
config = ConfigParser()
config.read(configFilePath, encoding="utf-8-sig")
config.set(section=section,option=option,value=val)
config.write(open(configFilePath, "w")) | zh | 0.199669 | # 项目路径 #rootDir = os.path.split(os.path.realpath(__file__))[0] # config.ini文件路径 #configFilePath = os.path.join(rootDir, 'config.ini') 根据传入的section获取对应的value :param section: ini配置文件中用[]标识的内容 :return: # return config.items(section=section) | 2.550377 | 3 |
simple.py | szels/recommender_system | 0 | 6612579 | <gh_stars>0
# read https://www.datacamp.com/community/tutorials/recommender-systems-python
import pandas as pd
# Load movies metadata
metadata = pd.read_csv('../data/movies_metadata.csv', low_memory=False)
#print metadata.head(3)
# C is the mean vote across the whole report
C = metadata['vote_average'].mean()
#print C
# m is the minimum votes required to be listed in the chart
m = metadata['vote_count'].quantile(0.9)
#print m
# Filter out all qualified movies into a new DataFrame
q_movies = metadata.copy().loc[metadata['vote_count'] >= m]
#print q_movies.shape
# Function that computes the weighted rating of each movie
def weighted_rating(x, m=m, C=C):
v = x['vote_count']
R = x['vote_average']
# Calculation based on the IMDB formula
return (v/(v+m) * R) + (m/(m+v) * C)
# Define a new feature 'score' and calculate its value with `weighted_rating()`
q_movies['score'] = q_movies.apply(weighted_rating, axis=1)
#Sort movies based on score calculated above
q_movies = q_movies.sort_values('score', ascending=False)
#Print the top 15 movies
print q_movies[['title', 'vote_count', 'vote_average', 'score']].head(15)
| # read https://www.datacamp.com/community/tutorials/recommender-systems-python
import pandas as pd
# Load movies metadata
metadata = pd.read_csv('../data/movies_metadata.csv', low_memory=False)
#print metadata.head(3)
# C is the mean vote across the whole report
C = metadata['vote_average'].mean()
#print C
# m is the minimum votes required to be listed in the chart
m = metadata['vote_count'].quantile(0.9)
#print m
# Filter out all qualified movies into a new DataFrame
q_movies = metadata.copy().loc[metadata['vote_count'] >= m]
#print q_movies.shape
# Function that computes the weighted rating of each movie
def weighted_rating(x, m=m, C=C):
v = x['vote_count']
R = x['vote_average']
# Calculation based on the IMDB formula
return (v/(v+m) * R) + (m/(m+v) * C)
# Define a new feature 'score' and calculate its value with `weighted_rating()`
q_movies['score'] = q_movies.apply(weighted_rating, axis=1)
#Sort movies based on score calculated above
q_movies = q_movies.sort_values('score', ascending=False)
#Print the top 15 movies
print q_movies[['title', 'vote_count', 'vote_average', 'score']].head(15) | en | 0.825238 | # read https://www.datacamp.com/community/tutorials/recommender-systems-python # Load movies metadata #print metadata.head(3) # C is the mean vote across the whole report #print C # m is the minimum votes required to be listed in the chart #print m # Filter out all qualified movies into a new DataFrame #print q_movies.shape # Function that computes the weighted rating of each movie # Calculation based on the IMDB formula # Define a new feature 'score' and calculate its value with `weighted_rating()` #Sort movies based on score calculated above #Print the top 15 movies | 3.786873 | 4 |
packages/flask_app/cellar/google.py | mattotodd/docker-cellar-panel | 0 | 6612580 | <filename>packages/flask_app/cellar/google.py<gh_stars>0
import httplib2
import os, json
from base64 import b64decode
from googleapiclient import discovery
from google.oauth2 import service_account
scopes = ["https://www.googleapis.com/auth/drive", "https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/spreadsheets"]
service_info = json.loads(b64decode(os.environ['GOOGLE_SERVICE_AUTH']))
credentials = service_account.Credentials.from_service_account_info(service_info, scopes=scopes)
service = discovery.build('sheets', 'v4', credentials=credentials)
gsheets = service.spreadsheets()
PRODUCTION_SPREADSHEET_ID = os.environ['PRODUCTION_SPREADSHEET_ID']
MAIN_SHEET_NAME = os.environ['MAIN_SHEET_NAME']
def get_sheet_values(spreadsheet_id=PRODUCTION_SPREADSHEET_ID, sheet_name=MAIN_SHEET_NAME, limit=''):
get_range = "%s!A1:M%s" % (sheet_name, limit)
request = gsheets.values().get(spreadsheetId=spreadsheet_id, range=get_range)
response = request.execute()
if 'values' not in response:
return []
keys = response['values'][0]
rows = []
for row in response['values'][1:]:
batch = {}
for idx, value in enumerate(row):
batch[keys[idx]] = value
rows.append(batch)
return rows
| <filename>packages/flask_app/cellar/google.py<gh_stars>0
import httplib2
import os, json
from base64 import b64decode
from googleapiclient import discovery
from google.oauth2 import service_account
scopes = ["https://www.googleapis.com/auth/drive", "https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/spreadsheets"]
service_info = json.loads(b64decode(os.environ['GOOGLE_SERVICE_AUTH']))
credentials = service_account.Credentials.from_service_account_info(service_info, scopes=scopes)
service = discovery.build('sheets', 'v4', credentials=credentials)
gsheets = service.spreadsheets()
PRODUCTION_SPREADSHEET_ID = os.environ['PRODUCTION_SPREADSHEET_ID']
MAIN_SHEET_NAME = os.environ['MAIN_SHEET_NAME']
def get_sheet_values(spreadsheet_id=PRODUCTION_SPREADSHEET_ID, sheet_name=MAIN_SHEET_NAME, limit=''):
get_range = "%s!A1:M%s" % (sheet_name, limit)
request = gsheets.values().get(spreadsheetId=spreadsheet_id, range=get_range)
response = request.execute()
if 'values' not in response:
return []
keys = response['values'][0]
rows = []
for row in response['values'][1:]:
batch = {}
for idx, value in enumerate(row):
batch[keys[idx]] = value
rows.append(batch)
return rows
| none | 1 | 2.602622 | 3 | |
coverage-3.7.1/tests/test_farm.py | I-Valchev/UrPas | 1 | 6612581 | """Run tests in the farm subdirectory. Designed for nose."""
import difflib, filecmp, fnmatch, glob, os, re, shutil, sys
from nose.plugins.skip import SkipTest
from tests.backtest import run_command, execfile # pylint: disable=W0622
from coverage.control import _TEST_NAME_FILE
def test_farm(clean_only=False):
    """Generate farm test cases for nose to find and run.

    Yields one-element tuples, each wrapping a `FarmTestCase` built from a
    test script found under tests/farm/.  When `clean_only` is true, the
    generated cases perform only their clean() actions.
    """
    scripts = glob.glob("tests/farm/*/*.py")
    for script in scripts:
        yield (FarmTestCase(script, clean_only),)
class FarmTestCase(object):
"""A test case from the farm tree.
Tests are short Python script files, often called run.py:
copy("src", "out")
run('''
coverage -x white.py
coverage -a white.py
''', rundir="out")
compare("out", "gold", "*,cover")
clean("out")
Verbs (copy, run, compare, clean) are methods in this class. FarmTestCase
has options to allow various uses of the test cases (normal execution,
cleaning-only, or run and leave the results for debugging).
"""
def __init__(self, runpy, clean_only=False, dont_clean=False):
"""Create a test case from a run.py file.
`clean_only` means that only the clean() action is executed.
`dont_clean` means that the clean() action is not executed.
"""
self.description = runpy
self.dir, self.runpy = os.path.split(runpy)
self.clean_only = clean_only
self.dont_clean = dont_clean
def cd(self, newdir):
"""Change the current directory, and return the old one."""
cwd = os.getcwd()
os.chdir(newdir)
return cwd
def addtopath(self, directory):
"""Add `directory` to the path, and return the old path."""
oldpath = sys.path[:]
if directory is not None:
sys.path.insert(0, directory)
return oldpath
def restorepath(self, path):
"""Restore the system path to `path`."""
sys.path = path
    def __call__(self):
        """Execute the test from the run.py file.

        Runs the test script in its own directory, exposing the farm verbs
        (copy, run, compare, clean, etc.) to the script as globals.  Any
        modules the script imports are removed from sys.modules afterwards,
        so the same source files can be imported again by other tests.
        """
        if _TEST_NAME_FILE:
            # Record which test is running, for debugging coverage itself.
            f = open(_TEST_NAME_FILE, "w")
            f.write(self.description.replace("/", "_"))
            f.close()

        cwd = self.cd(self.dir)

        # Prepare a dictionary of globals for the run.py files to use.
        fns = """
            copy run runfunc compare contains doesnt_contain clean skip
            """.split()
        if self.clean_only:
            # Cleaning only: every verb becomes a no-op except clean itself.
            glo = dict([(fn, self.noop) for fn in fns])
            glo['clean'] = self.clean
        else:
            glo = dict([(fn, getattr(self, fn)) for fn in fns])
            if self.dont_clean:                 # pragma: not covered
                # Leave results behind for debugging: disable clean.
                glo['clean'] = self.noop

        # Snapshot sys.modules before running so new imports can be undone.
        old_mods = dict(sys.modules)
        try:
            execfile(self.runpy, glo)
        finally:
            # Always return to the original directory, even on failure.
            self.cd(cwd)

        # Remove any new modules imported during the test run. This lets us
        # import the same source files for more than one test.
        to_del = [m for m in sys.modules if m not in old_mods]
        for m in to_del:
            del sys.modules[m]
    def run_fully(self):        # pragma: not covered
        """Run as a full test case, with setUp and tearDown.

        Mirrors what a test runner would do: set up, execute, and always
        tear down, even if the test itself raises.
        """
        self.setUp()
        try:
            self()
        finally:
            self.tearDown()
def fnmatch_list(self, files, file_pattern):
"""Filter the list of `files` to only those that match `file_pattern`.
If `file_pattern` is None, then return the entire list of files.
Returns a list of the filtered files.
"""
if file_pattern:
files = [f for f in files if fnmatch.fnmatch(f, file_pattern)]
return files
def setUp(self):
"""Test set up, run by nose before __call__."""
# Modules should be importable from the current directory.
self.old_syspath = sys.path[:]
sys.path.insert(0, '')
    def tearDown(self):
        """Test tear down, run by nose after __call__."""
        # Make sure no matter what, the test is cleaned up: re-run it in
        # clean-only mode, which executes just the clean() actions.
        if not self.dont_clean:             # pragma: part covered
            self.clean_only = True
            self()
        # Restore the original sys.path saved by setUp.
        sys.path = self.old_syspath
# Functions usable inside farm run.py files
def noop(self, *args, **kwargs):
"""A no-op function to stub out run, copy, etc, when only cleaning."""
pass
def copy(self, src, dst):
"""Copy a directory."""
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
def run(self, cmds, rundir="src", outfile=None):
"""Run a list of commands.
`cmds` is a string, commands separated by newlines.
`rundir` is the directory in which to run the commands.
`outfile` is a filename to redirect stdout to.
"""
cwd = self.cd(rundir)
if outfile:
fout = open(outfile, "a+")
try:
for cmd in cmds.split("\n"):
cmd = cmd.strip()
if not cmd:
continue
retcode, output = run_command(cmd)
print(output.rstrip())
if outfile:
fout.write(output)
if retcode:
raise Exception("command exited abnormally")
finally:
if outfile:
fout.close()
self.cd(cwd)
def runfunc(self, fn, rundir="src", addtopath=None):
"""Run a function.
`fn` is a callable.
`rundir` is the directory in which to run the function.
"""
cwd = self.cd(rundir)
oldpath = self.addtopath(addtopath)
try:
fn()
finally:
self.cd(cwd)
self.restorepath(oldpath)
def compare(self, dir1, dir2, file_pattern=None, size_within=0,
left_extra=False, right_extra=False, scrubs=None
):
"""Compare files matching `file_pattern` in `dir1` and `dir2`.
`dir2` is interpreted as a prefix, with Python version numbers appended
to find the actual directory to compare with. "foo" will compare
against "foo_v241", "foo_v24", "foo_v2", or "foo", depending on which
directory is found first.
`size_within` is a percentage delta for the file sizes. If non-zero,
then the file contents are not compared (since they are expected to
often be different), but the file sizes must be within this amount.
For example, size_within=10 means that the two files' sizes must be
within 10 percent of each other to compare equal.
`left_extra` true means the left directory can have extra files in it
without triggering an assertion. `right_extra` means the right
directory can.
`scrubs` is a list of pairs, regex find and replace patterns to use to
scrub the files of unimportant differences.
An assertion will be raised if the directories fail one of their
matches.
"""
# Search for a dir2 with a version suffix.
version_suff = ''.join(map(str, sys.version_info[:3]))
while version_suff:
trydir = dir2 + '_v' + version_suff
if os.path.exists(trydir):
dir2 = trydir
break
version_suff = version_suff[:-1]
assert os.path.exists(dir1), "Left directory missing: %s" % dir1
assert os.path.exists(dir2), "Right directory missing: %s" % dir2
dc = filecmp.dircmp(dir1, dir2)
diff_files = self.fnmatch_list(dc.diff_files, file_pattern)
left_only = self.fnmatch_list(dc.left_only, file_pattern)
right_only = self.fnmatch_list(dc.right_only, file_pattern)
if size_within:
# The files were already compared, use the diff_files list as a
# guide for size comparison.
wrong_size = []
for f in diff_files:
left = open(os.path.join(dir1, f), "rb").read()
right = open(os.path.join(dir2, f), "rb").read()
size_l, size_r = len(left), len(right)
big, little = max(size_l, size_r), min(size_l, size_r)
if (big - little) / float(little) > size_within/100.0:
# print "%d %d" % (big, little)
# print "Left: ---\n%s\n-----\n%s" % (left, right)
wrong_size.append(f)
assert not wrong_size, (
"File sizes differ between %s and %s: %s" % (
dir1, dir2, wrong_size
))
else:
# filecmp only compares in binary mode, but we want text mode. So
# look through the list of different files, and compare them
# ourselves.
text_diff = []
for f in diff_files:
left = open(os.path.join(dir1, f), "rU").readlines()
right = open(os.path.join(dir2, f), "rU").readlines()
if scrubs:
left = self._scrub(left, scrubs)
right = self._scrub(right, scrubs)
if left != right:
text_diff.append(f)
print("".join(list(difflib.Differ().compare(left, right))))
assert not text_diff, "Files differ: %s" % text_diff
if not left_extra:
assert not left_only, "Files in %s only: %s" % (dir1, left_only)
if not right_extra:
assert not right_only, "Files in %s only: %s" % (dir2, right_only)
def _scrub(self, strlist, scrubs):
"""Scrub uninteresting data from the strings in `strlist`.
`scrubs is a list of (find, replace) pairs of regexes that are used on
each string in `strlist`. A list of scrubbed strings is returned.
"""
scrubbed = []
for s in strlist:
for rgx_find, rgx_replace in scrubs:
s = re.sub(rgx_find, rgx_replace, s)
scrubbed.append(s)
return scrubbed
def contains(self, filename, *strlist):
"""Check that the file contains all of a list of strings.
An assert will be raised if one of the arguments in `strlist` is
missing in `filename`.
"""
text = open(filename, "r").read()
for s in strlist:
assert s in text, "Missing content in %s: %r" % (filename, s)
def doesnt_contain(self, filename, *strlist):
"""Check that the file contains none of a list of strings.
An assert will be raised if any of the strings in strlist appears in
`filename`.
"""
text = open(filename, "r").read()
for s in strlist:
assert s not in text, "Forbidden content in %s: %r" % (filename, s)
def clean(self, cleandir):
"""Clean `cleandir` by removing it and all its children completely."""
# rmtree gives mysterious failures on Win7, so retry a "few" times.
# I've seen it take over 100 tries, so, 1000! This is probably the
# most unpleasant hack I've written in a long time...
tries = 1000
while tries: # pragma: part covered
if os.path.exists(cleandir):
try:
shutil.rmtree(cleandir)
except OSError: # pragma: not covered
if tries == 1:
raise
else:
tries -= 1
continue
break
def skip(self, msg=None):
"""Skip the current test."""
raise SkipTest(msg)
def main(): # pragma: not covered
"""Command-line access to test_farm.
Commands:
run testcase - Run a single test case.
out testcase - Run a test case, but don't clean up, to see the output.
clean - Clean all the output for all tests.
"""
op = 'help'
try:
op = sys.argv[1]
except IndexError:
pass
if op == 'run':
# Run the test for real.
case = FarmTestCase(sys.argv[2])
case.run_fully()
elif op == 'out':
# Run the test, but don't clean up, so we can examine the output.
case = FarmTestCase(sys.argv[2], dont_clean=True)
case.run_fully()
elif op == 'clean':
# Run all the tests, but just clean.
for test in test_farm(clean_only=True):
test[0].run_fully()
else:
print(main.__doc__)
# So that we can run just one farm run.py at a time.
if __name__ == '__main__':
main()
| """Run tests in the farm subdirectory. Designed for nose."""
import difflib, filecmp, fnmatch, glob, os, re, shutil, sys
from nose.plugins.skip import SkipTest
from tests.backtest import run_command, execfile # pylint: disable=W0622
from coverage.control import _TEST_NAME_FILE
def test_farm(clean_only=False):
"""A test-generating function for nose to find and run."""
for fname in glob.glob("tests/farm/*/*.py"):
case = FarmTestCase(fname, clean_only)
yield (case,)
class FarmTestCase(object):
"""A test case from the farm tree.
Tests are short Python script files, often called run.py:
copy("src", "out")
run('''
coverage -x white.py
coverage -a white.py
''', rundir="out")
compare("out", "gold", "*,cover")
clean("out")
Verbs (copy, run, compare, clean) are methods in this class. FarmTestCase
has options to allow various uses of the test cases (normal execution,
cleaning-only, or run and leave the results for debugging).
"""
def __init__(self, runpy, clean_only=False, dont_clean=False):
"""Create a test case from a run.py file.
`clean_only` means that only the clean() action is executed.
`dont_clean` means that the clean() action is not executed.
"""
self.description = runpy
self.dir, self.runpy = os.path.split(runpy)
self.clean_only = clean_only
self.dont_clean = dont_clean
def cd(self, newdir):
"""Change the current directory, and return the old one."""
cwd = os.getcwd()
os.chdir(newdir)
return cwd
def addtopath(self, directory):
"""Add `directory` to the path, and return the old path."""
oldpath = sys.path[:]
if directory is not None:
sys.path.insert(0, directory)
return oldpath
def restorepath(self, path):
"""Restore the system path to `path`."""
sys.path = path
def __call__(self):
"""Execute the test from the run.py file.
"""
if _TEST_NAME_FILE:
f = open(_TEST_NAME_FILE, "w")
f.write(self.description.replace("/", "_"))
f.close()
cwd = self.cd(self.dir)
# Prepare a dictionary of globals for the run.py files to use.
fns = """
copy run runfunc compare contains doesnt_contain clean skip
""".split()
if self.clean_only:
glo = dict([(fn, self.noop) for fn in fns])
glo['clean'] = self.clean
else:
glo = dict([(fn, getattr(self, fn)) for fn in fns])
if self.dont_clean: # pragma: not covered
glo['clean'] = self.noop
old_mods = dict(sys.modules)
try:
execfile(self.runpy, glo)
finally:
self.cd(cwd)
# Remove any new modules imported during the test run. This lets us
# import the same source files for more than one test.
to_del = [m for m in sys.modules if m not in old_mods]
for m in to_del:
del sys.modules[m]
def run_fully(self): # pragma: not covered
"""Run as a full test case, with setUp and tearDown."""
self.setUp()
try:
self()
finally:
self.tearDown()
def fnmatch_list(self, files, file_pattern):
"""Filter the list of `files` to only those that match `file_pattern`.
If `file_pattern` is None, then return the entire list of files.
Returns a list of the filtered files.
"""
if file_pattern:
files = [f for f in files if fnmatch.fnmatch(f, file_pattern)]
return files
def setUp(self):
"""Test set up, run by nose before __call__."""
# Modules should be importable from the current directory.
self.old_syspath = sys.path[:]
sys.path.insert(0, '')
def tearDown(self):
"""Test tear down, run by nose after __call__."""
# Make sure no matter what, the test is cleaned up.
if not self.dont_clean: # pragma: part covered
self.clean_only = True
self()
# Restore the original sys.path
sys.path = self.old_syspath
# Functions usable inside farm run.py files
def noop(self, *args, **kwargs):
"""A no-op function to stub out run, copy, etc, when only cleaning."""
pass
def copy(self, src, dst):
"""Copy a directory."""
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
def run(self, cmds, rundir="src", outfile=None):
"""Run a list of commands.
`cmds` is a string, commands separated by newlines.
`rundir` is the directory in which to run the commands.
`outfile` is a filename to redirect stdout to.
"""
cwd = self.cd(rundir)
if outfile:
fout = open(outfile, "a+")
try:
for cmd in cmds.split("\n"):
cmd = cmd.strip()
if not cmd:
continue
retcode, output = run_command(cmd)
print(output.rstrip())
if outfile:
fout.write(output)
if retcode:
raise Exception("command exited abnormally")
finally:
if outfile:
fout.close()
self.cd(cwd)
def runfunc(self, fn, rundir="src", addtopath=None):
"""Run a function.
`fn` is a callable.
`rundir` is the directory in which to run the function.
"""
cwd = self.cd(rundir)
oldpath = self.addtopath(addtopath)
try:
fn()
finally:
self.cd(cwd)
self.restorepath(oldpath)
def compare(self, dir1, dir2, file_pattern=None, size_within=0,
left_extra=False, right_extra=False, scrubs=None
):
"""Compare files matching `file_pattern` in `dir1` and `dir2`.
`dir2` is interpreted as a prefix, with Python version numbers appended
to find the actual directory to compare with. "foo" will compare
against "foo_v241", "foo_v24", "foo_v2", or "foo", depending on which
directory is found first.
`size_within` is a percentage delta for the file sizes. If non-zero,
then the file contents are not compared (since they are expected to
often be different), but the file sizes must be within this amount.
For example, size_within=10 means that the two files' sizes must be
within 10 percent of each other to compare equal.
`left_extra` true means the left directory can have extra files in it
without triggering an assertion. `right_extra` means the right
directory can.
`scrubs` is a list of pairs, regex find and replace patterns to use to
scrub the files of unimportant differences.
An assertion will be raised if the directories fail one of their
matches.
"""
# Search for a dir2 with a version suffix.
version_suff = ''.join(map(str, sys.version_info[:3]))
while version_suff:
trydir = dir2 + '_v' + version_suff
if os.path.exists(trydir):
dir2 = trydir
break
version_suff = version_suff[:-1]
assert os.path.exists(dir1), "Left directory missing: %s" % dir1
assert os.path.exists(dir2), "Right directory missing: %s" % dir2
dc = filecmp.dircmp(dir1, dir2)
diff_files = self.fnmatch_list(dc.diff_files, file_pattern)
left_only = self.fnmatch_list(dc.left_only, file_pattern)
right_only = self.fnmatch_list(dc.right_only, file_pattern)
if size_within:
# The files were already compared, use the diff_files list as a
# guide for size comparison.
wrong_size = []
for f in diff_files:
left = open(os.path.join(dir1, f), "rb").read()
right = open(os.path.join(dir2, f), "rb").read()
size_l, size_r = len(left), len(right)
big, little = max(size_l, size_r), min(size_l, size_r)
if (big - little) / float(little) > size_within/100.0:
# print "%d %d" % (big, little)
# print "Left: ---\n%s\n-----\n%s" % (left, right)
wrong_size.append(f)
assert not wrong_size, (
"File sizes differ between %s and %s: %s" % (
dir1, dir2, wrong_size
))
else:
# filecmp only compares in binary mode, but we want text mode. So
# look through the list of different files, and compare them
# ourselves.
text_diff = []
for f in diff_files:
left = open(os.path.join(dir1, f), "rU").readlines()
right = open(os.path.join(dir2, f), "rU").readlines()
if scrubs:
left = self._scrub(left, scrubs)
right = self._scrub(right, scrubs)
if left != right:
text_diff.append(f)
print("".join(list(difflib.Differ().compare(left, right))))
assert not text_diff, "Files differ: %s" % text_diff
if not left_extra:
assert not left_only, "Files in %s only: %s" % (dir1, left_only)
if not right_extra:
assert not right_only, "Files in %s only: %s" % (dir2, right_only)
def _scrub(self, strlist, scrubs):
"""Scrub uninteresting data from the strings in `strlist`.
`scrubs is a list of (find, replace) pairs of regexes that are used on
each string in `strlist`. A list of scrubbed strings is returned.
"""
scrubbed = []
for s in strlist:
for rgx_find, rgx_replace in scrubs:
s = re.sub(rgx_find, rgx_replace, s)
scrubbed.append(s)
return scrubbed
def contains(self, filename, *strlist):
"""Check that the file contains all of a list of strings.
An assert will be raised if one of the arguments in `strlist` is
missing in `filename`.
"""
text = open(filename, "r").read()
for s in strlist:
assert s in text, "Missing content in %s: %r" % (filename, s)
def doesnt_contain(self, filename, *strlist):
"""Check that the file contains none of a list of strings.
An assert will be raised if any of the strings in strlist appears in
`filename`.
"""
text = open(filename, "r").read()
for s in strlist:
assert s not in text, "Forbidden content in %s: %r" % (filename, s)
def clean(self, cleandir):
"""Clean `cleandir` by removing it and all its children completely."""
# rmtree gives mysterious failures on Win7, so retry a "few" times.
# I've seen it take over 100 tries, so, 1000! This is probably the
# most unpleasant hack I've written in a long time...
tries = 1000
while tries: # pragma: part covered
if os.path.exists(cleandir):
try:
shutil.rmtree(cleandir)
except OSError: # pragma: not covered
if tries == 1:
raise
else:
tries -= 1
continue
break
def skip(self, msg=None):
"""Skip the current test."""
raise SkipTest(msg)
def main(): # pragma: not covered
"""Command-line access to test_farm.
Commands:
run testcase - Run a single test case.
out testcase - Run a test case, but don't clean up, to see the output.
clean - Clean all the output for all tests.
"""
op = 'help'
try:
op = sys.argv[1]
except IndexError:
pass
if op == 'run':
# Run the test for real.
case = FarmTestCase(sys.argv[2])
case.run_fully()
elif op == 'out':
# Run the test, but don't clean up, so we can examine the output.
case = FarmTestCase(sys.argv[2], dont_clean=True)
case.run_fully()
elif op == 'clean':
# Run all the tests, but just clean.
for test in test_farm(clean_only=True):
test[0].run_fully()
else:
print(main.__doc__)
# So that we can run just one farm run.py at a time.
if __name__ == '__main__':
main()
| en | 0.89143 | Run tests in the farm subdirectory. Designed for nose. # pylint: disable=W0622 A test-generating function for nose to find and run. A test case from the farm tree. Tests are short Python script files, often called run.py: copy("src", "out") run(''' coverage -x white.py coverage -a white.py ''', rundir="out") compare("out", "gold", "*,cover") clean("out") Verbs (copy, run, compare, clean) are methods in this class. FarmTestCase has options to allow various uses of the test cases (normal execution, cleaning-only, or run and leave the results for debugging). Create a test case from a run.py file. `clean_only` means that only the clean() action is executed. `dont_clean` means that the clean() action is not executed. Change the current directory, and return the old one. Add `directory` to the path, and return the old path. Restore the system path to `path`. Execute the test from the run.py file. # Prepare a dictionary of globals for the run.py files to use. copy run runfunc compare contains doesnt_contain clean skip # pragma: not covered # Remove any new modules imported during the test run. This lets us # import the same source files for more than one test. # pragma: not covered Run as a full test case, with setUp and tearDown. Filter the list of `files` to only those that match `file_pattern`. If `file_pattern` is None, then return the entire list of files. Returns a list of the filtered files. Test set up, run by nose before __call__. # Modules should be importable from the current directory. Test tear down, run by nose after __call__. # Make sure no matter what, the test is cleaned up. # pragma: part covered # Restore the original sys.path # Functions usable inside farm run.py files A no-op function to stub out run, copy, etc, when only cleaning. Copy a directory. Run a list of commands. `cmds` is a string, commands separated by newlines. `rundir` is the directory in which to run the commands. `outfile` is a filename to redirect stdout to. 
Run a function. `fn` is a callable. `rundir` is the directory in which to run the function. Compare files matching `file_pattern` in `dir1` and `dir2`. `dir2` is interpreted as a prefix, with Python version numbers appended to find the actual directory to compare with. "foo" will compare against "foo_v241", "foo_v24", "foo_v2", or "foo", depending on which directory is found first. `size_within` is a percentage delta for the file sizes. If non-zero, then the file contents are not compared (since they are expected to often be different), but the file sizes must be within this amount. For example, size_within=10 means that the two files' sizes must be within 10 percent of each other to compare equal. `left_extra` true means the left directory can have extra files in it without triggering an assertion. `right_extra` means the right directory can. `scrubs` is a list of pairs, regex find and replace patterns to use to scrub the files of unimportant differences. An assertion will be raised if the directories fail one of their matches. # Search for a dir2 with a version suffix. # The files were already compared, use the diff_files list as a # guide for size comparison. # print "%d %d" % (big, little) # print "Left: ---\n%s\n-----\n%s" % (left, right) # filecmp only compares in binary mode, but we want text mode. So # look through the list of different files, and compare them # ourselves. Scrub uninteresting data from the strings in `strlist`. `scrubs is a list of (find, replace) pairs of regexes that are used on each string in `strlist`. A list of scrubbed strings is returned. Check that the file contains all of a list of strings. An assert will be raised if one of the arguments in `strlist` is missing in `filename`. Check that the file contains none of a list of strings. An assert will be raised if any of the strings in strlist appears in `filename`. Clean `cleandir` by removing it and all its children completely. 
# rmtree gives mysterious failures on Win7, so retry a "few" times. # I've seen it take over 100 tries, so, 1000! This is probably the # most unpleasant hack I've written in a long time... # pragma: part covered # pragma: not covered Skip the current test. # pragma: not covered Command-line access to test_farm. Commands: run testcase - Run a single test case. out testcase - Run a test case, but don't clean up, to see the output. clean - Clean all the output for all tests. # Run the test for real. # Run the test, but don't clean up, so we can examine the output. # Run all the tests, but just clean. # So that we can run just one farm run.py at a time. | 2.756833 | 3 |
scripts/export.py | sarahnator/py-checkin | 2 | 6612582 | # note -- run in virtualenv w/ command: python scripts/export.py
import numpy as np
import pandas as pd
import myfitnesspal as pal
from scripts.spreadsheet import *
import fitbit as bit
import gather_keys_oauth2 as Oauth2
import datetime
import json
from scripts.dateUtils import *
from fitbit import exceptions
def mfp_data_from_date(date):
    """
    Retrieve all MyFitnessPal weight and nutrition data from `date` to now.

    :param date: datetime.date of the earliest day to fetch, ex: datetime.date(2015, 5, 11)
    :return mfp_data: nested list of columns, in the order
        [[weights], [dates], [calories], [protein], [carbohydrates], [fat], [fiber]]
    """
    # init connection to mfp api using the email stored in the creds file
    with open('json/creds.json') as src:
        data = json.load(src)
    client = pal.Client(data['email'])

    # all weigh-ins on or after `date`; convert ordered dict to [(date, weight), ...]
    weights = list(client.get_measurements('Weight', date).items())

    nutrient_keys = ["calories", "protein", "carbohydrates", "fat", "fiber"]
    data_list = []  # one dict per weigh-in day, oldest first
    for (day_date, raw_weight) in weights:
        # query nutrition totals for the same day as the weigh-in
        day = client.get_date(day_date.year, day_date.month, day_date.day)
        total = day.totals
        if total:
            # sodium is intentionally not tracked - drop it if present
            total.pop("sodium", None)
            # fix the column order; default any missing nutrient to 0
            total = {k: total.get(k, 0) for k in nutrient_keys}
        else:
            # no diary entry that day - zero-fill so every row has the same shape
            total = {k: 0 for k in nutrient_keys}
        data_row = {"weight": float(raw_weight), "date": day_date}
        data_row.update(total)  # append nutrition totals to the row
        # API yields most recent first; prepend so data_list ends up oldest-first
        data_list.insert(0, data_row)

    # transpose list-of-row-dicts into list-of-columns, e.g.
    # [[122.5, 123.3], [date1, date2], [2321, 2347], [154, 153], [298, 301], [63, 65], [62, 62]]
    mfp_data = [list(col) for col in zip(*[d.values() for d in data_list])]
    return mfp_data
def fitbit_data_from_date(date):
    """
    Query Fitbit for daily step and distance totals from `date` through today.

    Creates a Fitbit OAuth2 client; if the session token has expired the
    token is refreshed (pops up a browser window) and the updated credentials
    are written back to "json/creds.json".

    :param date: datetime.date of the first day to fetch, ex: datetime.date(2015, 5, 11)
    :return fitbit_data: nested list [[steps], [distances]] with one entry per day
    """
    # TODO: put (re)authentication into a separate function
    # get credentials from json file
    with open('json/creds.json') as src:
        data = json.load(src)
    CLIENT_ID = data['fitbit-clientID']
    CLIENT_SECRET = data['fitbit-secret']
    ACCESS_TOKEN = data['fitbit-token']
    REFRESH_TOKEN = data['fitbit-refresh-token']

    # create the local OAuth2 server (used only if we must re-authorize) and client
    server = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)
    auth2_client = bit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN,
                              refresh_token=REFRESH_TOKEN)

    # base and end dates for the api call, as YYYY-MM-DD strings
    today = str(datetime.datetime.now().strftime("%Y-%m-%d"))
    base_date = str(date.strftime("%Y-%m-%d"))

    # probe the API once; a 401 means the token has expired and must be refreshed
    try:
        auth2_client.time_series(resource="activities/steps", base_date=base_date, end_date=today)
    except bit.exceptions.HTTPUnauthorized:
        server.browser_authorize()  # opens a browser window for the user
        ACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])
        REFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])
        # persist the refreshed tokens so the next run can skip re-authorization
        with open("json/creds.json", "r") as jsonFile:
            creds = json.load(jsonFile)
        creds['fitbit-token'] = ACCESS_TOKEN
        creds['fitbit-refresh-token'] = REFRESH_TOKEN
        with open("json/creds.json", "w") as jsonFile:
            json.dump(creds, jsonFile)
        # rebuild the client with the fresh tokens
        auth2_client = bit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN,
                                  refresh_token=REFRESH_TOKEN)

    # steps and distance query
    print("Querying fitbit...")
    # responses look like:
    # {'activities-steps': [{'dateTime': '2020-05-25', 'value': '11519'}, ...]}
    # {'activities-distance': [{'dateTime': '2020-05-25', 'value': '4.93872658484712'}, ...]}
    steps_log = auth2_client.time_series(resource="activities/steps", base_date=base_date, end_date=today)
    dist_log = auth2_client.time_series(resource="activities/distance", base_date=base_date, end_date=today)
    steps_log = steps_log['activities-steps']
    dist_log = dist_log['activities-distance']

    # reshape into parallel numeric lists: steps as ints, distances truncated
    # to 3 decimal places
    steps, dist = [], []
    for step_entry, dist_entry in zip(steps_log, dist_log):
        steps.append(int(step_entry['value']))
        dist.append(float("%.3f" % float(dist_entry['value'])))

    # [[11519, 3428], [4.938, 1.469]]
    fitbit_data = [steps, dist]
    return fitbit_data
def export_subset(mfp_data, fitbit_data):
    """
    Export weights as the target vector y and (calories, steps) as the
    feature matrix X for downstream modeling.

    Saves y to exportedData/y_data.csv and X to exportedData/X_data.csv,
    creating the output directory if needed.

    :param mfp_data: nested array of myfitnesspal data (columns):
        [[weights], [dates], [calories], [protein], [carbohydrates], [fat], [fiber]]
    :param fitbit_data: nested array of fitbit data [[steps], [distances]]
    """
    import os  # local import: only needed here, keeps module deps unchanged

    # ground truth: daily weights
    y_data = np.array(mfp_data[0])

    # inputs: calories (mfp column 2) and steps (fitbit column 0),
    # reshaped to column vectors and stacked side by side
    calories = np.reshape(np.array(mfp_data[2]), (len(mfp_data[2]), 1))
    steps = np.reshape(np.array(fitbit_data[0]), (len(fitbit_data[0]), 1))
    X_data = np.hstack((calories, steps))

    print(X_data)  # for debug - if data is 0 anywhere, requery (error on fitbit/mfp)

    # ensure the output directory exists, then write both files; `with`
    # guarantees the handles are closed even if savetxt raises
    os.makedirs("./exportedData", exist_ok=True)
    with open("./exportedData/X_data.csv", "w") as fX:
        np.savetxt(fX, X_data, fmt='%6d', delimiter=',')
    with open("./exportedData/y_data.csv", "w") as fy:
        np.savetxt(fy, y_data, fmt='%3.1f', delimiter=',')
def export_all(mfp_data, fitbit_data):
    """
    Combine myfitnesspal and fitbit data into one dataframe and write it to
    exportedData/all.csv, creating the output directory if needed.

    :param mfp_data: nested array of myfitnesspal data (one inner list per column)
    :param fitbit_data: nested array of fitbit data [[steps], [distances]]
    """
    import os  # local import: only needed here, keeps module deps unchanged

    # each inner list is a column, so transpose to get one row per day
    mfp_df = pd.DataFrame(mfp_data).transpose()
    fitbit_df = pd.DataFrame(fitbit_data).transpose()
    combined = pd.concat([mfp_df, fitbit_df], axis=1)  # side-by-side, row-aligned
    print(combined)

    os.makedirs("./exportedData", exist_ok=True)
    with open("./exportedData/all.csv", "w") as f_all:
        combined.to_csv(f_all, index=False, index_label=False)
if __name__ == "__main__":
    # An earlier export window (start of coaching) would be
    # datetime.date(2020, 1, 28); currently unused.
    start_date = datetime.date(2020, 5, 25)  # date fitbit tracking started
    mfp_data = mfp_data_from_date(start_date)
    fitbit_data = fitbit_data_from_date(start_date)
    export_all(mfp_data, fitbit_data)
| # note -- run in virtualenv w/ command: python scripts/export.py
import numpy as np
import pandas as pd
import myfitnesspal as pal
from scripts.spreadsheet import *
import fitbit as bit
import gather_keys_oauth2 as Oauth2
import datetime
import json
from scripts.dateUtils import *
from fitbit import exceptions
def mfp_data_from_date(date):
    """
    Retrieve all MyFitnessPal weight and nutrition data from `date` to now.

    :param date: datetime.date of the earliest day to fetch, ex: datetime.date(2015, 5, 11)
    :return mfp_data: nested list of columns, in the order
        [[weights], [dates], [calories], [protein], [carbohydrates], [fat], [fiber]]
    """
    # init connection to mfp api using the email stored in the creds file
    with open('json/creds.json') as src:
        data = json.load(src)
    client = pal.Client(data['email'])

    # all weigh-ins on or after `date`; convert ordered dict to [(date, weight), ...]
    weights = list(client.get_measurements('Weight', date).items())

    nutrient_keys = ["calories", "protein", "carbohydrates", "fat", "fiber"]
    data_list = []  # one dict per weigh-in day, oldest first
    for (day_date, raw_weight) in weights:
        # query nutrition totals for the same day as the weigh-in
        day = client.get_date(day_date.year, day_date.month, day_date.day)
        total = day.totals
        if total:
            # sodium is intentionally not tracked - drop it if present
            total.pop("sodium", None)
            # fix the column order; default any missing nutrient to 0
            total = {k: total.get(k, 0) for k in nutrient_keys}
        else:
            # no diary entry that day - zero-fill so every row has the same shape
            total = {k: 0 for k in nutrient_keys}
        data_row = {"weight": float(raw_weight), "date": day_date}
        data_row.update(total)  # append nutrition totals to the row
        # API yields most recent first; prepend so data_list ends up oldest-first
        data_list.insert(0, data_row)

    # transpose list-of-row-dicts into list-of-columns, e.g.
    # [[122.5, 123.3], [date1, date2], [2321, 2347], [154, 153], [298, 301], [63, 65], [62, 62]]
    mfp_data = [list(col) for col in zip(*[d.values() for d in data_list])]
    return mfp_data
def fitbit_data_from_date(date):
    """
    Query Fitbit for daily step and distance totals from `date` through today.

    Creates a Fitbit OAuth2 client; if the session token has expired the
    token is refreshed (pops up a browser window) and the updated credentials
    are written back to "json/creds.json".

    :param date: datetime.date of the first day to fetch, ex: datetime.date(2015, 5, 11)
    :return fitbit_data: nested list [[steps], [distances]] with one entry per day
    """
    # TODO: put (re)authentication into a separate function
    # get credentials from json file
    with open('json/creds.json') as src:
        data = json.load(src)
    CLIENT_ID = data['fitbit-clientID']
    CLIENT_SECRET = data['fitbit-secret']
    ACCESS_TOKEN = data['fitbit-token']
    REFRESH_TOKEN = data['fitbit-refresh-token']

    # create the local OAuth2 server (used only if we must re-authorize) and client
    server = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)
    auth2_client = bit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN,
                              refresh_token=REFRESH_TOKEN)

    # base and end dates for the api call, as YYYY-MM-DD strings
    today = str(datetime.datetime.now().strftime("%Y-%m-%d"))
    base_date = str(date.strftime("%Y-%m-%d"))

    # probe the API once; a 401 means the token has expired and must be refreshed
    try:
        auth2_client.time_series(resource="activities/steps", base_date=base_date, end_date=today)
    except bit.exceptions.HTTPUnauthorized:
        server.browser_authorize()  # opens a browser window for the user
        ACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])
        REFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])
        # persist the refreshed tokens so the next run can skip re-authorization
        with open("json/creds.json", "r") as jsonFile:
            creds = json.load(jsonFile)
        creds['fitbit-token'] = ACCESS_TOKEN
        creds['fitbit-refresh-token'] = REFRESH_TOKEN
        with open("json/creds.json", "w") as jsonFile:
            json.dump(creds, jsonFile)
        # rebuild the client with the fresh tokens
        auth2_client = bit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN,
                                  refresh_token=REFRESH_TOKEN)

    # steps and distance query
    print("Querying fitbit...")
    # responses look like:
    # {'activities-steps': [{'dateTime': '2020-05-25', 'value': '11519'}, ...]}
    # {'activities-distance': [{'dateTime': '2020-05-25', 'value': '4.93872658484712'}, ...]}
    steps_log = auth2_client.time_series(resource="activities/steps", base_date=base_date, end_date=today)
    dist_log = auth2_client.time_series(resource="activities/distance", base_date=base_date, end_date=today)
    steps_log = steps_log['activities-steps']
    dist_log = dist_log['activities-distance']

    # reshape into parallel numeric lists: steps as ints, distances truncated
    # to 3 decimal places
    steps, dist = [], []
    for step_entry, dist_entry in zip(steps_log, dist_log):
        steps.append(int(step_entry['value']))
        dist.append(float("%.3f" % float(dist_entry['value'])))

    # [[11519, 3428], [4.938, 1.469]]
    fitbit_data = [steps, dist]
    return fitbit_data
def export_subset(mfp_data, fitbit_data):
"""
Exports weights as y array, calories and steps as multidimensional X matrix
Saves y to y_data.csv, X to X_data.csv in exportedData directory.
Inconveniently uses numpy arrays instead of panda dataframes because I was lazy.
:param mfp_data: nested array of myfitnesspal data
:param fitbit_data: nested array of fitbit data
"""
# create numpy array for weights as ground truth
y = mfp_data[0]
y_data = np.array(y)
# create X inputs
c = np.array(mfp_data[2])
s = np.array(fitbit_data[0])
# transpose 1D matrices
c = np.reshape(c, (len(mfp_data[2]), 1))
s = np.reshape(s, (len(fitbit_data[0]), 1))
# horizontally stack 1D matrices
X_data = np.hstack((c, s))
# print(y_data)
print(X_data) # for debug - if data is 0 anywhere, requery, error on fitbit/mfp
# TODO: save to file function with parameter for appending or overwriting file
fX = open("./exportedData/X_data.csv", "w")
fy = open("./exportedData/y_data.csv", "w")
np.savetxt(fX, X_data, fmt='%6d', delimiter=',')
np.savetxt(fy, y_data, fmt='%3.1f', delimiter=',')
def export_all(mfp_data, fitbit_data):
"""
Combines myfitnesspal and fitbit data into one dataframe, which is then written to a csv file
:param mfp_data: nested array of myfitnesspal data
:param fitbit_data: nested array of fitbit data
"""
mfp_df = pd.DataFrame(mfp_data).transpose()
fitbit_df = pd.DataFrame(fitbit_data).transpose()
all = pd.concat([mfp_df, fitbit_df], axis=1)
print(all)
fAll = open("./exportedData/all.csv", "w")
all.to_csv(fAll, index=False, index_label=False)
if __name__ == "__main__":
d1 = datetime.date(2020, 1, 28) # since working with coach
d2 = datetime.date(2020, 5, 25) # date started fitbit tracking
mfp_data = mfp_data_from_date(d2)
fitbit_data = fitbit_data_from_date(d2)
export_all(mfp_data, fitbit_data)
| en | 0.531303 | # note -- run in virtualenv w/ command: python scripts/export.py Non-verbose function to retrieve all myfitnesspal data from date to now. :param date: datetime object of desired date, ex: datetime.date(2015, 5, 11) :return mfp_data: nested list [[weights], [dates], [calories], [carbohydrates], [fats], [protein], [fiber]] # init connection to mfp api # convert ordered dictionary to list # container for data row # query nutrition data # get totals # int day totals # check if data exists # I am sodium queen DGAF - remove stat from dict # reorder list: {cal, pro, carb, fat, fiber} # prints most recent --> least recent # append totals # prepend to front of list of all data # data list format: # [{'weight': 122.9, 'date': datetime.date(2020, 5, 24), 'calories': 2316, 'protein': 154, 'carbohydrates': 294, # 'fat': 65, 'fiber': 62}, # {'weight': 123.0, 'date': datetime.date(2020, 5, 28), 'calories': 2272, 'protein': 153, 'carbohydrates': 291, # 'fat': 63, 'fiber': 67}] # fmt: [[122.5, 123.3, 123.2, 123.4], --> weight ['05-17', '05-18', '05-19', '05-20'], --> date [2321, 2347, 2324, 2316], --> cals # [298, 301, 298, 295], --> carbs [63, 65, 63, 63], --> fat [154, 153, 154, 152], --> pro [62, 62, 63, 67]] --> fiber Non verbose version. Initiates fitbit client and server, returns fitbit activity data relative to last calendar Sunday. If session token has expired, refreshes token and writes updated credentials to json file "json/creds.json". Outputs progress bars to terminal. 
:param date: datetime object of desired date, ex: datetime.date(2015, 5, 11) :return fitbit_data: nested list [[steps], [distances]] # TODO: put (re)authentication into separate function # get credentials from json file # create server and client # get end and base date for api call # catch 401 error / refresh the token if token has expired (pops up browser window) # rewrite new credentials into json file # steps and distance query # format: {'activities-steps': [{'dateTime': '2020-05-25', 'value': '11519'}, {'dateTime': '2020-05-26', 'value': '3428'}]} # {'activities-distance': [{'dateTime': '2020-05-25', 'value': '4.93872658484712'}, {'dateTime': '2020-05-26', 'value': '1.46974170786144'}]} # convert to dict-array # f [{'dateTime': '2020-05-25', 'value': '4.93872658484712'}, {'dateTime': '2020-05-26', 'value': '1.46974170786144'}] # reformat # steps: ['11519', '3428'] dist: ['4.93872658484712', '1.46974170786144'] # truncate to 3 decimal places # reformat # --- steps --- --- dist --- # [['11519', '3428'], ['4.93872658484712', '1.46974170786144']] # print(fitbit_data) Exports weights as y array, calories and steps as multidimensional X matrix Saves y to y_data.csv, X to X_data.csv in exportedData directory. Inconveniently uses numpy arrays instead of panda dataframes because I was lazy. :param mfp_data: nested array of myfitnesspal data :param fitbit_data: nested array of fitbit data # create numpy array for weights as ground truth # create X inputs # transpose 1D matrices # horizontally stack 1D matrices # print(y_data) # for debug - if data is 0 anywhere, requery, error on fitbit/mfp # TODO: save to file function with parameter for appending or overwriting file Combines myfitnesspal and fitbit data into one dataframe, which is then written to a csv file :param mfp_data: nested array of myfitnesspal data :param fitbit_data: nested array of fitbit data # since working with coach # date started fitbit tracking | 3.135245 | 3 |
current files/model.py | parthematics/waves | 0 | 6612583 | <reponame>parthematics/waves<filename>current files/model.py
import tensorflow as tf
import numpy as np
import pickle
def sample_batch(data, all_labels, size_batch, i):
start = (i * size_batch) % len(data)
end = (i * size_batch + size_batch) % len(data)
if not start <= end:
return data[start:end], all_labels[start:end]
else:
data_in_batch = np.vstack((data[start:], data[:end]))
assert isinstance(all_labels, object)
labels_in_batch = np.vstack((all_labels[start:], all_labels[:end]))
return data_in_batch, labels_in_batch
if __name__ == "__main__":
# adjustable parameters
learn_rate = 0.001
max_iterations = 10000
disp_step = 1
training_size = 700
batch_size = 64
# parameters for cnn
input_size = 599 * 13 * 5
dropout_rate = 0.72
num_classes = 10
sound_data = []
all_labels = []
# reads from files that were created using the preprocessing scripts (mfcc saver)
with open('data', 'r') as f:
info = f.read()
sound_data = pickle.loads(info)
assert isinstance(sound_data, object)
sound_data = np.asarray(sound_data)
sound_data = sound_data.reshape((sound_data.shape[0], input_size))
with open('labels', 'r') as f:
info = f.read()
all_labels = pickle.loads(info)
# shuffle data
shuffled_data = np.random.permutation(len(sound_data))
sound_data = sound_data[shuffled_data]
all_labels = all_labels[shuffled_data]
# train/test split
training_X = sound_data[:training_size]
training_y = all_labels[:training_size]
testing_X = sound_data[training_size:]
testing_y = all_labels[training_size:]
# initialize tensorflow graph
X = tf.placeholder(tf.float32, [None, input_size])
Y = tf.placeholder(tf.float32, [None, num_classes])
prob_dropout = tf.placeholder(tf.float32)
def max_pooling(sound, k):
return tf.nn.max_pool(sound, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
def conv_layer(song_sample, w, b):
return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(song_sample, w, strides=[1, 1, 1, 1],
padding='SAME'), b))
# creates and trains convolutional neural network for music sample classification
def create_CNN(input_layer, weights_, biases_, dropout_rate):
# reshape input
input_layer = tf.reshape(input_layer, shape=[-1, 599, 13, 5])
# convolution layer w/ max pooling and dropout applied
conv1 = conv_layer(input_layer, weights_['wc1'], biases_['bc1'])
conv1 = max_pooling(conv1, k=4)
conv1 = tf.nn.dropout(conv1, dropout_rate)
# 2nd convolution layer w/ max pooling and dropout applied
conv2 = conv_layer(conv1, weights_['wc2'], biases_['bc2'])
conv2 = max_pooling(conv2, k=2)
conv2 = tf.nn.dropout(conv2, dropout_rate)
# dense layer w/ relu activation and dropout applied
dense1 = tf.reshape(conv2, [-1, weights_['wd1'].get_shape().as_list()[0]])
dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, weights_['wd1']), biases_['bd1']))
dense1 = tf.nn.dropout(dense1, dropout_rate)
output = tf.add(tf.matmul(dense1, weights_['out']), biases_['out'])
return output
# store biases and weights for CNN
biases = {
'bc1': tf.Variable(tf.random_normal([149])),
'bc2': tf.Variable(tf.random_normal([73])),
'bc3': tf.Variable(tf.random_normal([35])),
'bd1': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([num_classes]))
}
weights = {
'wc1': tf.Variable(tf.random_normal([4, 4, 5, 149])),
'wc2': tf.Variable(tf.random_normal([4, 4, 149, 73])),
'wc3': tf.Variable(tf.random_normal([2, 2, 73, 35])),
'wd1': tf.Variable(tf.random_normal([75 * 2 * 73, 1024])),
'out': tf.Variable(tf.random_normal([1024, num_classes]))
}
# create model
model = create_CNN(X, weights, biases, prob_dropout)
# loss and optimizer (softmax w/ cross entropy and adam, as usual haha)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(model, Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learn_rate).minimize(cost)
# evaluate model
predicted_correct = tf.equal(tf.argmax(model, 1), tf.argmax(Y, 1))
_accuracy = tf.reduce_mean(tf.cast(predicted_correct, tf.float32))
restart = tf.initialize_all_variables()
saver = tf.train.Saver()
# launch graph
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
sess.run(restart)
step = 1
# train until max iterations is reached
while step * batch_size < max_iterations:
batch_xs, batch_ys = sample_batch(training_X, training_y, batch_size, step)
sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, prob_dropout: dropout_rate})
if step % disp_step == 0:
accuracy = sess.run(_accuracy, feed_dict={X: batch_xs, Y: batch_ys, prob_dropout: 1.})
loss = sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, prob_dropout: 1.})
print("iteration " + str(step * batch_size) + ", loss for batch = " + \
"{:.6f}".format(loss) + ", accuracy= " + "{:.5f}".format(accuracy))
saved = saver.save(sess, "model.ckpt")
print("model saved in file: %s" % saved)
step += 1
print("model trained!")
saved = saver.save(sess, "model.pkt")
print("model saved as: %s" % saved)
print("accuracy:", sess.run(_accuracy, feed_dict={X: testing_X,
Y: testing_y,
prob_dropout: 1.}))
| files/model.py
import tensorflow as tf
import numpy as np
import pickle
def sample_batch(data, all_labels, size_batch, i):
start = (i * size_batch) % len(data)
end = (i * size_batch + size_batch) % len(data)
if not start <= end:
return data[start:end], all_labels[start:end]
else:
data_in_batch = np.vstack((data[start:], data[:end]))
assert isinstance(all_labels, object)
labels_in_batch = np.vstack((all_labels[start:], all_labels[:end]))
return data_in_batch, labels_in_batch
if __name__ == "__main__":
# adjustable parameters
learn_rate = 0.001
max_iterations = 10000
disp_step = 1
training_size = 700
batch_size = 64
# parameters for cnn
input_size = 599 * 13 * 5
dropout_rate = 0.72
num_classes = 10
sound_data = []
all_labels = []
# reads from files that were created using the preprocessing scripts (mfcc saver)
with open('data', 'r') as f:
info = f.read()
sound_data = pickle.loads(info)
assert isinstance(sound_data, object)
sound_data = np.asarray(sound_data)
sound_data = sound_data.reshape((sound_data.shape[0], input_size))
with open('labels', 'r') as f:
info = f.read()
all_labels = pickle.loads(info)
# shuffle data
shuffled_data = np.random.permutation(len(sound_data))
sound_data = sound_data[shuffled_data]
all_labels = all_labels[shuffled_data]
# train/test split
training_X = sound_data[:training_size]
training_y = all_labels[:training_size]
testing_X = sound_data[training_size:]
testing_y = all_labels[training_size:]
# initialize tensorflow graph
X = tf.placeholder(tf.float32, [None, input_size])
Y = tf.placeholder(tf.float32, [None, num_classes])
prob_dropout = tf.placeholder(tf.float32)
def max_pooling(sound, k):
return tf.nn.max_pool(sound, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
def conv_layer(song_sample, w, b):
return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(song_sample, w, strides=[1, 1, 1, 1],
padding='SAME'), b))
# creates and trains convolutional neural network for music sample classification
def create_CNN(input_layer, weights_, biases_, dropout_rate):
# reshape input
input_layer = tf.reshape(input_layer, shape=[-1, 599, 13, 5])
# convolution layer w/ max pooling and dropout applied
conv1 = conv_layer(input_layer, weights_['wc1'], biases_['bc1'])
conv1 = max_pooling(conv1, k=4)
conv1 = tf.nn.dropout(conv1, dropout_rate)
# 2nd convolution layer w/ max pooling and dropout applied
conv2 = conv_layer(conv1, weights_['wc2'], biases_['bc2'])
conv2 = max_pooling(conv2, k=2)
conv2 = tf.nn.dropout(conv2, dropout_rate)
# dense layer w/ relu activation and dropout applied
dense1 = tf.reshape(conv2, [-1, weights_['wd1'].get_shape().as_list()[0]])
dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, weights_['wd1']), biases_['bd1']))
dense1 = tf.nn.dropout(dense1, dropout_rate)
output = tf.add(tf.matmul(dense1, weights_['out']), biases_['out'])
return output
# store biases and weights for CNN
biases = {
'bc1': tf.Variable(tf.random_normal([149])),
'bc2': tf.Variable(tf.random_normal([73])),
'bc3': tf.Variable(tf.random_normal([35])),
'bd1': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([num_classes]))
}
weights = {
'wc1': tf.Variable(tf.random_normal([4, 4, 5, 149])),
'wc2': tf.Variable(tf.random_normal([4, 4, 149, 73])),
'wc3': tf.Variable(tf.random_normal([2, 2, 73, 35])),
'wd1': tf.Variable(tf.random_normal([75 * 2 * 73, 1024])),
'out': tf.Variable(tf.random_normal([1024, num_classes]))
}
# create model
model = create_CNN(X, weights, biases, prob_dropout)
# loss and optimizer (softmax w/ cross entropy and adam, as usual haha)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(model, Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learn_rate).minimize(cost)
# evaluate model
predicted_correct = tf.equal(tf.argmax(model, 1), tf.argmax(Y, 1))
_accuracy = tf.reduce_mean(tf.cast(predicted_correct, tf.float32))
restart = tf.initialize_all_variables()
saver = tf.train.Saver()
# launch graph
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
sess.run(restart)
step = 1
# train until max iterations is reached
while step * batch_size < max_iterations:
batch_xs, batch_ys = sample_batch(training_X, training_y, batch_size, step)
sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, prob_dropout: dropout_rate})
if step % disp_step == 0:
accuracy = sess.run(_accuracy, feed_dict={X: batch_xs, Y: batch_ys, prob_dropout: 1.})
loss = sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, prob_dropout: 1.})
print("iteration " + str(step * batch_size) + ", loss for batch = " + \
"{:.6f}".format(loss) + ", accuracy= " + "{:.5f}".format(accuracy))
saved = saver.save(sess, "model.ckpt")
print("model saved in file: %s" % saved)
step += 1
print("model trained!")
saved = saver.save(sess, "model.pkt")
print("model saved as: %s" % saved)
print("accuracy:", sess.run(_accuracy, feed_dict={X: testing_X,
Y: testing_y,
prob_dropout: 1.})) | en | 0.847198 | # adjustable parameters # parameters for cnn # reads from files that were created using the preprocessing scripts (mfcc saver) # shuffle data # train/test split # initialize tensorflow graph # creates and trains convolutional neural network for music sample classification # reshape input # convolution layer w/ max pooling and dropout applied # 2nd convolution layer w/ max pooling and dropout applied # dense layer w/ relu activation and dropout applied # store biases and weights for CNN # create model # loss and optimizer (softmax w/ cross entropy and adam, as usual haha) # evaluate model # launch graph # train until max iterations is reached | 2.501376 | 3 |
python/testData/debug/test_warnings_suppressing.py | tgodzik/intellij-community | 2 | 6612584 | from __future__ import print_function
import warnings
class ClassWithDeprecatedProperty:
@property
def x(self):
warnings.warn("This property is deprecated!")
return 42
obj = ClassWithDeprecatedProperty()
warnings.warn("This warning should appear in the output.")
del globals()['__warningregistry__']
print(obj.x)
print(obj)
| from __future__ import print_function
import warnings
class ClassWithDeprecatedProperty:
@property
def x(self):
warnings.warn("This property is deprecated!")
return 42
obj = ClassWithDeprecatedProperty()
warnings.warn("This warning should appear in the output.")
del globals()['__warningregistry__']
print(obj.x)
print(obj)
| none | 1 | 2.791639 | 3 | |
services/schedule_service.py | mrtmrtmlck/git-catch-server | 2 | 6612585 | <reponame>mrtmrtmlck/git-catch-server
from apscheduler.schedulers.background import BackgroundScheduler
from services import email_service
def schedule_issue_emails():
scheduler = BackgroundScheduler()
scheduler.add_job(email_service.send_issues, 'cron', hour=12, minute=40)
scheduler.add_job(email_service.send_issues, 'cron', hour=15, minute=0)
scheduler.add_job(email_service.send_issues, 'cron', hour=20, minute=0)
scheduler.start()
| from apscheduler.schedulers.background import BackgroundScheduler
from services import email_service
def schedule_issue_emails():
scheduler = BackgroundScheduler()
scheduler.add_job(email_service.send_issues, 'cron', hour=12, minute=40)
scheduler.add_job(email_service.send_issues, 'cron', hour=15, minute=0)
scheduler.add_job(email_service.send_issues, 'cron', hour=20, minute=0)
scheduler.start() | none | 1 | 2.441562 | 2 | |
fhir/resources/tests/test_codesystem.py | cstoltze/fhir.resources | 144 | 6612586 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/CodeSystem
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import codesystem
def impl_codesystem_1(inst):
    """Assert the expected field values for ``codesystem-map-model-mode.json``.

    ``inst`` is a parsed ``CodeSystem`` resource; every assertion pins one
    field of the example against the value shipped in the FHIR R4 (4.0.1)
    specification package.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "source"
    assert inst.concept[0].definition == (
        "This structure describes an instance passed to the mapping "
        "engine that is used a source of data."
    )
    assert inst.concept[0].display == "Source Structure Definition"
    assert inst.concept[1].code == "queried"
    assert inst.concept[1].definition == (
        "This structure describes an instance that the mapping engine"
        " may ask for that is used a source of data."
    )
    assert inst.concept[1].display == "Queried Structure Definition"
    assert inst.concept[2].code == "target"
    assert inst.concept[2].definition == (
        "This structure describes an instance passed to the mapping "
        "engine that is used a target of data."
    )
    assert inst.concept[2].display == "Target Structure Definition"
    assert inst.concept[3].code == "produced"
    assert inst.concept[3].definition == (
        "This structure describes an instance that the mapping engine"
        " may ask to create that is used a target of data."
    )
    assert inst.concept[3].display == "Produced Structure Definition"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "<EMAIL>"
    assert inst.content == "complete"
    # Date/instant literals are normalized through the fhirtypes validators
    # so the comparison matches the parsed (validated) representation.
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert inst.description == "How the referenced structure is used in this mapping."
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "fhir"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 2
    assert inst.id == "map-model-mode"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.676"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "StructureMapModelMode"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "StructureMapModelMode"
    assert inst.url == "http://hl7.org/fhir/map-model-mode"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/map-model-mode"
    assert inst.version == "4.0.1"
def test_codesystem_1(base_settings):
    """No. 1 tests collection for CodeSystem.
    Test File: codesystem-map-model-mode.json
    """
    # Parse the example resource from the unit-test data directory.
    json_path = base_settings["unittest_data_dir"] / "codesystem-map-model-mode.json"
    inst = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert inst.resource_type == "CodeSystem"
    impl_codesystem_1(inst)

    # Round-trip check: serialize back to a dict and rebuild the resource,
    # then verify the rebuilt instance carries the same field values.
    data = inst.dict()
    assert data["resourceType"] == "CodeSystem"

    inst2 = codesystem.CodeSystem(**data)
    impl_codesystem_1(inst2)
def impl_codesystem_2(inst):
    """Assert the expected field values for ``codesystem-special-values.json``.

    ``inst`` is a parsed ``CodeSystem`` resource; every assertion pins one
    field of the example against the value shipped in the FHIR R4 (4.0.1)
    specification package.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "true"
    assert inst.concept[0].definition == "Boolean true."
    assert inst.concept[0].display == "true"
    assert inst.concept[1].code == "false"
    assert inst.concept[1].definition == "Boolean false."
    assert inst.concept[1].display == "false"
    assert inst.concept[2].code == "trace"
    assert inst.concept[2].definition == (
        "The content is greater than zero, but too small to be " "quantified."
    )
    assert inst.concept[2].display == "Trace Amount Detected"
    assert inst.concept[3].code == "sufficient"
    assert inst.concept[3].definition == (
        "The specific quantity is not known, but is known to be non-"
        "zero and is not specified because it makes up the bulk of "
        "the material."
    )
    assert inst.concept[3].display == "Sufficient Quantity"
    assert inst.concept[3].extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/codesystem-concept-" "comments"
    )
    assert inst.concept[3].extension[0].valueString == (
        "used in formulations (e.g. 'Add 10mg of ingredient X, 50mg "
        "of ingredient Y, and sufficient quantity of water to 100mL.'"
        " This code would be used to express the quantity of water. )"
    )
    assert inst.concept[4].code == "withdrawn"
    assert inst.concept[4].definition == "The value is no longer available."
    assert inst.concept[4].display == "Value Withdrawn"
    assert inst.concept[5].code == "nil-known"
    assert (
        inst.concept[5].definition
        == "The are no known applicable values in this context."
    )
    assert inst.concept[5].display == "Nil Known"
    assert inst.concept[5].extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/codesystem-concept-" "comments"
    )
    assert (
        inst.concept[5].extension[0].valueString
        == "The existence of this subject to review"
    )
    assert inst.content == "complete"
    # Date/instant literals are normalized through the fhirtypes validators
    # so the comparison matches the parsed (validated) representation.
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert inst.description == (
        "A set of generally useful codes defined so they can be "
        "included in value sets."
    )
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "fhir"
    assert inst.id == "special-values"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.1049"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "SpecialValues"
    assert inst.status == "draft"
    assert inst.text.status == "extensions"
    assert inst.title == "SpecialValues"
    assert inst.url == "http://terminology.hl7.org/CodeSystem/special-values"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/special-values"
    assert inst.version == "4.0.1"
def test_codesystem_2(base_settings):
    """No. 2 tests collection for CodeSystem.
    Test File: codesystem-special-values.json
    """
    # Parse the example resource from the unit-test data directory.
    json_path = base_settings["unittest_data_dir"] / "codesystem-special-values.json"
    inst = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert inst.resource_type == "CodeSystem"
    impl_codesystem_2(inst)

    # Round-trip check: serialize back to a dict and rebuild the resource,
    # then verify the rebuilt instance carries the same field values.
    data = inst.dict()
    assert data["resourceType"] == "CodeSystem"

    inst2 = codesystem.CodeSystem(**data)
    impl_codesystem_2(inst2)
def impl_codesystem_3(inst):
    """Assert the expected field values for
    ``codesystem-communication-not-done-reason.json``.

    ``inst`` is a parsed ``CodeSystem`` resource; every assertion pins one
    field of the example against the value shipped in the FHIR R4 (4.0.1)
    specification package.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "unknown"
    assert (
        inst.concept[0].definition
        == "The communication was not done due to an unknown reason."
    )
    assert inst.concept[0].display == "Unknown"
    assert inst.concept[1].code == "system-error"
    assert (
        inst.concept[1].definition
        == "The communication was not done due to a system error."
    )
    assert inst.concept[1].display == "System Error"
    assert inst.concept[2].code == "invalid-phone-number"
    assert inst.concept[2].definition == (
        "The communication was not done due to an invalid phone " "number."
    )
    assert inst.concept[2].display == "Invalid Phone Number"
    assert inst.concept[3].code == "recipient-unavailable"
    assert inst.concept[3].definition == (
        "The communication was not done due to the recipient being " "unavailable."
    )
    assert inst.concept[3].display == "Recipient Unavailable"
    assert inst.concept[4].code == "family-objection"
    assert (
        inst.concept[4].definition
        == "The communication was not done due to a family objection."
    )
    assert inst.concept[4].display == "Family Objection"
    assert inst.concept[5].code == "patient-objection"
    assert (
        inst.concept[5].definition
        == "The communication was not done due to a patient objection."
    )
    assert inst.concept[5].display == "Patient Objection"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "<EMAIL>"
    assert inst.content == "complete"
    # Date/instant literals are normalized through the fhirtypes validators
    # so the comparison matches the parsed (validated) representation.
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert (
        inst.description == "Codes for the reason why a communication did not happen."
    )
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "pc"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "draft"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 1
    assert inst.id == "communication-not-done-reason"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.1077"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "CommunicationNotDoneReason"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "CommunicationNotDoneReason"
    assert inst.url == (
        "http://terminology.hl7.org/CodeSystem/communication-not-" "done-reason"
    )
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/communication-not-done-reason"
    assert inst.version == "4.0.1"
def test_codesystem_3(base_settings):
    """No. 3 tests collection for CodeSystem.
    Test File: codesystem-communication-not-done-reason.json
    """
    # Parse the example resource from the unit-test data directory.
    json_path = (
        base_settings["unittest_data_dir"]
        / "codesystem-communication-not-done-reason.json"
    )
    inst = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert inst.resource_type == "CodeSystem"
    impl_codesystem_3(inst)

    # Round-trip check: serialize back to a dict and rebuild the resource,
    # then verify the rebuilt instance carries the same field values.
    data = inst.dict()
    assert data["resourceType"] == "CodeSystem"

    inst2 = codesystem.CodeSystem(**data)
    impl_codesystem_3(inst2)
def impl_codesystem_4(inst):
    """Assert the expected field values for
    ``codesystem-codesystem-hierarchy-meaning.json``.

    ``inst`` is a parsed ``CodeSystem`` resource; every assertion pins one
    field of the example against the value shipped in the FHIR R4 (4.0.1)
    specification package.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "grouped-by"
    assert inst.concept[0].display == "Grouped By"
    assert inst.concept[1].code == "is-a"
    assert inst.concept[1].display == "Is-A"
    assert inst.concept[2].code == "part-of"
    assert inst.concept[2].definition == (
        "Child elements list the individual parts of a composite "
        "whole (e.g. body site)."
    )
    assert inst.concept[2].display == "Part Of"
    assert inst.concept[3].code == "classified-with"
    assert inst.concept[3].display == "Classified With"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "<EMAIL>.org"
    assert inst.content == "complete"
    # Date/instant literals are normalized through the fhirtypes validators
    # so the comparison matches the parsed (validated) representation.
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert (
        inst.description == "The meaning of the hierarchy of concepts in a code system."
    )
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "vocab"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "normative"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "normative-version"
    )
    assert inst.extension[2].valueCode == "4.0.0"
    assert inst.extension[3].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[3].valueInteger == 5
    assert inst.id == "codesystem-hierarchy-meaning"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.785"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "CodeSystemHierarchyMeaning"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "active"
    assert inst.text.status == "generated"
    assert inst.title == "CodeSystemHierarchyMeaning"
    assert inst.url == "http://hl7.org/fhir/codesystem-hierarchy-meaning"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/codesystem-hierarchy-meaning"
    assert inst.version == "4.0.1"
def test_codesystem_4(base_settings):
    """No. 4 tests collection for CodeSystem.
    Test File: codesystem-codesystem-hierarchy-meaning.json
    """
    # Parse the example resource from the unit-test data directory.
    json_path = (
        base_settings["unittest_data_dir"]
        / "codesystem-codesystem-hierarchy-meaning.json"
    )
    inst = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert inst.resource_type == "CodeSystem"
    impl_codesystem_4(inst)

    # Round-trip check: serialize back to a dict and rebuild the resource,
    # then verify the rebuilt instance carries the same field values.
    data = inst.dict()
    assert data["resourceType"] == "CodeSystem"

    inst2 = codesystem.CodeSystem(**data)
    impl_codesystem_4(inst2)
def impl_codesystem_5(inst):
    """Assert the expected field values of the
    'medicationrequest-course-of-therapy' CodeSystem example.

    ``inst`` is a parsed CodeSystem; called both on the freshly parsed
    instance and on its dict round-trip rebuild.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "continuous"
    assert inst.concept[0].definition == (
        "A medication which is expected to be continued beyond the "
        "present order and which the patient should be assumed to be "
        "taking unless explicitly stopped."
    )
    assert inst.concept[0].display == "Continuous long term therapy"
    assert inst.concept[1].code == "acute"
    assert inst.concept[1].definition == (
        "A medication which the patient is only expected to consume "
        "for the duration of the current order and which is not "
        "expected to be renewed."
    )
    assert inst.concept[1].display == "Short course (acute) therapy"
    assert inst.concept[2].code == "seasonal"
    assert inst.concept[2].definition == (
        "A medication which is expected to be used on a part time "
        "basis at certain times of the year"
    )
    assert inst.concept[2].display == "Seasonal"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.content == "complete"
    assert inst.description == "MedicationRequest Course of Therapy Codes"
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "phx"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "draft"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 1
    assert inst.id == "medicationrequest-course-of-therapy"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.1327"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert (
        inst.meta.profile[0]
        == "http://hl7.org/fhir/StructureDefinition/shareablecodesystem"
    )
    assert inst.name == "medicationRequest Course of Therapy Codes"
    assert inst.publisher == "FHIR Project team"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "Medication request course of therapy codes"
    assert inst.url == (
        "http://terminology.hl7.org/CodeSystem/medicationrequest-" "course-of-therapy"
    )
    assert inst.valueSet == (
        "http://hl7.org/fhir/ValueSet/medicationrequest-course-of-" "therapy"
    )
    assert inst.version == "4.0.1"
def test_codesystem_5(base_settings):
    """No. 5 tests collection for CodeSystem.

    Parses codesystem-medicationrequest-course-of-therapy.json and verifies
    the parsed resource survives a dict round-trip unchanged.
    """
    json_path = (
        base_settings["unittest_data_dir"]
        / "codesystem-medicationrequest-course-of-therapy.json"
    )
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_5(parsed)
    # Round-trip: dump to a plain dict, rebuild, and re-run the same
    # field assertions to prove serialization is lossless.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_5(codesystem.CodeSystem(**payload))
def impl_codesystem_6(inst):
    """Assert the expected field values of the 'audit-event-outcome'
    CodeSystem example.

    ``inst`` is a parsed CodeSystem; called both on the freshly parsed
    instance and on its dict round-trip rebuild.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "0"
    assert inst.concept[0].definition == (
        "The operation completed successfully (whether with warnings " "or not)."
    )
    assert inst.concept[0].display == "Success"
    assert inst.concept[1].code == "4"
    assert inst.concept[1].definition == (
        "The action was not successful due to some kind of minor "
        "failure (often equivalent to an HTTP 400 response)."
    )
    assert inst.concept[1].display == "Minor failure"
    assert inst.concept[2].code == "8"
    assert inst.concept[2].definition == (
        "The action was not successful due to some kind of unexpected"
        " error (often equivalent to an HTTP 500 response)."
    )
    assert inst.concept[2].display == "Serious failure"
    assert inst.concept[3].code == "12"
    assert inst.concept[3].definition == (
        "An error of such magnitude occurred that the system is no "
        "longer available for use (i.e. the system died)."
    )
    assert inst.concept[3].display == "Major failure"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "<EMAIL>"
    assert inst.content == "complete"
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert inst.description == "Indicates whether the event succeeded or failed."
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "sec"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 3
    assert inst.id == "audit-event-outcome"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.455"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "AuditEventOutcome"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "AuditEventOutcome"
    assert inst.url == "http://hl7.org/fhir/audit-event-outcome"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/audit-event-outcome"
    assert inst.version == "4.0.1"
def test_codesystem_6(base_settings):
    """No. 6 tests collection for CodeSystem.

    Parses codesystem-audit-event-outcome.json and verifies the parsed
    resource survives a dict round-trip unchanged.
    """
    json_path = (
        base_settings["unittest_data_dir"] / "codesystem-audit-event-outcome.json"
    )
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_6(parsed)
    # Round-trip: dump to a plain dict, rebuild, and re-run the same
    # field assertions to prove serialization is lossless.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_6(codesystem.CodeSystem(**payload))
def impl_codesystem_7(inst):
    """Assert the expected field values of the 'contract-subtype'
    CodeSystem example.

    ``inst`` is a parsed CodeSystem; called both on the freshly parsed
    instance and on its dict round-trip rebuild.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "disclosure-ca"
    assert (
        inst.concept[0].definition == "Canadian health information disclosure policy."
    )
    assert inst.concept[0].display == "Disclosure-CA"
    assert inst.concept[1].code == "disclosure-us"
    assert (
        inst.concept[1].definition
        == "United States health information disclosure policy."
    )
    assert inst.concept[1].display == "Disclosure-US"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.content == "complete"
    assert inst.copyright == "This is an example set."
    assert inst.description == "This value set includes sample Contract Subtype codes."
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "fm"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "draft"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 1
    assert inst.id == "contract-subtype"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.1198"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert (
        inst.meta.profile[0]
        == "http://hl7.org/fhir/StructureDefinition/shareablecodesystem"
    )
    assert inst.name == "ContractSubtypeCodes"
    assert inst.publisher == "Financial Management"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "Contract Subtype Codes"
    assert inst.url == "http://terminology.hl7.org/CodeSystem/contractsubtypecodes"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/contract-subtype"
    assert inst.version == "4.0.1"
def test_codesystem_7(base_settings):
    """No. 7 tests collection for CodeSystem.

    Parses codesystem-contract-subtype.json and verifies the parsed
    resource survives a dict round-trip unchanged.
    """
    json_path = base_settings["unittest_data_dir"] / "codesystem-contract-subtype.json"
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_7(parsed)
    # Round-trip: dump to a plain dict, rebuild, and re-run the same
    # field assertions to prove serialization is lossless.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_7(codesystem.CodeSystem(**payload))
def impl_codesystem_8(inst):
    """Assert the expected field values of the 'map-transform'
    CodeSystem example (StructureMapTransform).

    ``inst`` is a parsed CodeSystem; called both on the freshly parsed
    instance and on its dict round-trip rebuild.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "create"
    assert inst.concept[0].definition == (
        "create(type : string) - type is passed through to the "
        "application on the standard API, and must be known by it."
    )
    assert inst.concept[0].display == "create"
    assert inst.concept[1].code == "copy"
    assert inst.concept[1].definition == "copy(source)."
    assert inst.concept[1].display == "copy"
    assert inst.concept[2].code == "truncate"
    assert (
        inst.concept[2].definition
        == "truncate(source, length) - source must be stringy type."
    )
    assert inst.concept[2].display == "truncate"
    assert inst.concept[3].code == "escape"
    assert inst.concept[3].definition == (
        "escape(source, fmt1, fmt2) - change source from one kind of "
        "escaping to another (plain, java, xml, json). note that this"
        " is for when the string itself is escaped."
    )
    assert inst.concept[3].display == "escape"
    assert inst.concept[4].code == "cast"
    assert inst.concept[4].definition == (
        "cast(source, type?) - case source from one type to another. "
        "target type can be left as implicit if there is one and only"
        " one target type known."
    )
    assert inst.concept[4].display == "cast"
    assert inst.concept[5].code == "append"
    assert (
        inst.concept[5].definition == "append(source...) - source is element or string."
    )
    assert inst.concept[5].display == "append"
    assert inst.concept[6].code == "translate"
    assert (
        inst.concept[6].definition
        == "translate(source, uri_of_map) - use the translate operation."
    )
    assert inst.concept[6].display == "translate"
    assert inst.concept[7].code == "reference"
    assert inst.concept[7].definition == (
        "reference(source : object) - return a string that references"
        " the provided tree properly."
    )
    assert inst.concept[7].display == "reference"
    assert inst.concept[8].code == "dateOp"
    assert (
        inst.concept[8].definition
        == "Perform a date operation. *Parameters to be documented*."
    )
    assert inst.concept[8].display == "dateOp"
    assert inst.concept[9].code == "uuid"
    assert (
        inst.concept[9].definition
        == "Generate a random UUID (in lowercase). No Parameters."
    )
    assert inst.concept[9].display == "uuid"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "<EMAIL>"
    assert inst.content == "complete"
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert inst.description == "How data is copied/created."
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "fhir"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 2
    assert inst.id == "map-transform"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.682"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "StructureMapTransform"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "StructureMapTransform"
    assert inst.url == "http://hl7.org/fhir/map-transform"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/map-transform"
    assert inst.version == "4.0.1"
def test_codesystem_8(base_settings):
    """No. 8 tests collection for CodeSystem.

    Parses codesystem-map-transform.json and verifies the parsed
    resource survives a dict round-trip unchanged.
    """
    json_path = base_settings["unittest_data_dir"] / "codesystem-map-transform.json"
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_8(parsed)
    # Round-trip: dump to a plain dict, rebuild, and re-run the same
    # field assertions to prove serialization is lossless.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_8(codesystem.CodeSystem(**payload))
def impl_codesystem_9(inst):
    """Assert the expected field values of the 'imagingstudy-status'
    CodeSystem example.

    ``inst`` is a parsed CodeSystem; called both on the freshly parsed
    instance and on its dict round-trip rebuild.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "registered"
    assert inst.concept[0].definition == (
        "The existence of the imaging study is registered, but there "
        "is nothing yet available."
    )
    assert inst.concept[0].display == "Registered"
    assert inst.concept[1].code == "available"
    assert inst.concept[1].definition == (
        "At least one instance has been associated with this imaging " "study."
    )
    assert inst.concept[1].display == "Available"
    assert inst.concept[2].code == "cancelled"
    assert inst.concept[2].definition == (
        "The imaging study is unavailable because the imaging study "
        "was not started or not completed (also sometimes called "
        '"aborted").'
    )
    assert inst.concept[2].display == "Cancelled"
    assert inst.concept[3].code == "entered-in-error"
    assert inst.concept[3].display == "Entered in Error"
    assert inst.concept[4].code == "unknown"
    assert inst.concept[4].display == "Unknown"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "<EMAIL>"
    assert inst.content == "complete"
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert inst.description == "The status of the ImagingStudy."
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "ii"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 3
    assert inst.id == "imagingstudy-status"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.991"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "ImagingStudyStatus"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "ImagingStudyStatus"
    assert inst.url == "http://hl7.org/fhir/imagingstudy-status"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/imagingstudy-status"
    assert inst.version == "4.0.1"
def test_codesystem_9(base_settings):
    """No. 9 tests collection for CodeSystem.

    Parses codesystem-imagingstudy-status.json and verifies the parsed
    resource survives a dict round-trip unchanged.
    """
    json_path = (
        base_settings["unittest_data_dir"] / "codesystem-imagingstudy-status.json"
    )
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_9(parsed)
    # Round-trip: dump to a plain dict, rebuild, and re-run the same
    # field assertions to prove serialization is lossless.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_9(codesystem.CodeSystem(**payload))
def impl_codesystem_10(inst):
    """Assert the expected field values of the 'benefit-type'
    CodeSystem example.

    ``inst`` is a parsed CodeSystem; called both on the freshly parsed
    instance and on its dict round-trip rebuild.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "benefit"
    assert inst.concept[0].definition == "Maximum benefit allowable."
    assert inst.concept[0].display == "Benefit"
    assert inst.concept[1].code == "deductible"
    assert (
        inst.concept[1].definition == "Cost to be incurred before benefits are applied"
    )
    assert inst.concept[1].display == "Deductible"
    assert inst.concept[2].code == "visit"
    assert inst.concept[2].definition == "Service visit"
    assert inst.concept[2].display == "Visit"
    assert inst.concept[3].code == "room"
    assert inst.concept[3].definition == "Type of room"
    assert inst.concept[3].display == "Room"
    assert inst.concept[4].code == "copay"
    assert inst.concept[4].definition == "Copayment per service"
    assert inst.concept[4].display == "Copayment per service"
    assert inst.concept[5].code == "copay-percent"
    assert inst.concept[5].definition == "Copayment percentage per service"
    assert inst.concept[5].display == "Copayment Percent per service"
    assert inst.concept[6].code == "copay-maximum"
    assert inst.concept[6].definition == "Copayment maximum per service"
    assert inst.concept[6].display == "Copayment maximum per service"
    assert inst.concept[7].code == "vision-exam"
    assert inst.concept[7].definition == "Vision Exam"
    assert inst.concept[7].display == "Vision Exam"
    assert inst.concept[8].code == "vision-glasses"
    assert inst.concept[8].definition == "Frames and lenses"
    assert inst.concept[8].display == "Vision Glasses"
    assert inst.concept[9].code == "vision-contacts"
    assert inst.concept[9].definition == "Contact Lenses"
    assert inst.concept[9].display == "Vision Contacts Coverage"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.content == "complete"
    assert inst.copyright == "This is an example set."
    assert (
        inst.description
        == "This value set includes a smattering of Benefit type codes."
    )
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "fm"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "draft"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 1
    assert inst.id == "benefit-type"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.1176"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert (
        inst.meta.profile[0]
        == "http://hl7.org/fhir/StructureDefinition/shareablecodesystem"
    )
    assert inst.name == "BenefitTypeCodes"
    assert inst.publisher == "Financial Management"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "Benefit Type Codes"
    assert inst.url == "http://terminology.hl7.org/CodeSystem/benefit-type"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/benefit-type"
    assert inst.version == "4.0.1"
def test_codesystem_10(base_settings):
    """No. 10 tests collection for CodeSystem.

    Parses codesystem-benefit-type.json and verifies the parsed
    resource survives a dict round-trip unchanged.
    """
    json_path = base_settings["unittest_data_dir"] / "codesystem-benefit-type.json"
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_10(parsed)
    # Round-trip: dump to a plain dict, rebuild, and re-run the same
    # field assertions to prove serialization is lossless.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_10(codesystem.CodeSystem(**payload))
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/CodeSystem
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import codesystem
def impl_codesystem_1(inst):
    """Assert the expected field values of the 'map-model-mode'
    CodeSystem example (StructureMapModelMode).

    ``inst`` is a parsed CodeSystem; called both on the freshly parsed
    instance and on its dict round-trip rebuild.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "source"
    assert inst.concept[0].definition == (
        "This structure describes an instance passed to the mapping "
        "engine that is used a source of data."
    )
    assert inst.concept[0].display == "Source Structure Definition"
    assert inst.concept[1].code == "queried"
    assert inst.concept[1].definition == (
        "This structure describes an instance that the mapping engine"
        " may ask for that is used a source of data."
    )
    assert inst.concept[1].display == "Queried Structure Definition"
    assert inst.concept[2].code == "target"
    assert inst.concept[2].definition == (
        "This structure describes an instance passed to the mapping "
        "engine that is used a target of data."
    )
    assert inst.concept[2].display == "Target Structure Definition"
    assert inst.concept[3].code == "produced"
    assert inst.concept[3].definition == (
        "This structure describes an instance that the mapping engine"
        " may ask to create that is used a target of data."
    )
    assert inst.concept[3].display == "Produced Structure Definition"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "<EMAIL>"
    assert inst.content == "complete"
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert inst.description == "How the referenced structure is used in this mapping."
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "fhir"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 2
    assert inst.id == "map-model-mode"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.676"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "StructureMapModelMode"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "StructureMapModelMode"
    assert inst.url == "http://hl7.org/fhir/map-model-mode"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/map-model-mode"
    assert inst.version == "4.0.1"
def test_codesystem_1(base_settings):
    """No. 1 tests collection for CodeSystem.

    Parses codesystem-map-model-mode.json and verifies the parsed
    resource survives a dict round-trip unchanged.
    """
    json_path = base_settings["unittest_data_dir"] / "codesystem-map-model-mode.json"
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_1(parsed)
    # Round-trip: dump to a plain dict, rebuild, and re-run the same
    # field assertions to prove serialization is lossless.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_1(codesystem.CodeSystem(**payload))
def impl_codesystem_2(inst):
    """Assert the expected field values of the 'special-values'
    CodeSystem example.

    ``inst`` is a parsed CodeSystem; called both on the freshly parsed
    instance and on its dict round-trip rebuild.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "true"
    assert inst.concept[0].definition == "Boolean true."
    assert inst.concept[0].display == "true"
    assert inst.concept[1].code == "false"
    assert inst.concept[1].definition == "Boolean false."
    assert inst.concept[1].display == "false"
    assert inst.concept[2].code == "trace"
    assert inst.concept[2].definition == (
        "The content is greater than zero, but too small to be " "quantified."
    )
    assert inst.concept[2].display == "Trace Amount Detected"
    assert inst.concept[3].code == "sufficient"
    assert inst.concept[3].definition == (
        "The specific quantity is not known, but is known to be non-"
        "zero and is not specified because it makes up the bulk of "
        "the material."
    )
    assert inst.concept[3].display == "Sufficient Quantity"
    assert inst.concept[3].extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/codesystem-concept-" "comments"
    )
    assert inst.concept[3].extension[0].valueString == (
        "used in formulations (e.g. 'Add 10mg of ingredient X, 50mg "
        "of ingredient Y, and sufficient quantity of water to 100mL.'"
        " This code would be used to express the quantity of water. )"
    )
    assert inst.concept[4].code == "withdrawn"
    assert inst.concept[4].definition == "The value is no longer available."
    assert inst.concept[4].display == "Value Withdrawn"
    assert inst.concept[5].code == "nil-known"
    assert (
        inst.concept[5].definition
        == "The are no known applicable values in this context."
    )
    assert inst.concept[5].display == "Nil Known"
    assert inst.concept[5].extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/codesystem-concept-" "comments"
    )
    assert (
        inst.concept[5].extension[0].valueString
        == "The existence of this subject to review"
    )
    assert inst.content == "complete"
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert inst.description == (
        "A set of generally useful codes defined so they can be "
        "included in value sets."
    )
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "fhir"
    assert inst.id == "special-values"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.1049"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "SpecialValues"
    assert inst.status == "draft"
    assert inst.text.status == "extensions"
    assert inst.title == "SpecialValues"
    assert inst.url == "http://terminology.hl7.org/CodeSystem/special-values"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/special-values"
    assert inst.version == "4.0.1"
def test_codesystem_2(base_settings):
    """No. 2 tests collection for CodeSystem.

    Parses codesystem-special-values.json and verifies the parsed
    resource survives a dict round-trip unchanged.
    """
    json_path = base_settings["unittest_data_dir"] / "codesystem-special-values.json"
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_2(parsed)
    # Round-trip: dump to a plain dict, rebuild, and re-run the same
    # field assertions to prove serialization is lossless.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_2(codesystem.CodeSystem(**payload))
def impl_codesystem_3(inst):
    """Assert the expected field values of the
    'communication-not-done-reason' CodeSystem example.

    ``inst`` is a parsed CodeSystem; called both on the freshly parsed
    instance and on its dict round-trip rebuild.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "unknown"
    assert (
        inst.concept[0].definition
        == "The communication was not done due to an unknown reason."
    )
    assert inst.concept[0].display == "Unknown"
    assert inst.concept[1].code == "system-error"
    assert (
        inst.concept[1].definition
        == "The communication was not done due to a system error."
    )
    assert inst.concept[1].display == "System Error"
    assert inst.concept[2].code == "invalid-phone-number"
    assert inst.concept[2].definition == (
        "The communication was not done due to an invalid phone " "number."
    )
    assert inst.concept[2].display == "Invalid Phone Number"
    assert inst.concept[3].code == "recipient-unavailable"
    assert inst.concept[3].definition == (
        "The communication was not done due to the recipient being " "unavailable."
    )
    assert inst.concept[3].display == "Recipient Unavailable"
    assert inst.concept[4].code == "family-objection"
    assert (
        inst.concept[4].definition
        == "The communication was not done due to a family objection."
    )
    assert inst.concept[4].display == "Family Objection"
    assert inst.concept[5].code == "patient-objection"
    assert (
        inst.concept[5].definition
        == "The communication was not done due to a patient objection."
    )
    assert inst.concept[5].display == "Patient Objection"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "<EMAIL>"
    assert inst.content == "complete"
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert (
        inst.description == "Codes for the reason why a communication did not happen."
    )
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "pc"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "draft"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 1
    assert inst.id == "communication-not-done-reason"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.1077"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "CommunicationNotDoneReason"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "CommunicationNotDoneReason"
    assert inst.url == (
        "http://terminology.hl7.org/CodeSystem/communication-not-" "done-reason"
    )
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/communication-not-done-reason"
    assert inst.version == "4.0.1"
def test_codesystem_3(base_settings):
    """No. 3 tests collection for CodeSystem.

    Parses codesystem-communication-not-done-reason.json and verifies the
    parsed resource survives a dict round-trip unchanged.
    """
    json_path = (
        base_settings["unittest_data_dir"]
        / "codesystem-communication-not-done-reason.json"
    )
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_3(parsed)
    # Round-trip: dump to a plain dict, rebuild, and re-run the same
    # field assertions to prove serialization is lossless.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_3(codesystem.CodeSystem(**payload))
def impl_codesystem_4(inst):
    """Assert the expected field values of the
    'codesystem-hierarchy-meaning' CodeSystem example.

    ``inst`` is a parsed CodeSystem; called both on the freshly parsed
    instance and on its dict round-trip rebuild.
    """
    assert inst.caseSensitive is True
    assert inst.concept[0].code == "grouped-by"
    assert inst.concept[0].display == "Grouped By"
    assert inst.concept[1].code == "is-a"
    assert inst.concept[1].display == "Is-A"
    assert inst.concept[2].code == "part-of"
    assert inst.concept[2].definition == (
        "Child elements list the individual parts of a composite "
        "whole (e.g. body site)."
    )
    assert inst.concept[2].display == "Part Of"
    assert inst.concept[3].code == "classified-with"
    assert inst.concept[3].display == "Classified With"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "<EMAIL>.org"
    assert inst.content == "complete"
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert (
        inst.description == "The meaning of the hierarchy of concepts in a code system."
    )
    assert inst.experimental is False
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "vocab"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "normative"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "normative-version"
    )
    assert inst.extension[2].valueCode == "4.0.0"
    assert inst.extension[3].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[3].valueInteger == 5
    assert inst.id == "codesystem-hierarchy-meaning"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.785"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "CodeSystemHierarchyMeaning"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "active"
    assert inst.text.status == "generated"
    assert inst.title == "CodeSystemHierarchyMeaning"
    assert inst.url == "http://hl7.org/fhir/codesystem-hierarchy-meaning"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/codesystem-hierarchy-meaning"
    assert inst.version == "4.0.1"
def test_codesystem_4(base_settings):
    """No. 4 tests collection for CodeSystem.
    Test File: codesystem-codesystem-hierarchy-meaning.json
    """
    json_path = base_settings["unittest_data_dir"] / (
        "codesystem-codesystem-hierarchy-meaning.json"
    )
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_4(parsed)
    # Round-trip: dump to a plain dict, rebuild the model, re-check every field.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_4(codesystem.CodeSystem(**payload))
def impl_codesystem_5(inst):
    """Verify every expected field of the ``medicationrequest-course-of-therapy``
    CodeSystem example resource.

    ``inst`` is a parsed ``codesystem.CodeSystem`` instance; ``test_codesystem_5``
    calls this twice — once on the instance parsed from JSON and once on the
    instance rebuilt from ``inst.dict()`` — so the assertions double as a
    round-trip consistency check.
    """
    assert inst.caseSensitive is True
    # Concept entries: code, definition and display for each therapy-course code.
    assert inst.concept[0].code == "continuous"
    assert inst.concept[0].definition == (
        "A medication which is expected to be continued beyond the "
        "present order and which the patient should be assumed to be "
        "taking unless explicitly stopped."
    )
    assert inst.concept[0].display == "Continuous long term therapy"
    assert inst.concept[1].code == "acute"
    assert inst.concept[1].definition == (
        "A medication which the patient is only expected to consume "
        "for the duration of the current order and which is not "
        "expected to be renewed."
    )
    assert inst.concept[1].display == "Short course (acute) therapy"
    assert inst.concept[2].code == "seasonal"
    assert inst.concept[2].definition == (
        "A medication which is expected to be used on a part time "
        "basis at certain times of the year"
    )
    assert inst.concept[2].display == "Seasonal"
    # Publisher contact details and resource-level metadata.
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.content == "complete"
    assert inst.description == "MedicationRequest Course of Therapy Codes"
    assert inst.experimental is False
    # Standard structuredefinition-* extensions (adjacent string literals are
    # implicitly concatenated, e.g. "...-" "wg" == "...-wg").
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "phx"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "draft"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 1
    assert inst.id == "medicationrequest-course-of-therapy"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.1327"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert (
        inst.meta.profile[0]
        == "http://hl7.org/fhir/StructureDefinition/shareablecodesystem"
    )
    assert inst.name == "medicationRequest Course of Therapy Codes"
    assert inst.publisher == "FHIR Project team"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "Medication request course of therapy codes"
    assert inst.url == (
        "http://terminology.hl7.org/CodeSystem/medicationrequest-" "course-of-therapy"
    )
    assert inst.valueSet == (
        "http://hl7.org/fhir/ValueSet/medicationrequest-course-of-" "therapy"
    )
    assert inst.version == "4.0.1"
def test_codesystem_5(base_settings):
    """No. 5 tests collection for CodeSystem.
    Test File: codesystem-medicationrequest-course-of-therapy.json
    """
    json_path = base_settings["unittest_data_dir"] / (
        "codesystem-medicationrequest-course-of-therapy.json"
    )
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_5(parsed)
    # Round-trip: dump to a plain dict, rebuild the model, re-check every field.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_5(codesystem.CodeSystem(**payload))
def impl_codesystem_6(inst):
    """Verify every expected field of the ``audit-event-outcome`` CodeSystem
    example resource.

    ``inst`` is a parsed ``codesystem.CodeSystem`` instance; ``test_codesystem_6``
    calls this twice — on the freshly parsed instance and on the instance
    rebuilt from ``inst.dict()``.
    """
    assert inst.caseSensitive is True
    # Concept entries: numeric outcome codes with definition and display text.
    assert inst.concept[0].code == "0"
    assert inst.concept[0].definition == (
        "The operation completed successfully (whether with warnings " "or not)."
    )
    assert inst.concept[0].display == "Success"
    assert inst.concept[1].code == "4"
    assert inst.concept[1].definition == (
        "The action was not successful due to some kind of minor "
        "failure (often equivalent to an HTTP 400 response)."
    )
    assert inst.concept[1].display == "Minor failure"
    assert inst.concept[2].code == "8"
    assert inst.concept[2].definition == (
        "The action was not successful due to some kind of unexpected"
        " error (often equivalent to an HTTP 500 response)."
    )
    assert inst.concept[2].display == "Serious failure"
    assert inst.concept[3].code == "12"
    assert inst.concept[3].definition == (
        "An error of such magnitude occurred that the system is no "
        "longer available for use (i.e. the system died)."
    )
    assert inst.concept[3].display == "Major failure"
    # Publisher contact details and resource-level metadata.
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    # NOTE(review): "<EMAIL>" looks like a redacted placeholder from data
    # anonymization — confirm against the original fixture JSON.
    assert inst.contact[0].telecom[1].value == "<EMAIL>"
    assert inst.content == "complete"
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert inst.description == "Indicates whether the event succeeded or failed."
    assert inst.experimental is False
    # Standard structuredefinition-* extensions (implicit literal concatenation).
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "sec"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 3
    assert inst.id == "audit-event-outcome"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.455"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "AuditEventOutcome"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "AuditEventOutcome"
    assert inst.url == "http://hl7.org/fhir/audit-event-outcome"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/audit-event-outcome"
    assert inst.version == "4.0.1"
def test_codesystem_6(base_settings):
    """No. 6 tests collection for CodeSystem.
    Test File: codesystem-audit-event-outcome.json
    """
    json_path = base_settings["unittest_data_dir"] / (
        "codesystem-audit-event-outcome.json"
    )
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_6(parsed)
    # Round-trip: dump to a plain dict, rebuild the model, re-check every field.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_6(codesystem.CodeSystem(**payload))
def impl_codesystem_7(inst):
    """Verify every expected field of the ``contract-subtype`` CodeSystem
    example resource.

    ``inst`` is a parsed ``codesystem.CodeSystem`` instance; ``test_codesystem_7``
    calls this twice — on the freshly parsed instance and on the instance
    rebuilt from ``inst.dict()``.
    """
    assert inst.caseSensitive is True
    # Concept entries: disclosure-policy codes with definition and display text.
    assert inst.concept[0].code == "disclosure-ca"
    assert (
        inst.concept[0].definition == "Canadian health information disclosure policy."
    )
    assert inst.concept[0].display == "Disclosure-CA"
    assert inst.concept[1].code == "disclosure-us"
    assert (
        inst.concept[1].definition
        == "United States health information disclosure policy."
    )
    assert inst.concept[1].display == "Disclosure-US"
    # Publisher contact details and resource-level metadata.
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.content == "complete"
    assert inst.copyright == "This is an example set."
    assert inst.description == "This value set includes sample Contract Subtype codes."
    assert inst.experimental is False
    # Standard structuredefinition-* extensions (implicit literal concatenation).
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "fm"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "draft"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 1
    assert inst.id == "contract-subtype"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.1198"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert (
        inst.meta.profile[0]
        == "http://hl7.org/fhir/StructureDefinition/shareablecodesystem"
    )
    assert inst.name == "ContractSubtypeCodes"
    assert inst.publisher == "Financial Management"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "Contract Subtype Codes"
    assert inst.url == "http://terminology.hl7.org/CodeSystem/contractsubtypecodes"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/contract-subtype"
    assert inst.version == "4.0.1"
def test_codesystem_7(base_settings):
    """No. 7 tests collection for CodeSystem.
    Test File: codesystem-contract-subtype.json
    """
    json_path = base_settings["unittest_data_dir"] / "codesystem-contract-subtype.json"
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_7(parsed)
    # Round-trip: dump to a plain dict, rebuild the model, re-check every field.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_7(codesystem.CodeSystem(**payload))
def impl_codesystem_8(inst):
    """Verify every expected field of the ``map-transform`` CodeSystem
    example resource (StructureMap transform operations).

    ``inst`` is a parsed ``codesystem.CodeSystem`` instance; ``test_codesystem_8``
    calls this twice — on the freshly parsed instance and on the instance
    rebuilt from ``inst.dict()``.
    """
    assert inst.caseSensitive is True
    # Concept entries: one per StructureMap transform operation.
    assert inst.concept[0].code == "create"
    assert inst.concept[0].definition == (
        "create(type : string) - type is passed through to the "
        "application on the standard API, and must be known by it."
    )
    assert inst.concept[0].display == "create"
    assert inst.concept[1].code == "copy"
    assert inst.concept[1].definition == "copy(source)."
    assert inst.concept[1].display == "copy"
    assert inst.concept[2].code == "truncate"
    assert (
        inst.concept[2].definition
        == "truncate(source, length) - source must be stringy type."
    )
    assert inst.concept[2].display == "truncate"
    assert inst.concept[3].code == "escape"
    assert inst.concept[3].definition == (
        "escape(source, fmt1, fmt2) - change source from one kind of "
        "escaping to another (plain, java, xml, json). note that this"
        " is for when the string itself is escaped."
    )
    assert inst.concept[3].display == "escape"
    assert inst.concept[4].code == "cast"
    assert inst.concept[4].definition == (
        "cast(source, type?) - case source from one type to another. "
        "target type can be left as implicit if there is one and only"
        " one target type known."
    )
    assert inst.concept[4].display == "cast"
    assert inst.concept[5].code == "append"
    assert (
        inst.concept[5].definition == "append(source...) - source is element or string."
    )
    assert inst.concept[5].display == "append"
    assert inst.concept[6].code == "translate"
    assert (
        inst.concept[6].definition
        == "translate(source, uri_of_map) - use the translate operation."
    )
    assert inst.concept[6].display == "translate"
    assert inst.concept[7].code == "reference"
    assert inst.concept[7].definition == (
        "reference(source : object) - return a string that references"
        " the provided tree properly."
    )
    assert inst.concept[7].display == "reference"
    assert inst.concept[8].code == "dateOp"
    assert (
        inst.concept[8].definition
        == "Perform a date operation. *Parameters to be documented*."
    )
    assert inst.concept[8].display == "dateOp"
    assert inst.concept[9].code == "uuid"
    assert (
        inst.concept[9].definition
        == "Generate a random UUID (in lowercase). No Parameters."
    )
    assert inst.concept[9].display == "uuid"
    # Publisher contact details and resource-level metadata.
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    # NOTE(review): "<EMAIL>" looks like a redacted placeholder from data
    # anonymization — confirm against the original fixture JSON.
    assert inst.contact[0].telecom[1].value == "<EMAIL>"
    assert inst.content == "complete"
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert inst.description == "How data is copied/created."
    assert inst.experimental is False
    # Standard structuredefinition-* extensions (implicit literal concatenation).
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "fhir"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 2
    assert inst.id == "map-transform"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.682"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "StructureMapTransform"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "StructureMapTransform"
    assert inst.url == "http://hl7.org/fhir/map-transform"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/map-transform"
    assert inst.version == "4.0.1"
def test_codesystem_8(base_settings):
    """No. 8 tests collection for CodeSystem.
    Test File: codesystem-map-transform.json
    """
    json_path = base_settings["unittest_data_dir"] / "codesystem-map-transform.json"
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_8(parsed)
    # Round-trip: dump to a plain dict, rebuild the model, re-check every field.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_8(codesystem.CodeSystem(**payload))
def impl_codesystem_9(inst):
    """Verify every expected field of the ``imagingstudy-status`` CodeSystem
    example resource.

    ``inst`` is a parsed ``codesystem.CodeSystem`` instance; ``test_codesystem_9``
    calls this twice — on the freshly parsed instance and on the instance
    rebuilt from ``inst.dict()``.
    """
    assert inst.caseSensitive is True
    # Concept entries: imaging-study status codes. Note concepts 3 and 4 are
    # only checked for code/display (no definition assertion here).
    assert inst.concept[0].code == "registered"
    assert inst.concept[0].definition == (
        "The existence of the imaging study is registered, but there "
        "is nothing yet available."
    )
    assert inst.concept[0].display == "Registered"
    assert inst.concept[1].code == "available"
    assert inst.concept[1].definition == (
        "At least one instance has been associated with this imaging " "study."
    )
    assert inst.concept[1].display == "Available"
    assert inst.concept[2].code == "cancelled"
    assert inst.concept[2].definition == (
        "The imaging study is unavailable because the imaging study "
        "was not started or not completed (also sometimes called "
        '"aborted").'
    )
    assert inst.concept[2].display == "Cancelled"
    assert inst.concept[3].code == "entered-in-error"
    assert inst.concept[3].display == "Entered in Error"
    assert inst.concept[4].code == "unknown"
    assert inst.concept[4].display == "Unknown"
    # Publisher contact details and resource-level metadata.
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    # NOTE(review): "<EMAIL>" looks like a redacted placeholder from data
    # anonymization — confirm against the original fixture JSON.
    assert inst.contact[0].telecom[1].value == "<EMAIL>"
    assert inst.content == "complete"
    assert inst.date == fhirtypes.DateTime.validate("2019-11-01T09:29:23+11:00")
    assert inst.description == "The status of the ImagingStudy."
    assert inst.experimental is False
    # Standard structuredefinition-* extensions (implicit literal concatenation).
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "ii"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 3
    assert inst.id == "imagingstudy-status"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.991"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert inst.name == "ImagingStudyStatus"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "ImagingStudyStatus"
    assert inst.url == "http://hl7.org/fhir/imagingstudy-status"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/imagingstudy-status"
    assert inst.version == "4.0.1"
def test_codesystem_9(base_settings):
    """No. 9 tests collection for CodeSystem.
    Test File: codesystem-imagingstudy-status.json
    """
    json_path = base_settings["unittest_data_dir"] / (
        "codesystem-imagingstudy-status.json"
    )
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_9(parsed)
    # Round-trip: dump to a plain dict, rebuild the model, re-check every field.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_9(codesystem.CodeSystem(**payload))
def impl_codesystem_10(inst):
    """Verify every expected field of the ``benefit-type`` CodeSystem
    example resource.

    ``inst`` is a parsed ``codesystem.CodeSystem`` instance; ``test_codesystem_10``
    calls this twice — on the freshly parsed instance and on the instance
    rebuilt from ``inst.dict()``.
    """
    assert inst.caseSensitive is True
    # Concept entries: benefit-type codes with definition and display text.
    assert inst.concept[0].code == "benefit"
    assert inst.concept[0].definition == "Maximum benefit allowable."
    assert inst.concept[0].display == "Benefit"
    assert inst.concept[1].code == "deductible"
    assert (
        inst.concept[1].definition == "Cost to be incurred before benefits are applied"
    )
    assert inst.concept[1].display == "Deductible"
    assert inst.concept[2].code == "visit"
    assert inst.concept[2].definition == "Service visit"
    assert inst.concept[2].display == "Visit"
    assert inst.concept[3].code == "room"
    assert inst.concept[3].definition == "Type of room"
    assert inst.concept[3].display == "Room"
    assert inst.concept[4].code == "copay"
    assert inst.concept[4].definition == "Copayment per service"
    assert inst.concept[4].display == "Copayment per service"
    assert inst.concept[5].code == "copay-percent"
    assert inst.concept[5].definition == "Copayment percentage per service"
    assert inst.concept[5].display == "Copayment Percent per service"
    assert inst.concept[6].code == "copay-maximum"
    assert inst.concept[6].definition == "Copayment maximum per service"
    assert inst.concept[6].display == "Copayment maximum per service"
    assert inst.concept[7].code == "vision-exam"
    assert inst.concept[7].definition == "Vision Exam"
    assert inst.concept[7].display == "Vision Exam"
    assert inst.concept[8].code == "vision-glasses"
    assert inst.concept[8].definition == "Frames and lenses"
    assert inst.concept[8].display == "Vision Glasses"
    assert inst.concept[9].code == "vision-contacts"
    assert inst.concept[9].definition == "Contact Lenses"
    assert inst.concept[9].display == "Vision Contacts Coverage"
    # Publisher contact details and resource-level metadata.
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.content == "complete"
    assert inst.copyright == "This is an example set."
    assert (
        inst.description
        == "This value set includes a smattering of Benefit type codes."
    )
    assert inst.experimental is False
    # Standard structuredefinition-* extensions (implicit literal concatenation).
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "wg"
    )
    assert inst.extension[0].valueCode == "fm"
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "draft"
    assert inst.extension[2].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[2].valueInteger == 1
    assert inst.id == "benefit-type"
    assert inst.identifier[0].system == "urn:ietf:rfc:3986"
    assert inst.identifier[0].value == "urn:oid:2.16.840.1.113883.4.642.4.1176"
    assert inst.meta.lastUpdated == fhirtypes.Instant.validate(
        "2019-11-01T09:29:23.356+11:00"
    )
    assert (
        inst.meta.profile[0]
        == "http://hl7.org/fhir/StructureDefinition/shareablecodesystem"
    )
    assert inst.name == "BenefitTypeCodes"
    assert inst.publisher == "Financial Management"
    assert inst.status == "draft"
    assert inst.text.status == "generated"
    assert inst.title == "Benefit Type Codes"
    assert inst.url == "http://terminology.hl7.org/CodeSystem/benefit-type"
    assert inst.valueSet == "http://hl7.org/fhir/ValueSet/benefit-type"
    assert inst.version == "4.0.1"
def test_codesystem_10(base_settings):
    """No. 10 tests collection for CodeSystem.
    Test File: codesystem-benefit-type.json
    """
    json_path = base_settings["unittest_data_dir"] / "codesystem-benefit-type.json"
    parsed = codesystem.CodeSystem.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "CodeSystem"
    impl_codesystem_10(parsed)
    # Round-trip: dump to a plain dict, rebuild the model, re-check every field.
    payload = parsed.dict()
    assert payload["resourceType"] == "CodeSystem"
    impl_codesystem_10(codesystem.CodeSystem(**payload))
sparsePlane/sparseplane/utils/metrics.py | jinlinyi/SparsePlanes | 69 | 6612587 | import torch
import numpy as np
@torch.no_grad()
def compare_planes(
pred_planes,
gt_planes,
):
"""
naively calculate 3d vector l2 distance
"""
pred_planes = torch.tensor(np.array(pred_planes), dtype=torch.float32)
pred_offsets = torch.norm(pred_planes, p=2, dim=1) + 1e-5
pred_norms = pred_planes.div(pred_offsets.view(-1, 1).expand_as(pred_planes))
gt_planes = torch.tensor(np.array(gt_planes), dtype=torch.float32)
gt_offsets = torch.norm(gt_planes, p=2, dim=1) + 1e-5
gt_norms = gt_planes.div(gt_offsets.view(-1, 1).expand_as(gt_planes))
norm_distance_matrix = torch.clamp(torch.cdist(pred_norms, gt_norms, p=2), 0, 2)
norm_angle_matrix = 2 * torch.asin(norm_distance_matrix / 2) / np.pi * 180
offset_distance_matrix = torch.cdist(
pred_offsets.view(-1, 1), gt_offsets.view(-1, 1), p=1
)
return {"norm": norm_angle_matrix, "offset": offset_distance_matrix}
def compare_planes_one_to_one(
pred_planes,
gt_planes,
):
pred_planes = torch.tensor(np.array(pred_planes), dtype=torch.float32)
pred_offsets = torch.clamp(torch.norm(pred_planes, p=2, dim=1), min=1e-5)
pred_norms = pred_planes.div(pred_offsets.view(-1, 1).expand_as(pred_planes))
gt_planes = torch.tensor(np.array(gt_planes), dtype=torch.float32)
gt_offsets = torch.clamp(torch.norm(gt_planes, p=2, dim=1), min=1e-5)
gt_norms = gt_planes.div(gt_offsets.view(-1, 1).expand_as(gt_planes))
l2 = torch.norm(pred_planes - gt_planes, dim=1).numpy().mean()
norm = (
torch.acos(torch.clamp(torch.sum(pred_norms * gt_norms, dim=1), max=1, min=-1))
.numpy()
.mean()
)
offset = torch.abs(pred_offsets - gt_offsets).numpy().mean()
return {"l2": l2, "norm": norm, "offset": offset}
| import torch
import numpy as np
@torch.no_grad()
def compare_planes(
pred_planes,
gt_planes,
):
"""
naively calculate 3d vector l2 distance
"""
pred_planes = torch.tensor(np.array(pred_planes), dtype=torch.float32)
pred_offsets = torch.norm(pred_planes, p=2, dim=1) + 1e-5
pred_norms = pred_planes.div(pred_offsets.view(-1, 1).expand_as(pred_planes))
gt_planes = torch.tensor(np.array(gt_planes), dtype=torch.float32)
gt_offsets = torch.norm(gt_planes, p=2, dim=1) + 1e-5
gt_norms = gt_planes.div(gt_offsets.view(-1, 1).expand_as(gt_planes))
norm_distance_matrix = torch.clamp(torch.cdist(pred_norms, gt_norms, p=2), 0, 2)
norm_angle_matrix = 2 * torch.asin(norm_distance_matrix / 2) / np.pi * 180
offset_distance_matrix = torch.cdist(
pred_offsets.view(-1, 1), gt_offsets.view(-1, 1), p=1
)
return {"norm": norm_angle_matrix, "offset": offset_distance_matrix}
def compare_planes_one_to_one(
pred_planes,
gt_planes,
):
pred_planes = torch.tensor(np.array(pred_planes), dtype=torch.float32)
pred_offsets = torch.clamp(torch.norm(pred_planes, p=2, dim=1), min=1e-5)
pred_norms = pred_planes.div(pred_offsets.view(-1, 1).expand_as(pred_planes))
gt_planes = torch.tensor(np.array(gt_planes), dtype=torch.float32)
gt_offsets = torch.clamp(torch.norm(gt_planes, p=2, dim=1), min=1e-5)
gt_norms = gt_planes.div(gt_offsets.view(-1, 1).expand_as(gt_planes))
l2 = torch.norm(pred_planes - gt_planes, dim=1).numpy().mean()
norm = (
torch.acos(torch.clamp(torch.sum(pred_norms * gt_norms, dim=1), max=1, min=-1))
.numpy()
.mean()
)
offset = torch.abs(pred_offsets - gt_offsets).numpy().mean()
return {"l2": l2, "norm": norm, "offset": offset}
| en | 0.827234 | naively calculate 3d vector l2 distance | 2.413396 | 2 |
src/visualisation/models.py | jacobic/redpipes | 0 | 6612588 | <reponame>jacobic/redpipes<filename>src/visualisation/models.py
from astropy.io import fits
# This backend is required for X11 forwarding.
import matplotlib
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import CubicSpline
import src.globals as glo
from src.utils import Str, table_to_dict
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# plt.switch_backend('Agg')
import matplotlib as mpl
import matplotlib.colors as mplc
from astropy import wcs
from astropy import units as u
from astropy.visualization import ZScaleInterval
from regions import CircleSkyRegion
import logging
import pandas as pd
import numpy as np
import pickle
import matplotlib.gridspec as gridspec
from mpl_toolkits import axes_grid1
from reproject import reproject_interp, reproject_exact
import os
import src.globals as glo
from src.utils import Str
# def plot_models(name_models="rs_norm_slope"):
# matplotlib.rcParams.update({
# 'font.size': 25})
#
# fig = plt.figure(figsize=(45, 15))
# ax0 = fig.add_subplot(141)
# ax1 = fig.add_subplot(142)
# ax2 = fig.add_subplot(143)
# axs = [ax0, ax1, ax2]
#
# # The models of red sequence width.
# # Redshift bins.
# z_bins = np.array([(0.01 * i) + 0.05 for i in range(75)])
# # Magnitude bins, this is required for the interpolation step.
# i_bins = np.array([(0.5 * i) + 14.75 for i in range(18)])
# # Plot every nth point
# # n = 10
# # mag_auto_i = i_bins[0::n]
# mag_auto_i = i_bins
#
# # cmap = plt.get_cmap('plasma')
# cmap = glo.cm
# divider = make_axes_locatable(ax2)
# cax = divider.append_axes('right', size='5%', pad=0.15)
# normal = plt.Normalize(vmin=0, vmax=np.min(z_bins))
# c_norm = cmap(plt.Normalize(min(z_bins), max(z_bins))(z_bins))
# norm = mpl.colors.Normalize(vmin=0, vmax=np.min(z_bins))
#
# # Load variables from red sequence models Tables are also numpy arrays.
# path_models = os.path.join(glo.dir_models, name_models)
# models = pd.read_table(path_models, delim_whitespace=True, header=0)
#
# settings = {
# 'MIN_MAGERR_DETMODEL': [0.05, 0.05, 0.03],
# 'CORRECTION_MAG_DETMODEL': [0.045091365, -0.052124453, 0.019468499],
# 'MIN_RS_MODEL_WIDTH': [0.15, 0.1, 0.05],
# 'MAX_RS_MODEL_WIDTH_IDX': [70, 55, 70]}
#
# settings = pd.DataFrame.from_dict(settings).set_index([glo.col_options])
# for i, col in enumerate(glo.col_options):
# name_width = "rs_width_{0:l}".format(col)
# width_model = np.loadtxt(os.path.join(glo.dir_models, name_width))
# # The as_matrix() method converts each pandas.series to a np.array.
# z_model = models['REDSHIFT'].as_matrix()
# norm_model = models['NORMALISATION_{0:u}'.format(col)].as_matrix()
# slope_model = models['SLOPE_{0:u}'.format(col)].as_matrix()
#
# config = settings.loc[col, :]
#
# # For easy formatting.
# col = Str(col)
#
# # Increase minimum intrinsic scatter.
# min_width_model = config.loc['MIN_RS_MODEL_WIDTH']
#
# # The following warning is to be expected, don't worry as it is
# masked.
# # RuntimeWarning: invalid value encountered in less.
# width_model[np.ma.masked_invalid(
# width_model) < min_width_model] = min_width_model
#
# # The red sequence model widths begin to break down at high redshift
# # so a x_lim is enforced to prevent extrapolating into this regime.
# idx_max_width = int(config.loc['MAX_RS_MODEL_WIDTH_IDX'])
#
# for j, z in enumerate(z_bins):
# # Determine idx corresponding to the the redshift step in the red
# # sequence
# # model data that is most similar redshift of the candidate.
# idx_model = np.argmin(np.absolute(z_model - z))
# # Determine the col distance from the red sequence.
# # Imagine col (y-axis) vs magnitude (x-axis) with y = mx + c
# # mag_auto_i = np.arrange(10, 23, 1)
# col_model = (slope_model[idx_model] * mag_auto_i) + norm_model[
# idx_model]
#
# idx_candidate = np.argmin(np.absolute(z_bins - z))
# idx_galaxy = np.nanmin([idx_candidate, idx_max_width])
# red_sequence_width = width_model[idx_galaxy]
#
# # Filter out NaN values before interpolating. Note ~ is the
# # invert operator.
# idx_interpol = ~np.isnan(red_sequence_width)
#
# # Interpolate data with a piecewise cubic polynomial to
# generate new
# # data points for each of the i mag auto values.
# interpolate_col = CubicSpline(i_bins[idx_interpol],
# red_sequence_width[idx_interpol])
# col_scatter = interpolate_col(mag_auto_i)
#
# axs[i].plot(mag_auto_i, col_model, color=c_norm[j])
# axs[i].set_xlabel('i')
# axs[i].set_ylabel('{0:l} - {1:l}'.format(col[0], col[1]))
# axs[i].set_xlim(17, 23)
# axs[i].set_ylim(0, 2)
# axs[i].set_yticks([0, 0.5, 1, 1.5, 2])
# axs[i].set_xticks([17, 19, 21, 23])
#
# cbar = mpl.colorbar.ColorbarBase(ax=cax, cmap=cmap, norm=norm,
# orientation='vertical',
# ticks=[0, 0.2, 0.4, 0.6])
# cbar.set_label('Redshift')
# # cbar.ax.set_yticks()
# # cbar.ax.set_yticklabels(['0', '0.2', '0.4', '0.6'])
#
# data_out = os.path.join(glo.dir_figs, 'models.png')
# plt.savefig(data_out, format='png', dpi=300)
def plot_models_poster(name_models="rs_norm_slope", seperate=True):
    """Plot the red sequence colour models, one panel per colour option.

    Produces a three-panel figure (one panel per entry in
    ``glo.col_options``) showing the modelled red sequence colour as a
    function of i-band magnitude, with one line per redshift bin coloured
    by redshift, plus a shared colourbar.  The figure is written to
    ``glo.DIR_FIGS/models.png``.

    Parameters
    ----------
    name_models : str
        File name (within ``glo.DIR_MODELS``) of the whitespace-delimited
        table holding the red sequence normalisation/slope models.
    seperate : bool
        If True, additionally save each panel as its own image,
        ``models.ax<i>.png``.  (Parameter name kept misspelled for
        backward compatibility with existing callers.)
    """
    matplotlib.rcParams.update({
        'font.size': 25})
    fig = plt.figure(figsize=(11, 14))
    # Create the bottom panel first so the upper panels can share its x-axis.
    ax2 = fig.add_subplot(313)
    ax0 = fig.add_subplot(311, sharex=ax2)
    ax1 = fig.add_subplot(312, sharex=ax2)
    plt.setp(ax0.get_xticklabels(), visible=False)
    plt.setp(ax1.get_xticklabels(), visible=False)
    plt.tight_layout()
    axs = [ax0, ax1, ax2]
    # Redshift bins: 0.05 .. 0.79 in steps of 0.01.
    z_bins = np.array([(0.01 * i) + 0.05 for i in range(75)])
    # Magnitude bins (the x-axis sampling of each model line).
    i_bins = np.array([(0.5 * i) + 14.75 for i in range(18)])
    mag_auto_i = i_bins
    cmap = glo.cm
    # TODO: add a single redshift axes to the entire subplot
    # One line colour per redshift bin, mapped through the colormap.
    c_norm = cmap(plt.Normalize(min(z_bins), max(z_bins))(z_bins))
    # Load variables from the red sequence models table.
    path_models = os.path.join(glo.DIR_MODELS, name_models)
    models = pd.read_table(path_models, delim_whitespace=True, header=0)
    settings = {
        'MIN_MAGERR_DETMODEL': [0.05, 0.05, 0.03],
        'CORRECTION_MAG_DETMODEL': [0.045091365, -0.052124453, 0.019468499],
        'MIN_RS_MODEL_WIDTH': [0.15, 0.1, 0.05],
        'MAX_RS_MODEL_WIDTH_IDX': [70, 55, 70]}
    settings = pd.DataFrame.from_dict(settings).set_index([glo.col_options])
    for i, col in enumerate(glo.col_options):
        axs[i].grid(True, linestyle='dashed')
        # NOTE(review): the "{0:l}"/"{0:u}" format specs assume items of
        # glo.col_options already support the custom Str format codes
        # (they are formatted before the Str(col) conversion below) -- confirm.
        name_width = "rs_width_{0:l}".format(col)
        # The width model is loaded (and clamped below) for parity with the
        # original pipeline, but it no longer feeds the plotted lines.
        width_model = np.loadtxt(os.path.join(glo.DIR_MODELS, name_width))
        # ``.values`` converts each pandas.Series to a np.ndarray.
        # (Was ``.as_matrix()``, which was deprecated and then removed in
        # pandas 1.0.)
        z_model = models['REDSHIFT'].values
        norm_model = models['NORMALISATION_{0:u}'.format(col)].values
        slope_model = models['SLOPE_{0:u}'.format(col)].values
        config = settings.loc[col, :]
        # For easy formatting.
        col = Str(col)
        # Enforce a minimum intrinsic scatter.  The masked comparison keeps
        # the expected "invalid value encountered in less" warning benign.
        min_width_model = config.loc['MIN_RS_MODEL_WIDTH']
        width_model[np.ma.masked_invalid(
            width_model) < min_width_model] = min_width_model
        for j, z in enumerate(z_bins):
            # Red sequence model step closest in redshift to this bin.
            idx_model = np.argmin(np.absolute(z_model - z))
            # Colour of the red sequence at each magnitude: col = m*i + c.
            col_model = (slope_model[idx_model] * mag_auto_i) + norm_model[
                idx_model]
            # (A dead CubicSpline interpolation of the red sequence width,
            # whose result was never used, has been removed from this loop.)
            axs[i].plot(mag_auto_i, col_model, color=c_norm[j])
        axs[i].set_ylabel('{0:l} - {1:l}'.format(col[0], col[1]))
        axs[i].set_xlim(17, 23)
        axs[i].set_ylim(0, 2)
        axs[i].set_yticks([0, 0.5, 1, 1.5, 2])
    ax2.set_xlabel('i')
    # Shared colourbar via a ScalarMappable with a faked-up array.
    sm = plt.cm.ScalarMappable(cmap=glo.cm,
                               norm=plt.Normalize(vmin=0, vmax=np.max(z_bins)))
    sm._A = []
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    cb_fig = fig.colorbar(sm, cax=cbar_ax)
    cb_fig.ax.set_title('z')
    data_out = os.path.join(glo.DIR_FIGS, 'models.png')
    plt.savefig(data_out, format='png', dpi=300)
    if seperate is True:
        # Additionally save each panel on its own, expanded so labels fit.
        for i, ax in enumerate([ax0, ax1, ax2]):
            # The bottom panel needs extra vertical padding for its x-label.
            pad_y = 1.4 if i == 2 else 1.2
            extent = ax.get_window_extent().transformed(
                fig.dpi_scale_trans.inverted())
            fig.savefig(
                os.path.join(glo.DIR_FIGS, 'models.ax{0}.png'.format(i)),
                bbox_inches=extent.expanded(1.2, pad_y), dpi=800)
# Allow running this module directly to regenerate the poster figures.
if __name__ == '__main__':
    plot_models_poster()
| from astropy.io import fits
# This backend is required for X11 forwarding.
import matplotlib
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import CubicSpline
import src.globals as glo
from src.utils import Str, table_to_dict
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# plt.switch_backend('Agg')
import matplotlib as mpl
import matplotlib.colors as mplc
from astropy import wcs
from astropy import units as u
from astropy.visualization import ZScaleInterval
from regions import CircleSkyRegion
import logging
import pandas as pd
import numpy as np
import pickle
import matplotlib.gridspec as gridspec
from mpl_toolkits import axes_grid1
from reproject import reproject_interp, reproject_exact
import os
import src.globals as glo
from src.utils import Str
# def plot_models(name_models="rs_norm_slope"):
# matplotlib.rcParams.update({
# 'font.size': 25})
#
# fig = plt.figure(figsize=(45, 15))
# ax0 = fig.add_subplot(141)
# ax1 = fig.add_subplot(142)
# ax2 = fig.add_subplot(143)
# axs = [ax0, ax1, ax2]
#
# # The models of red sequence width.
# # Redshift bins.
# z_bins = np.array([(0.01 * i) + 0.05 for i in range(75)])
# # Magnitude bins, this is required for the interpolation step.
# i_bins = np.array([(0.5 * i) + 14.75 for i in range(18)])
# # Plot every nth point
# # n = 10
# # mag_auto_i = i_bins[0::n]
# mag_auto_i = i_bins
#
# # cmap = plt.get_cmap('plasma')
# cmap = glo.cm
# divider = make_axes_locatable(ax2)
# cax = divider.append_axes('right', size='5%', pad=0.15)
# normal = plt.Normalize(vmin=0, vmax=np.min(z_bins))
# c_norm = cmap(plt.Normalize(min(z_bins), max(z_bins))(z_bins))
# norm = mpl.colors.Normalize(vmin=0, vmax=np.min(z_bins))
#
# # Load variables from red sequence models Tables are also numpy arrays.
# path_models = os.path.join(glo.dir_models, name_models)
# models = pd.read_table(path_models, delim_whitespace=True, header=0)
#
# settings = {
# 'MIN_MAGERR_DETMODEL': [0.05, 0.05, 0.03],
# 'CORRECTION_MAG_DETMODEL': [0.045091365, -0.052124453, 0.019468499],
# 'MIN_RS_MODEL_WIDTH': [0.15, 0.1, 0.05],
# 'MAX_RS_MODEL_WIDTH_IDX': [70, 55, 70]}
#
# settings = pd.DataFrame.from_dict(settings).set_index([glo.col_options])
# for i, col in enumerate(glo.col_options):
# name_width = "rs_width_{0:l}".format(col)
# width_model = np.loadtxt(os.path.join(glo.dir_models, name_width))
# # The as_matrix() method converts each pandas.series to a np.array.
# z_model = models['REDSHIFT'].as_matrix()
# norm_model = models['NORMALISATION_{0:u}'.format(col)].as_matrix()
# slope_model = models['SLOPE_{0:u}'.format(col)].as_matrix()
#
# config = settings.loc[col, :]
#
# # For easy formatting.
# col = Str(col)
#
# # Increase minimum intrinsic scatter.
# min_width_model = config.loc['MIN_RS_MODEL_WIDTH']
#
# # The following warning is to be expected, don't worry as it is
# masked.
# # RuntimeWarning: invalid value encountered in less.
# width_model[np.ma.masked_invalid(
# width_model) < min_width_model] = min_width_model
#
# # The red sequence model widths begin to break down at high redshift
# # so a x_lim is enforced to prevent extrapolating into this regime.
# idx_max_width = int(config.loc['MAX_RS_MODEL_WIDTH_IDX'])
#
# for j, z in enumerate(z_bins):
# # Determine idx corresponding to the the redshift step in the red
# # sequence
# # model data that is most similar redshift of the candidate.
# idx_model = np.argmin(np.absolute(z_model - z))
# # Determine the col distance from the red sequence.
# # Imagine col (y-axis) vs magnitude (x-axis) with y = mx + c
# # mag_auto_i = np.arrange(10, 23, 1)
# col_model = (slope_model[idx_model] * mag_auto_i) + norm_model[
# idx_model]
#
# idx_candidate = np.argmin(np.absolute(z_bins - z))
# idx_galaxy = np.nanmin([idx_candidate, idx_max_width])
# red_sequence_width = width_model[idx_galaxy]
#
# # Filter out NaN values before interpolating. Note ~ is the
# # invert operator.
# idx_interpol = ~np.isnan(red_sequence_width)
#
# # Interpolate data with a piecewise cubic polynomial to
# generate new
# # data points for each of the i mag auto values.
# interpolate_col = CubicSpline(i_bins[idx_interpol],
# red_sequence_width[idx_interpol])
# col_scatter = interpolate_col(mag_auto_i)
#
# axs[i].plot(mag_auto_i, col_model, color=c_norm[j])
# axs[i].set_xlabel('i')
# axs[i].set_ylabel('{0:l} - {1:l}'.format(col[0], col[1]))
# axs[i].set_xlim(17, 23)
# axs[i].set_ylim(0, 2)
# axs[i].set_yticks([0, 0.5, 1, 1.5, 2])
# axs[i].set_xticks([17, 19, 21, 23])
#
# cbar = mpl.colorbar.ColorbarBase(ax=cax, cmap=cmap, norm=norm,
# orientation='vertical',
# ticks=[0, 0.2, 0.4, 0.6])
# cbar.set_label('Redshift')
# # cbar.ax.set_yticks()
# # cbar.ax.set_yticklabels(['0', '0.2', '0.4', '0.6'])
#
# data_out = os.path.join(glo.dir_figs, 'models.png')
# plt.savefig(data_out, format='png', dpi=300)
def plot_models_poster(name_models="rs_norm_slope", seperate=True):
    """Plot the red sequence colour models, one panel per colour option.

    Produces a three-panel figure (one panel per entry in
    ``glo.col_options``) showing the modelled red sequence colour as a
    function of i-band magnitude, one line per redshift bin coloured by
    redshift, plus a shared colourbar.  Saved to ``glo.DIR_FIGS/models.png``;
    when ``seperate`` is True each panel is also saved individually.
    """
    matplotlib.rcParams.update({
        'font.size': 25})
    fig = plt.figure(figsize=(11, 14))
    # Bottom panel created first so the upper panels can share its x-axis.
    ax2 = fig.add_subplot(313)
    ax0 = fig.add_subplot(311, sharex=ax2)
    ax1 = fig.add_subplot(312, sharex=ax2)
    plt.setp(ax0.get_xticklabels(), visible=False)
    plt.setp(ax1.get_xticklabels(), visible=False)
    # fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.tight_layout()
    axs = [ax0, ax1, ax2]
    # The models of red sequence width.
    # Redshift bins.
    z_bins = np.array([(0.01 * i) + 0.05 for i in range(75)])
    # Magnitude bins, this is required for the interpolation step.
    i_bins = np.array([(0.5 * i) + 14.75 for i in range(18)])
    # Plot every nth point
    # n = 10
    # mag_auto_i = i_bins[0::n]
    mag_auto_i = i_bins
    # cmap = plt.get_cmap('plasma')
    cmap = glo.cm
    # TODO: add a single redshift axes to the entire subplot
    # divider = make_axes_locatable(ax2)
    # cax = divider.append_axes('right', size='5%', pad=0.15)
    normal = plt.Normalize(vmin=0, vmax=np.min(z_bins))
    c_norm = cmap(plt.Normalize(min(z_bins), max(z_bins))(z_bins))
    norm = mpl.colors.Normalize(vmin=0, vmax=np.min(z_bins))
    # Load variables from red sequence models Tables are also numpy arrays.
    path_models = os.path.join(glo.DIR_MODELS, name_models)
    models = pd.read_table(path_models, delim_whitespace=True, header=0)
    settings = {
        'MIN_MAGERR_DETMODEL': [0.05, 0.05, 0.03],
        'CORRECTION_MAG_DETMODEL': [0.045091365, -0.052124453, 0.019468499],
        'MIN_RS_MODEL_WIDTH': [0.15, 0.1, 0.05],
        'MAX_RS_MODEL_WIDTH_IDX': [70, 55, 70]}
    settings = pd.DataFrame.from_dict(settings).set_index([glo.col_options])
    for i, col in enumerate(glo.col_options):
        axs[i].grid(True, linestyle='dashed')
        # NOTE(review): the "{0:l}"/"{0:u}" format specs are applied before
        # the Str(col) conversion below, so the items of glo.col_options
        # must already support these custom format codes -- confirm.
        name_width = "rs_width_{0:l}".format(col)
        width_model = np.loadtxt(os.path.join(glo.DIR_MODELS, name_width))
        # The as_matrix() method converts each pandas.series to a np.array.
        # NOTE(review): DataFrame/Series.as_matrix() was removed in pandas
        # 1.0 -- migrate to .values or .to_numpy().
        z_model = models['REDSHIFT'].as_matrix()
        norm_model = models['NORMALISATION_{0:u}'.format(col)].as_matrix()
        slope_model = models['SLOPE_{0:u}'.format(col)].as_matrix()
        config = settings.loc[col, :]
        # For easy formatting.
        col = Str(col)
        # Increase minimum intrinsic scatter.
        min_width_model = config.loc['MIN_RS_MODEL_WIDTH']
        # The following warning is to be expected, don't worry as it is masked.
        # RuntimeWarning: invalid value encountered in less.
        width_model[np.ma.masked_invalid(
            width_model) < min_width_model] = min_width_model
        # The red sequence model widths begin to break down at high redshift
        # so a x_lim is enforced to prevent extrapolating into this regime.
        idx_max_width = int(config.loc['MAX_RS_MODEL_WIDTH_IDX'])
        for j, z in enumerate(z_bins):
            # Determine idx corresponding to the the redshift step in the red
            # sequence
            # model data that is most similar redshift of the candidate.
            idx_model = np.argmin(np.absolute(z_model - z))
            # Determine the col distance from the red sequence.
            # Imagine col (y-axis) vs magnitude (x-axis) with y = mx + c
            # mag_auto_i = np.arrange(10, 23, 1)
            col_model = (slope_model[idx_model] * mag_auto_i) + norm_model[
                idx_model]
            idx_candidate = np.argmin(np.absolute(z_bins - z))
            idx_galaxy = np.nanmin([idx_candidate, idx_max_width])
            red_sequence_width = width_model[idx_galaxy]
            # Filter out NaN values before interpolating. Note ~ is the
            # invert operator.
            idx_interpol = ~np.isnan(red_sequence_width)
            # Interpolate data with a piecewise cubic polynomial to generate new
            # data points for each of the i mag auto values.
            # NOTE(review): col_scatter is computed but never used -- this
            # interpolation is dead code kept for reference.
            interpolate_col = CubicSpline(i_bins[idx_interpol],
                                          red_sequence_width[idx_interpol])
            col_scatter = interpolate_col(mag_auto_i)
            cb = axs[i].plot(mag_auto_i, col_model, color=c_norm[j])
        axs[i].set_ylabel('{0:l} - {1:l}'.format(col[0], col[1]))
        axs[i].set_xlim(17, 23)
        axs[i].set_ylim(0, 2)
        axs[i].set_yticks(
            [0, 0.5, 1, 1.5, 2])  # axs[i].set_xticks([17, 19, 21, 23])
        # plt.colorbar(cb, ax=axs[i])  # , orientation='vertical', #  #
        # ticks=[0, 0.2, 0.4, 0.6])
    ax2.set_xlabel('i')
    sm = plt.cm.ScalarMappable(cmap=glo.cm,
                               norm=plt.Normalize(vmin=0, vmax=np.max(z_bins)))
    # fake up the array of the scalar mappable. Urgh…
    sm._A = []
    # plt.colorbar(sm)
    fig.subplots_adjust(right=0.8)
    # cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    cb_fig = fig.colorbar(sm, cax=cbar_ax)
    cb_fig.ax.set_title('z')
    # cbar.set_label('Redshift')
    # cbar.ax.set_yticks()
    # cbar.ax.set_yticklabels(['0', '0.2', '0.4', '0.6'])
    # plt.tight_layout()
    data_out = os.path.join(glo.DIR_FIGS, 'models.png')
    plt.savefig(data_out, format='png', dpi=300)
    if seperate is True:
        # Additionally save each panel as its own image, expanded so the
        # axis labels fit inside the crop.
        for i, ax in enumerate([ax0, ax1, ax2]):
            if i == 2:
                foo = 1.4
            else:
                foo = 1.2
            extent = ax.get_window_extent().transformed(
                fig.dpi_scale_trans.inverted())
            fig.savefig(
                os.path.join(glo.DIR_FIGS, 'models.ax{0}.png'.format(i)),
                bbox_inches=extent.expanded(1.2, foo), dpi=800)
if __name__ == '__main__':
plot_models_poster() | en | 0.489589 | # This backend is required for X11 forwarding. # plt.switch_backend('Agg') # def plot_models(name_models="rs_norm_slope"): # matplotlib.rcParams.update({ # 'font.size': 25}) # # fig = plt.figure(figsize=(45, 15)) # ax0 = fig.add_subplot(141) # ax1 = fig.add_subplot(142) # ax2 = fig.add_subplot(143) # axs = [ax0, ax1, ax2] # # # The models of red sequence width. # # Redshift bins. # z_bins = np.array([(0.01 * i) + 0.05 for i in range(75)]) # # Magnitude bins, this is required for the interpolation step. # i_bins = np.array([(0.5 * i) + 14.75 for i in range(18)]) # # Plot every nth point # # n = 10 # # mag_auto_i = i_bins[0::n] # mag_auto_i = i_bins # # # cmap = plt.get_cmap('plasma') # cmap = glo.cm # divider = make_axes_locatable(ax2) # cax = divider.append_axes('right', size='5%', pad=0.15) # normal = plt.Normalize(vmin=0, vmax=np.min(z_bins)) # c_norm = cmap(plt.Normalize(min(z_bins), max(z_bins))(z_bins)) # norm = mpl.colors.Normalize(vmin=0, vmax=np.min(z_bins)) # # # Load variables from red sequence models Tables are also numpy arrays. # path_models = os.path.join(glo.dir_models, name_models) # models = pd.read_table(path_models, delim_whitespace=True, header=0) # # settings = { # 'MIN_MAGERR_DETMODEL': [0.05, 0.05, 0.03], # 'CORRECTION_MAG_DETMODEL': [0.045091365, -0.052124453, 0.019468499], # 'MIN_RS_MODEL_WIDTH': [0.15, 0.1, 0.05], # 'MAX_RS_MODEL_WIDTH_IDX': [70, 55, 70]} # # settings = pd.DataFrame.from_dict(settings).set_index([glo.col_options]) # for i, col in enumerate(glo.col_options): # name_width = "rs_width_{0:l}".format(col) # width_model = np.loadtxt(os.path.join(glo.dir_models, name_width)) # # The as_matrix() method converts each pandas.series to a np.array. # z_model = models['REDSHIFT'].as_matrix() # norm_model = models['NORMALISATION_{0:u}'.format(col)].as_matrix() # slope_model = models['SLOPE_{0:u}'.format(col)].as_matrix() # # config = settings.loc[col, :] # # # For easy formatting. 
# col = Str(col) # # # Increase minimum intrinsic scatter. # min_width_model = config.loc['MIN_RS_MODEL_WIDTH'] # # # The following warning is to be expected, don't worry as it is # masked. # # RuntimeWarning: invalid value encountered in less. # width_model[np.ma.masked_invalid( # width_model) < min_width_model] = min_width_model # # # The red sequence model widths begin to break down at high redshift # # so a x_lim is enforced to prevent extrapolating into this regime. # idx_max_width = int(config.loc['MAX_RS_MODEL_WIDTH_IDX']) # # for j, z in enumerate(z_bins): # # Determine idx corresponding to the the redshift step in the red # # sequence # # model data that is most similar redshift of the candidate. # idx_model = np.argmin(np.absolute(z_model - z)) # # Determine the col distance from the red sequence. # # Imagine col (y-axis) vs magnitude (x-axis) with y = mx + c # # mag_auto_i = np.arrange(10, 23, 1) # col_model = (slope_model[idx_model] * mag_auto_i) + norm_model[ # idx_model] # # idx_candidate = np.argmin(np.absolute(z_bins - z)) # idx_galaxy = np.nanmin([idx_candidate, idx_max_width]) # red_sequence_width = width_model[idx_galaxy] # # # Filter out NaN values before interpolating. Note ~ is the # # invert operator. # idx_interpol = ~np.isnan(red_sequence_width) # # # Interpolate data with a piecewise cubic polynomial to # generate new # # data points for each of the i mag auto values. 
# interpolate_col = CubicSpline(i_bins[idx_interpol], # red_sequence_width[idx_interpol]) # col_scatter = interpolate_col(mag_auto_i) # # axs[i].plot(mag_auto_i, col_model, color=c_norm[j]) # axs[i].set_xlabel('i') # axs[i].set_ylabel('{0:l} - {1:l}'.format(col[0], col[1])) # axs[i].set_xlim(17, 23) # axs[i].set_ylim(0, 2) # axs[i].set_yticks([0, 0.5, 1, 1.5, 2]) # axs[i].set_xticks([17, 19, 21, 23]) # # cbar = mpl.colorbar.ColorbarBase(ax=cax, cmap=cmap, norm=norm, # orientation='vertical', # ticks=[0, 0.2, 0.4, 0.6]) # cbar.set_label('Redshift') # # cbar.ax.set_yticks() # # cbar.ax.set_yticklabels(['0', '0.2', '0.4', '0.6']) # # data_out = os.path.join(glo.dir_figs, 'models.png') # plt.savefig(data_out, format='png', dpi=300) # fig.tight_layout(rect=[0, 0.03, 1, 0.95]) # The models of red sequence width. # Redshift bins. # Magnitude bins, this is required for the interpolation step. # Plot every nth point # n = 10 # mag_auto_i = i_bins[0::n] # cmap = plt.get_cmap('plasma') # TODO: add a single redshift axes to the entire subplot # divider = make_axes_locatable(ax2) # cax = divider.append_axes('right', size='5%', pad=0.15) # Load variables from red sequence models Tables are also numpy arrays. # The as_matrix() method converts each pandas.series to a np.array. # For easy formatting. # Increase minimum intrinsic scatter. # The following warning is to be expected, don't worry as it is masked. # RuntimeWarning: invalid value encountered in less. # The red sequence model widths begin to break down at high redshift # so a x_lim is enforced to prevent extrapolating into this regime. # Determine idx corresponding to the the redshift step in the red # sequence # model data that is most similar redshift of the candidate. # Determine the col distance from the red sequence. # Imagine col (y-axis) vs magnitude (x-axis) with y = mx + c # mag_auto_i = np.arrange(10, 23, 1) # Filter out NaN values before interpolating. Note ~ is the # invert operator. 
# Interpolate data with a piecewise cubic polynomial to generate new # data points for each of the i mag auto values. # axs[i].set_xticks([17, 19, 21, 23]) # plt.colorbar(cb, ax=axs[i]) # , orientation='vertical', # # # ticks=[0, 0.2, 0.4, 0.6]) # fake up the array of the scalar mappable. Urgh… # plt.colorbar(sm) # cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7]) # cbar.set_label('Redshift') # cbar.ax.set_yticks() # cbar.ax.set_yticklabels(['0', '0.2', '0.4', '0.6']) # plt.tight_layout() | 1.988422 | 2 |
miscreant/block.py | miscreant/miscreant.py | 5 | 6612589 | """block.py: A 128-bit block (i.e. for AES)"""
from struct import (pack, unpack)
from cryptography.hazmat.primitives.ciphers import Cipher
from typing import Optional, Union
from . import ct
# Size of an AES block in bytes
SIZE = 16
# Minimal irreducible polynomial for a 128-bit block size
R = 0x87
def _validate_bytes_or_bytearray(value):
# type: (Union[bytearray, bytes]) -> bytearray
if isinstance(value, bytes):
value = bytearray(value)
elif not isinstance(value, bytearray):
raise TypeError("value must be bytes or bytearray")
if len(value) != SIZE:
raise ValueError("value must be 16-bytes")
return value
class Block(object):
"""128-bit AES blocks"""
def __init__(self, data=None):
# type: (Union[bytearray, bytes, None]) -> None
if data is None:
self.data = bytearray(SIZE)
else:
self.data = _validate_bytes_or_bytearray(data)
def clear(self):
# type: () -> None
"""Reset the value of this block to all zeroes"""
for i in range(SIZE):
self.data[i] = 0
def copy(self, other_block):
# type: (Block) -> None
"""Copy the contents of another block into this block"""
if not isinstance(other_block, Block):
raise TypeError("can only copy from other Blocks")
self.data[:] = other_block.data
def clone(self):
# type: () -> Block
"""Make another block with the same contents as this block"""
other = Block()
other.copy(self)
return other
def dbl(self):
# type: () -> None
"""Double a value over GF(2^128):
a<<1 if firstbit(a)=0
(a<<1) xor (0**120)10000111 if firstbit(a)=1
"""
overflow = 0
words = unpack(b"!LLLL", self.data)
output_words = []
for word in reversed(words):
new_word = (word << 1) & 0xFFFFFFFF
new_word |= overflow
overflow = int((word & 0x80000000) >= 0x80000000)
output_words.append(new_word)
self.data = bytearray(pack(b"!LLLL", *reversed(output_words)))
self.data[-1] ^= ct.select(overflow, R, 0)
def encrypt(self, cipher):
# type: (Cipher) -> None
"""Encrypt this block in-place with the given cipher"""
# TODO: more efficient in-place encryption options?
encryptor = cipher.encryptor()
self.data = bytearray(encryptor.update(bytes(self.data)) + encryptor.finalize())
def xor_in_place(self, value):
# type: (Union[Block, bytearray, bytes]) -> None
"""XOR the given data into the current block in-place"""
if isinstance(value, Block):
value = value.data
else:
value = _validate_bytes_or_bytearray(value)
for i in range(SIZE):
self.data[i] ^= value[i]
| """block.py: A 128-bit block (i.e. for AES)"""
from struct import (pack, unpack)
from cryptography.hazmat.primitives.ciphers import Cipher
from typing import Optional, Union
from . import ct
# Size of an AES block in bytes
SIZE = 16
# Minimal irreducible polynomial for a 128-bit block size
R = 0x87
def _validate_bytes_or_bytearray(value):
    # type: (Union[bytearray, bytes]) -> bytearray
    """Return *value* as a mutable 16-byte bytearray, raising on bad input."""
    if isinstance(value, bytes):
        value = bytearray(value)
    elif not isinstance(value, bytearray):
        raise TypeError("value must be bytes or bytearray")
    if len(value) != SIZE:
        raise ValueError("value must be 16-bytes")
    return value
class Block(object):
    """128-bit AES blocks"""
    def __init__(self, data=None):
        # type: (Union[bytearray, bytes, None]) -> None
        # ``data`` is validated into a 16-byte bytearray; None yields an
        # all-zero block.
        if data is None:
            self.data = bytearray(SIZE)
        else:
            self.data = _validate_bytes_or_bytearray(data)
    def clear(self):
        # type: () -> None
        """Reset the value of this block to all zeroes"""
        for i in range(SIZE):
            self.data[i] = 0
    def copy(self, other_block):
        # type: (Block) -> None
        """Copy the contents of another block into this block"""
        if not isinstance(other_block, Block):
            raise TypeError("can only copy from other Blocks")
        self.data[:] = other_block.data
    def clone(self):
        # type: () -> Block
        """Make another block with the same contents as this block"""
        other = Block()
        other.copy(self)
        return other
    def dbl(self):
        # type: () -> None
        """Double a value over GF(2^128):
        a<<1 if firstbit(a)=0
        (a<<1) xor (0**120)10000111 if firstbit(a)=1
        """
        overflow = 0
        # Big-endian unpack into four 32-bit words.
        words = unpack(b"!LLLL", self.data)
        output_words = []
        # Walk from the least- to the most-significant word, shifting left
        # one bit and carrying the dropped top bit into the next word up.
        for word in reversed(words):
            new_word = (word << 1) & 0xFFFFFFFF
            new_word |= overflow
            overflow = int((word & 0x80000000) >= 0x80000000)
            output_words.append(new_word)
        self.data = bytearray(pack(b"!LLLL", *reversed(output_words)))
        # When the top bit fell off, XOR the reduction constant R into the
        # low byte.  NOTE(review): ct.select appears to be a constant-time
        # select -- confirm against the ct module.
        self.data[-1] ^= ct.select(overflow, R, 0)
    def encrypt(self, cipher):
        # type: (Cipher) -> None
        """Encrypt this block in-place with the given cipher"""
        # TODO: more efficient in-place encryption options?
        encryptor = cipher.encryptor()
        self.data = bytearray(encryptor.update(bytes(self.data)) + encryptor.finalize())
    def xor_in_place(self, value):
        # type: (Union[Block, bytearray, bytes]) -> None
        """XOR the given data into the current block in-place"""
        if isinstance(value, Block):
            value = value.data
        else:
            value = _validate_bytes_or_bytearray(value)
        for i in range(SIZE):
            self.data[i] ^= value[i]
| en | 0.650632 | block.py: A 128-bit block (i.e. for AES) # Size of an AES block in bytes # Minimal irreducible polynomial for a 128-bit block size # type: (Union[bytearray, bytes]) -> bytearray 128-bit AES blocks # type: (Union[bytearray, bytes, None]) -> None # type: () -> None Reset the value of this block to all zeroes # type: (Block) -> None Copy the contents of another block into this block # type: () -> Block Make another block with the same contents as this block # type: () -> None Double a value over GF(2^128): a<<1 if firstbit(a)=0 (a<<1) xor (0**120)10000111 if firstbit(a)=1 # type: (Cipher) -> None Encrypt this block in-place with the given cipher # TODO: more efficient in-place encryption options? # type: (Union[Block, bytearray, bytes]) -> None XOR the given data into the current block in-place | 3.447169 | 3 |
src/pydp/algorithms/laplacian/_bounded_algorithms.py | levzlotnik/PyDP | 326 | 6612590 | # pydp relative
from .._algorithm import BoundedAlgorithm
class BoundedMean(BoundedAlgorithm):
    """
    BoundedMean computes the average of values in a dataset, in a differentially private manner.
    Incrementally provides a differentially private average.
    All input values are normalized to be their difference from the middle of the
    input range. That allows us to calculate the sum of all input values with
    half the sensitivity it would otherwise take for better accuracy (as compared
    to doing noisy sum / noisy count). This algorithm is taken from section 2.5.5
    of the following book (algorithm 2.4):
    https://books.google.com/books?id=WFttDQAAQBAJ&pg=PA24#v=onepage&q&f=false
    """
    pass
class BoundedSum(BoundedAlgorithm):
    """
    BoundedSum computes the sum of values in a dataset, in a differentially private manner.
    Incrementally provides a differentially private sum, clamped between upper
    and lower values. Bounds can be manually set or privately inferred.
    """
    pass
class BoundedStandardDeviation(BoundedAlgorithm):
    """
    BoundedStandardDeviation computes the standard deviation of values in a dataset, in a differentially private manner.
    Incrementally provides a differentially private standard deviation for values
    in the range [lower..upper]. Values outside of this range will be clamped so
    they lie in the range. The output will also be clamped between 0 and (upper -
    lower).
    The implementation simply computes the bounded variance and takes the square
    root, which is differentially private by the post-processing theorem. It
    relies on the fact that the bounded variance algorithm guarantees that the
    output is non-negative.
    """
    pass
class BoundedVariance(BoundedAlgorithm):
    """
    BoundedVariance computes the variance of values in a dataset, in a differentially private manner.
    Incrementally provides a differentially private variance for values in the
    range [lower..upper]. Values outside of this range will be clamped so they
    lie in the range. The output will also be clamped between 0 and (upper -
    lower)^2. Since the result is guaranteed to be positive, this algorithm can
    be used to compute a differentially private standard deviation.
    The algorithm uses O(1) memory and runs in O(n) time where n is the size of
    the dataset, making it a fast and efficient. The amount of noise added grows
    quadratically in (upper - lower) and decreases linearly in n, so it might not
    produce good results unless n >> (upper - lower)^2.
    The algorithm is a variation of the algorithm for differentially private mean
    from "Differential Privacy: From Theory to Practice", section 2.5.5:
    https://books.google.com/books?id=WFttDQAAQBAJ&pg=PA24#v=onepage&q&f=false
    """
    pass
class Max(BoundedAlgorithm):
    """
    Max computes the maximum value in the dataset, in a differentially private manner.
    """
    pass
class Min(BoundedAlgorithm):
    """
    Min computes the minimum value in the dataset, in a differentially private manner.
    """
    pass
class Median(BoundedAlgorithm):
    """
    Median computes the median value in the dataset, in a differentially private manner.
    """
    pass
| # pydp relative
from .._algorithm import BoundedAlgorithm
class BoundedMean(BoundedAlgorithm):
"""
BoundedMean computes the average of values in a dataset, in a differentially private manner.
Incrementally provides a differentially private average.
All input vales are normalized to be their difference from the middle of the
input range. That allows us to calculate the sum of all input values with
half the sensitivity it would otherwise take for better accuracy (as compared
to doing noisy sum / noisy count). This algorithm is taken from section 2.5.5
of the following book (algorithm 2.4):
https://books.google.com/books?id=WFttDQAAQBAJ&pg=PA24#v=onepage&q&f=false
"""
pass
class BoundedSum(BoundedAlgorithm):
"""
BoundedSum computes the sum of values in a dataset, in a differentially private manner.
Incrementally provides a differentially private sum, clamped between upper
and lower values. Bounds can be manually set or privately inferred.
"""
pass
class BoundedStandardDeviation(BoundedAlgorithm):
"""
BoundedStandardDeviation computes the standard deviation of values in a dataset, in a differentially private manner.
Incrementally provides a differentially private standard deviation for values
in the range [lower..upper]. Values outside of this range will be clamped so
they lie in the range. The output will also be clamped between 0 and (upper -
lower).
The implementation simply computes the bounded variance and takes the square
root, which is differentially private by the post-processing theorem. It
relies on the fact that the bounded variance algorithm guarantees that the
output is non-negative.
"""
pass
class BoundedVariance(BoundedAlgorithm):
"""
BoundedVariance computes the variance of values in a dataset, in a differentially private manner.
Incrementally provides a differentially private variance for values in the
range [lower..upper]. Values outside of this range will be clamped so they
lie in the range. The output will also be clamped between 0 and (upper -
lower)^2. Since the result is guaranteed to be positive, this algorithm can
be used to compute a differentially private standard deviation.
The algorithm uses O(1) memory and runs in O(n) time where n is the size of
the dataset, making it a fast and efficient. The amount of noise added grows
quadratically in (upper - lower) and decreases linearly in n, so it might not
produce good results unless n >> (upper - lower)^2.
The algorithm is a variation of the algorithm for differentially private mean
from "Differential Privacy: From Theory to Practice", section 2.5.5:
https://books.google.com/books?id=WFttDQAAQBAJ&pg=PA24#v=onepage&q&f=false
"""
pass
class Max(BoundedAlgorithm):
"""
Max computes the Max value in the dataset, in a differentially private manner.
"""
pass
class Min(BoundedAlgorithm):
"""
Min computes the minium value in the dataset, in a differentially private manner.
"""
pass
class Median(BoundedAlgorithm):
    """
    Median computes the median value in the dataset, in a differentially
    private manner.
    """
    pass
| en | 0.869469 | # pydp relative BoundedMean computes the average of values in a dataset, in a differentially private manner. Incrementally provides a differentially private average. All input vales are normalized to be their difference from the middle of the input range. That allows us to calculate the sum of all input values with half the sensitivity it would otherwise take for better accuracy (as compared to doing noisy sum / noisy count). This algorithm is taken from section 2.5.5 of the following book (algorithm 2.4): https://books.google.com/books?id=WFttDQAAQBAJ&pg=PA24#v=onepage&q&f=false BoundedSum computes the sum of values in a dataset, in a differentially private manner. Incrementally provides a differentially private sum, clamped between upper and lower values. Bounds can be manually set or privately inferred. BoundedStandardDeviation computes the standard deviation of values in a dataset, in a differentially private manner. Incrementally provides a differentially private standard deviation for values in the range [lower..upper]. Values outside of this range will be clamped so they lie in the range. The output will also be clamped between 0 and (upper - lower). The implementation simply computes the bounded variance and takes the square root, which is differentially private by the post-processing theorem. It relies on the fact that the bounded variance algorithm guarantees that the output is non-negative. BoundedVariance computes the variance of values in a dataset, in a differentially private manner. Incrementally provides a differentially private variance for values in the range [lower..upper]. Values outside of this range will be clamped so they lie in the range. The output will also be clamped between 0 and (upper - lower)^2. Since the result is guaranteed to be positive, this algorithm can be used to compute a differentially private standard deviation. 
The algorithm uses O(1) memory and runs in O(n) time where n is the size of the dataset, making it a fast and efficient. The amount of noise added grows quadratically in (upper - lower) and decreases linearly in n, so it might not produce good results unless n >> (upper - lower)^2. The algorithm is a variation of the algorithm for differentially private mean from "Differential Privacy: From Theory to Practice", section 2.5.5: https://books.google.com/books?id=WFttDQAAQBAJ&pg=PA24#v=onepage&q&f=false Max computes the Max value in the dataset, in a differentially private manner. Min computes the minium value in the dataset, in a differentially private manner. Median computes the Median value in the dataset, in a differentially private manner. | 3.462462 | 3 |
bin/multi_init_phot.py | wisemanp/des_stacks | 1 | 6612591 | <filename>bin/multi_init_phot.py<gh_stars>1-10
import numpy as np
import pandas as pd
import subprocess
import glob
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from astropy.coordinates import SkyCoord
import logging
from astropy.table import Table
import astropy.io.fits as fits
import os
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
from astropy import wcs
from des_stacks import des_stack as stack
from des_stacks.bin import stack_all
from des_stacks.utils import stack_tools,source_tools,gen_tools
from des_stacks.analysis import astro
from des_stacks.utils.gen_tools import mc_robust_median as r_median
import time
import _pickle as cpickle
import itertools
import multiprocessing
from multiprocessing import Process
import pathos.pools as pp
bands = gen_tools.get_des_bands()
# CCDs 2, 31 and 61 are excluded -- presumably the known bad DECam chips;
# TODO confirm against survey documentation.
good_des_chips = [c for c in range(1, 63) if c not in (2, 31, 61)]
fields = ['E1', 'E2']  # ,'S1','S2','C1','C2','C3','X1','X2','X3'
# NOTE(review): this literal silently overwrites the gen_tools.get_des_bands()
# value assigned above -- confirm the hard-coded subset is intentional.
bands = ['g', 'r', 'i', 'z']
# Accumulates [my, field, band, chip] entries for catalogues whose initial
# photometry failed (appended by init_phot_worker).
bad_cats = []
def init_phot_worker(arg_pair):
    """Run initial photometry for a single chip of one stack.

    Parameters
    ----------
    arg_pair : sequence
        ``[args, chip]`` where ``args`` is ``[my, field, band, cuts]``
        (``cuts`` is a dict with at least ``'teff'`` and ``'psf'`` keys)
        and ``chip`` is the CCD number.

    Returns
    -------
    None. Failures are recorded in the module-level ``bad_cats`` list.
    """
    args, chip = arg_pair[0], arg_pair[1]
    my, f, b, cuts = args
    ch = int(chip)
    bd = os.path.join('/media/data3/wiseman/des/coadding/5yr_stacks/MY%s/' % my, f, b)
    cat_fn = os.path.join(bd, str(chip), 'ana',
                          'MY%s_%s_%s_%s_%s_%.1f_clipweighted_sci.sourcecat' % (my, f, b,
                                                                                str(ch), cuts['teff'], cuts['psf']))
    s = stack.Stack(f, b, my, ch, 'coadding', cuts, db=False, new=True)
    # NOTE(review): Stack() already receives cuts in its constructor --
    # confirm this explicit attribute assignment is still required.
    s.cuts = cuts
    res_fn = os.path.join(bd, str(chip), 'ana', '%s_%s_%s_%s_init_wgtd.result' % (my, f, b, chip))
    seeing_fn = res_fn.replace('wgtd', 'seeing')
    # Archive the previous result file once; also guard against a missing
    # source file so a fresh run does not crash inside os.rename.
    if not os.path.isfile(seeing_fn) and os.path.isfile(res_fn):
        os.rename(res_fn, seeing_fn)
    try:
        cat = Table.read(cat_fn).to_pandas()
        astro.init_phot(s, str(chip), cat)
    except Exception:
        # Was a bare ``except:`` -- narrowed so KeyboardInterrupt/SystemExit
        # still propagate; any real failure is recorded for the summary.
        bad_cats.append([my, f, b, chip])
    return
def multi_init_phot(my, f, b, chips):
    """Run :func:`init_phot_worker` over all ``chips`` in parallel.

    Parameters
    ----------
    my : season label passed through to the worker.
    f, b : str
        Field and band names; used to look up the quality cuts.
    chips : iterable
        CCD numbers to process.

    Returns
    -------
    list
        One entry per chip, as returned by ``pool.map`` (the workers
        themselves return None).
    """
    cuts = stack_tools.get_cuts(f, b)
    args = [my, f, b, cuts]
    pool_size = multiprocessing.cpu_count() * 2
    pool = pp.ProcessPool(processes=pool_size,
                          maxtasksperchild=2,
                          )
    # NOTE(review): _clear/_serve are private pathos APIs, apparently used to
    # reset and restart the (possibly cached) pool between calls -- confirm
    # they are still needed with the installed pathos version.
    pool._clear()
    pool._serve()
    all_args = [[args, c] for c in chips]
    results = pool.map(init_phot_worker, all_args)
    pool.close()
    pool.join()
    return results
def main():
    """Run initial photometry for every field/band/season combination.

    Iterates the module-level ``fields`` and ``bands`` lists over seasons
    1-5 and the good DES chips, then prints any catalogues that failed.
    """
    for f in fields:
        f = 'SN-' + f
        for b in bands:
            # The per-band get_cuts() call here was unused: multi_init_phot
            # recomputes the cuts itself with the same (f, b) arguments.
            for y in [1, 2, 3, 4, 5]:
                multi_init_phot(y, f, b, good_des_chips)
    print(bad_cats)
# Script entry point: run the full photometry sweep when executed directly.
if __name__=="__main__":
    main()
| <filename>bin/multi_init_phot.py<gh_stars>1-10
import numpy as np
import pandas as pd
import subprocess
import glob
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from astropy.coordinates import SkyCoord
import logging
from astropy.table import Table
import astropy.io.fits as fits
import os
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
from astropy import wcs
from des_stacks import des_stack as stack
from des_stacks.bin import stack_all
from des_stacks.utils import stack_tools,source_tools,gen_tools
from des_stacks.analysis import astro
from des_stacks.utils.gen_tools import mc_robust_median as r_median
import time
import _pickle as cpickle
import itertools
import multiprocessing
from multiprocessing import Process
import pathos.pools as pp
bands = gen_tools.get_des_bands()
good_des_chips = []
for c in range(1,63):
if c not in [2,31,61]:
good_des_chips.append(c)
fields = ['E1','E2']#,'S1','S2','C1','C2','C3','X1','X2','X3']
bands = ['g','r','i','z']
bad_cats = []
def init_phot_worker(arg_pair):
args, chip = arg_pair[0],arg_pair[1]
my,f,b,cuts = [args[i] for i in range(len(args))]
ch = int(chip)
bd = os.path.join('/media/data3/wiseman/des/coadding/5yr_stacks/MY%s/'%my,f,b)
cat_fn = os.path.join(bd,str(chip),'ana',
'MY%s_%s_%s_%s_%s_%.1f_clipweighted_sci.sourcecat'%(my,f,b,
str(ch),cuts['teff'],cuts['psf']))
s = stack.Stack(f,b,my,ch,'coadding',cuts,db=False,new=True)
s.cuts = cuts
res_fn = os.path.join(bd,str(chip),'ana','%s_%s_%s_%s_init_wgtd.result'%(my,f,b,chip))
seeing_fn = res_fn.replace('wgtd','seeing')
if not os.path.isfile(seeing_fn):
os.rename(res_fn,seeing_fn)
try:
cat = Table.read(cat_fn).to_pandas()
astro.init_phot(s,str(chip),cat)
except:
bad_cats.append([my,f,b,chip])
return
def multi_init_phot(my,f,b,chips):
#cuts = {'psf':1.3,'teff':0.02}
cuts =stack_tools.get_cuts(f,b)
args = [my,f,b,cuts]
pool_size = multiprocessing.cpu_count()*2
act = multiprocessing.active_children()
pool = pp.ProcessPool(processes=pool_size,
maxtasksperchild=2,
)
pool._clear()
pool._serve()
chips = list(chips)
all_args = []
for c in chips:
all_args.append([args,c])
#p = Process(target=worker,args=(args,c))
#p.start()
#p.join()
results = pool.map(init_phot_worker,all_args)
pool.close()
pool.join()
return results
def main():
for f in fields:
f = 'SN-'+f
for b in bands:
cuts =stack_tools.get_cuts(f,b)
for y in [1,2,3,4,5]:
#cuts = {'teff':0.02,'psf':1.3}
multi_init_phot(y,f,b,good_des_chips)
print(bad_cats)
if __name__=="__main__":
main()
| en | 0.47672 | #,'S1','S2','C1','C2','C3','X1','X2','X3'] #cuts = {'psf':1.3,'teff':0.02} #p = Process(target=worker,args=(args,c)) #p.start() #p.join() #cuts = {'teff':0.02,'psf':1.3} | 1.705611 | 2 |
magus_kalkulator/limbs_table.py | miklosduma/magus | 0 | 6612592 | <filename>magus_kalkulator/limbs_table.py
"""
Limb penalties table.
"""
import copy

import magus_kalkulator.magus_constants as mgc
# Damage thresholds paired with the effect lists below; presumably the damage
# tiers selecting an index into each limb's effect list -- TODO confirm usage.
VEGTAG_THRESHOLDS = [50, 25, 17, 9]


def _mirror_limbs(leg_effects, arm_effects):
    """Build a per-limb effect table from one leg list and one arm list.

    In the original literal table every right/left limb pair carried
    byte-identical entries; this helper removes that duplication. Left limbs
    receive deep copies so the four entries remain independently mutable,
    exactly as the original separate literals were.
    """
    return {
        mgc.RLEG: leg_effects,
        mgc.LLEG: copy.deepcopy(leg_effects),
        mgc.RARM: arm_effects,
        mgc.LARM: copy.deepcopy(arm_effects),
    }


# Attack type -> limb -> effect list (one entry per damage tier, mildest
# first). Legs additionally get movement-reduction effects; arms get pain
# and handicap effects.
VEGTAG_TABLA = {
    mgc.SLASH: _mirror_limbs(
        [mgc.NULL_HANDICAP,
         [mgc.SLIGHT_BLEEDING,
          mgc.REDUCE_80],
         # NOTE(review): SLIGHT_PAIN appears twice in this tier in the
         # original table -- confirm that is intended.
         [mgc.SLIGHT_PAIN,
          mgc.PARTIAL_NUMBNESS_1,
          mgc.SLIGHT_PAIN,
          mgc.REDUCE_60],
         [mgc.MODERATE_BLEEDING,
          mgc.NUMBNESS_1,
          mgc.REDUCE_30],
         mgc.MAIMING],
        [mgc.NULL_HANDICAP,
         mgc.SLIGHT_BLEEDING,
         [mgc.SLIGHT_BLEEDING,
          mgc.PARTIAL_NUMBNESS_1,
          mgc.SLIGHT_PAIN],
         [mgc.MODERATE_BLEEDING,
          mgc.NUMBNESS_1,
          mgc.MODERATE_PAIN],
         mgc.MAIMING]),
    mgc.THRUST: _mirror_limbs(
        [mgc.NULL_HANDICAP,
         [mgc.EXTRA_K6,
          mgc.REDUCE_80],
         [mgc.SLIGHT_BLEEDING,
          mgc.PARTIAL_NUMBNESS_1,
          mgc.REDUCE_60],
         [mgc.SLIGHT_BLEEDING,
          mgc.NUMBNESS_1,
          mgc.MODERATE_PAIN,
          mgc.REDUCE_40],
         mgc.LIMB_PARALYSIS],
        [mgc.NULL_HANDICAP,
         [mgc.SLIGHT_BLEEDING,
          mgc.SLIGHT_HANDICAP_1],
         [mgc.SLIGHT_BLEEDING,
          mgc.PARTIAL_NUMBNESS_1,
          mgc.SLIGHT_PAIN],
         [mgc.MODERATE_BLEEDING,
          mgc.NUMBNESS_1,
          mgc.MODERATE_PAIN],
         mgc.MAIMING]),
    mgc.BLUDGEON: _mirror_limbs(
        [mgc.NULL_HANDICAP,
         [mgc.SLIGHT_HANDICAP,
          mgc.REDUCE_80],
         [mgc.SLIGHT_PAIN,
          mgc.PARTIAL_NUMBNESS_1,
          mgc.REDUCE_50],
         [mgc.NUMBNESS_1,
          mgc.MODERATE_PAIN,
          mgc.REDUCE_30],
         mgc.LIMB_PARALYSIS],
        [mgc.NULL_HANDICAP,
         mgc.SLIGHT_HANDICAP_1,
         [mgc.PARTIAL_NUMBNESS_1,
          mgc.SLIGHT_PAIN],
         [mgc.SLIGHT_BLEEDING,
          mgc.NUMBNESS_1,
          mgc.MODERATE_PAIN],
         mgc.LIMB_PARALYSIS]),
    mgc.CLAW: _mirror_limbs(
        [mgc.NULL_HANDICAP,
         [mgc.SLIGHT_BLEEDING,
          mgc.REDUCE_90],
         [mgc.SLIGHT_BLEEDING,
          mgc.PARTIAL_NUMBNESS_1,
          mgc.SLIGHT_PAIN,
          mgc.REDUCE_60],
         [mgc.MODERATE_BLEEDING,
          mgc.NUMBNESS_1,
          mgc.SLIGHT_PAIN,
          mgc.REDUCE_30],
         mgc.LIMB_PARALYSIS],
        [mgc.NULL_HANDICAP,
         [mgc.SLIGHT_BLEEDING,
          mgc.EXTRA_K6],
         [mgc.SLIGHT_BLEEDING,
          mgc.PARTIAL_NUMBNESS_1,
          mgc.SLIGHT_PAIN],
         [mgc.MODERATE_BLEEDING,
          mgc.NUMBNESS_1,
          mgc.MODERATE_PAIN],
         mgc.LIMB_PARALYSIS]),
    mgc.BITE: _mirror_limbs(
        [mgc.NULL_HANDICAP,
         [mgc.EXTRA_K6,
          mgc.REDUCE_90],
         [mgc.SLIGHT_BLEEDING,
          mgc.PARTIAL_NUMBNESS_1,
          mgc.SLIGHT_PAIN,
          mgc.REDUCE_50],
         [mgc.MODERATE_BLEEDING,
          mgc.NUMBNESS_1,
          mgc.MODERATE_PAIN,
          mgc.REDUCE_30],
         mgc.MAIMING],
        [mgc.NULL_HANDICAP,
         [mgc.SLIGHT_BLEEDING,
          mgc.SLIGHT_HANDICAP_1],
         [mgc.SLIGHT_BLEEDING,
          mgc.PARTIAL_NUMBNESS_1,
          mgc.SLIGHT_PAIN],
         [mgc.MODERATE_BLEEDING,
          mgc.NUMBNESS_1,
          mgc.MODERATE_PAIN],
         mgc.MAIMING]),
}
| <filename>magus_kalkulator/limbs_table.py
"""
Limb penalties table.
"""
import magus_kalkulator.magus_constants as mgc
VEGTAG_THRESHOLDS = [50, 25, 17, 9]
VEGTAG_TABLA = {
mgc.SLASH: {
mgc.RLEG: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_BLEEDING,
mgc.REDUCE_80],
[mgc.SLIGHT_PAIN,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN,
mgc.REDUCE_60],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.REDUCE_30],
mgc.MAIMING],
mgc.LLEG: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_BLEEDING,
mgc.REDUCE_80],
[mgc.SLIGHT_PAIN,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN,
mgc.REDUCE_60],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.REDUCE_30],
mgc.MAIMING],
mgc.RARM: [
mgc.NULL_HANDICAP,
mgc.SLIGHT_BLEEDING,
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN],
mgc.MAIMING],
mgc.LARM: [
mgc.NULL_HANDICAP,
mgc.SLIGHT_BLEEDING,
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN],
mgc.MAIMING]},
mgc.THRUST: {
mgc.RLEG: [
mgc.NULL_HANDICAP,
[mgc.EXTRA_K6,
mgc.REDUCE_80],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.REDUCE_60],
[mgc.SLIGHT_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN,
mgc.REDUCE_40],
mgc.LIMB_PARALYSIS],
mgc.LLEG: [
mgc.NULL_HANDICAP,
[mgc.EXTRA_K6,
mgc.REDUCE_80],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.REDUCE_60],
[mgc.SLIGHT_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN,
mgc.REDUCE_40],
mgc.LIMB_PARALYSIS],
mgc.RARM: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_BLEEDING,
mgc.SLIGHT_HANDICAP_1],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN],
mgc.MAIMING],
mgc.LARM: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_BLEEDING,
mgc.SLIGHT_HANDICAP_1],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN],
mgc.MAIMING]},
mgc.BLUDGEON: {
mgc.RLEG: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_HANDICAP,
mgc.REDUCE_80],
[mgc.SLIGHT_PAIN,
mgc.PARTIAL_NUMBNESS_1,
mgc.REDUCE_50],
[mgc.NUMBNESS_1,
mgc.MODERATE_PAIN,
mgc.REDUCE_30],
mgc.LIMB_PARALYSIS],
mgc.LLEG: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_HANDICAP,
mgc.REDUCE_80],
[mgc.SLIGHT_PAIN,
mgc.PARTIAL_NUMBNESS_1,
mgc.REDUCE_50],
[mgc.NUMBNESS_1,
mgc.MODERATE_PAIN,
mgc.REDUCE_30],
mgc.LIMB_PARALYSIS],
mgc.RARM: [
mgc.NULL_HANDICAP,
mgc.SLIGHT_HANDICAP_1,
[mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN],
[mgc.SLIGHT_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN],
mgc.LIMB_PARALYSIS],
mgc.LARM: [
mgc.NULL_HANDICAP,
mgc.SLIGHT_HANDICAP_1,
[mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN],
[mgc.SLIGHT_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN],
mgc.LIMB_PARALYSIS]},
mgc.CLAW: {
mgc.RLEG: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_BLEEDING,
mgc.REDUCE_90],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN,
mgc.REDUCE_60],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.SLIGHT_PAIN,
mgc.REDUCE_30],
mgc.LIMB_PARALYSIS],
mgc.LLEG: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_BLEEDING,
mgc.REDUCE_90],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN,
mgc.REDUCE_60],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.SLIGHT_PAIN,
mgc.REDUCE_30],
mgc.LIMB_PARALYSIS],
mgc.RARM: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_BLEEDING,
mgc.EXTRA_K6],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN],
mgc.LIMB_PARALYSIS],
mgc.LARM: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_BLEEDING,
mgc.EXTRA_K6],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN],
mgc.LIMB_PARALYSIS]},
mgc.BITE: {
mgc.RLEG: [
mgc.NULL_HANDICAP,
[mgc.EXTRA_K6,
mgc.REDUCE_90],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN,
mgc.REDUCE_50],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN,
mgc.REDUCE_30],
mgc.MAIMING],
mgc.LLEG: [
mgc.NULL_HANDICAP,
[mgc.EXTRA_K6,
mgc.REDUCE_90],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN,
mgc.REDUCE_50],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN,
mgc.REDUCE_30],
mgc.MAIMING],
mgc.RARM: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_BLEEDING,
mgc.SLIGHT_HANDICAP_1],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN],
mgc.MAIMING],
mgc.LARM: [
mgc.NULL_HANDICAP,
[mgc.SLIGHT_BLEEDING,
mgc.SLIGHT_HANDICAP_1],
[mgc.SLIGHT_BLEEDING,
mgc.PARTIAL_NUMBNESS_1,
mgc.SLIGHT_PAIN],
[mgc.MODERATE_BLEEDING,
mgc.NUMBNESS_1,
mgc.MODERATE_PAIN],
mgc.MAIMING]}
}
| en | 0.716405 | Limb penalties table. | 1.588947 | 2 |
monascastatsd/__init__.py | openstack/monasca-statsd | 16 | 6612593 | # Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from monascastatsd import client
from monascastatsd import connection
from monascastatsd import counter
from monascastatsd import gauge
from monascastatsd import metricbase
from monascastatsd import timer
# Re-export the public classes at package level so callers can write
# ``monascastatsd.Client(...)`` instead of importing each submodule.
Client = client.Client
Connection = connection.Connection
Counter = counter.Counter
Gauge = gauge.Gauge
MetricBase = metricbase.MetricBase
Timer = timer.Timer

# Explicit public API of the package.
__all__ = [
    'Client',
    'Connection',
    'Counter',
    'Gauge',
    'MetricBase',
    'Timer'
]
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from monascastatsd import client
from monascastatsd import connection
from monascastatsd import counter
from monascastatsd import gauge
from monascastatsd import metricbase
from monascastatsd import timer
Client = client.Client
Connection = connection.Connection
Counter = counter.Counter
Gauge = gauge.Gauge
MetricBase = metricbase.MetricBase
Timer = timer.Timer
__all__ = [
'Client',
'Connection',
'Counter',
'Gauge',
'MetricBase',
'Timer'
]
| en | 0.843453 | # Copyright 2016 FUJITSU LIMITED # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.449359 | 1 |
advanced_reports/backoffice/conf.py | iandroogmans/django-reports | 1 | 6612594 | from django.conf import settings
DB_IS_POSTGRES = 'postgresql' in settings.DATABASES['default'].get('ENGINE', '')
| from django.conf import settings
DB_IS_POSTGRES = 'postgresql' in settings.DATABASES['default'].get('ENGINE', '')
| none | 1 | 1.239493 | 1 | |
inageoportal/main.py | emhayusa/inageoportal | 0 | 6612595 | <filename>inageoportal/main.py
import click
def welcome():
    """Print the Inageoportal greeting to the console."""
    greeting = 'Welcome to Inageoportal!'
    click.echo(greeting)
# Script entry point: print the greeting when executed directly.
if __name__ == '__main__':
    welcome()
import click
def welcome():
"""Simple program that greets welcome."""
click.echo('Welcome to Inageoportal!')
if __name__ == '__main__':
welcome() | en | 0.821275 | Simple program that greets welcome. | 2.65329 | 3 |
bases/losses.py | kkahloots/Generative-Models-03 | 0 | 6612596 | <gh_stars>0
import tensorflow as tf
import utils.codes as codes
from utils.configuration import default_config as config
## ------------------- LOSS: EXPECTED LOWER BOUND ----------------------
# tsne_cost loss
def get_reconst_loss(x, x_recons, loss_func, epsilon=config.epsilon):
    """Reconstruction loss between the input ``x`` and its reconstruction.

    Two modes:
      * MLE -- negative Bernoulli log-likelihood (the expected
        log-likelihood term of the lower bound), summed over features
        per sample; ``epsilon`` guards the logs against log(0).
      * otherwise (OLS) -- mean pairwise squared error between ``x``
        and ``x_recons``.
    """
    assert loss_func in codes.properties(codes.Losses), \
        'Unsupported reconstuction loss loss_func'
    if loss_func != codes.Losses.MLE:
        return tf.losses.mean_pairwise_squared_error(x, x_recons)
    log_likelihood = x * tf.log(x_recons + epsilon) \
        + (1 - x) * tf.log(1 - x_recons + epsilon)
    return -tf.reduce_sum(log_likelihood, 1)
### ---------------------------------------------- Divergences --------------------------------------------
### ---------------------------------------------- Divergences --------------------------------------------
def get_self_divergence(meanQ, log_varQ, loss_func):
    """Divergence between Q (given by meanQ/log_varQ) and a fixed Bernoulli(1) prior.

    Delegates to :func:`get_divergence` with the prior's mean and variance
    as the P parameters.
    """
    # NOTE(review): doubling suggests callers pass a log-std that is converted
    # to a log-variance here -- confirm against the encoder outputs.
    log_varQ = 2.0*log_varQ
    P = tf.distributions.Bernoulli(probs=tf.ones(meanQ.shape[-1]))
    meanP = P.mean()
    # NOTE(review): the raw variance is passed where get_divergence expects a
    # *log*-variance -- confirm this is intended.
    log_varP = P.variance()
    return get_divergence(meanQ, log_varQ, meanP, log_varP, loss_func)
def get_QP_kl(meanQ, log_varQ, meanP, log_varP):
    """
    KL[Q || P] returns the KL-divergence between the prior p and the variational posterior q.
    :param meanQ: vector of means for q
    :param log_varQ: vector of log-variances for q
    :param meanP: vector of means for p
    :param log_varP: vector of log-variances for p
    :return: KL divergence between q and p
    """
    # Closed-form KL between two diagonal Gaussians, reduced over all
    # elements (no per-sample axis, unlike get_KL_div below).
    return - 0.5 * tf.reduce_sum(
        log_varP - log_varQ + (tf.square(meanQ - meanP) / tf.exp(log_varP)) + tf.exp(log_varQ - log_varP) - 1)
def get_divergence(meanQ, log_varQ, meanP, log_varP, div_loss):
    """Divergence between two diagonal Gaussians Q and P, per sample.

    :param meanQ: vector of means for q
    :param log_varQ: vector of log-variances for q
    :param meanP: vector of means for p
    :param log_varP: vector of log-variances for p
    :param div_loss: one of the codes.Losses divergence identifiers
        (KLD, RKLD, JS, CHI2, Helling)
    :return: the selected divergence, reduced over the last axis
    :raises AssertionError: if div_loss is not a known loss code
    :raises ValueError: if div_loss is a known loss code without a
        divergence formula here (e.g. MLE); previously this case silently
        returned None.
    """
    assert div_loss in codes.properties(codes.Losses)\
        , 'Unsupported divergences loss div_loss'
    if div_loss == codes.Losses.KLD:
        return get_KL_div(meanQ, log_varQ, meanP, log_varP)
    elif div_loss == codes.Losses.RKLD:
        return -get_KL_div(meanP, log_varP, meanQ, log_varQ)
    elif div_loss == codes.Losses.JS:
        return get_KL_div(meanQ, log_varQ, meanP, log_varP) * 0.5 + \
               get_KL_div(meanP, log_varP, meanQ, log_varQ) * 0.5
    elif div_loss == codes.Losses.CHI2:
        # NOTE(review): uses tf.log(log_varP) where the KL branch uses
        # tf.exp(log_varP) -- confirm the formula is intended.
        return -0.5 * tf.reduce_sum(tf.exp(log_varP) + log_varQ
                                    -(tf.square(meanQ - meanP) / tf.log(log_varP)-1)**2
                                    - tf.exp(log_varQ - log_varP)**2 , 1)
    elif div_loss == codes.Losses.Helling:
        return -0.5 * tf.reduce_sum(tf.exp(log_varP) + log_varQ
                                    -(tf.square(tf.square(meanQ - meanP) / tf.log(log_varP))-1)**2
                                    - tf.exp(log_varQ - log_varP)**2 , 1)
    raise ValueError('Unhandled divergence loss: {}'.format(div_loss))
def get_kl(mu, log_var):
    """
    d_kl(q(latent|x)||p(latent)) returns the KL-divergence between the
    standard-normal prior p and the variational posterior q, per sample.

    :param mu: vector of means for q
    :param log_var: vector of log-standard-deviations for q (doubled below)
    :return: KL divergence between q and p, reduced over the last axis
    """
    # Formula: 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    return - 0.5 * tf.reduce_sum( 1.0 + 2.0 * log_var - tf.square(mu) - tf.exp(2.0 * log_var), 1)
def get_KL_div(meanQ, log_varQ, meanP, log_varP):
    """
    KL[Q || P] returns the divergence between the prior p and the variational
    posterior q, reduced over the last axis (one value per sample).

    :param meanQ: vector of means for q
    :param log_varQ: vector of log-variances for q
    :param meanP: vector of means for p
    :param log_varP: vector of log-variances for p
    :return: KL divergence between q and p
    """
    # Closed-form KL between diagonal Gaussians. NOTE(review): compare with
    # get_QP_kl above, which reduces over all axes instead of axis 1.
    return -0.5 * tf.reduce_sum(tf.exp(log_varP) + log_varQ
                                -(tf.square(meanQ - meanP) / tf.exp(log_varP))
                                - tf.exp(log_varQ - log_varP) , 1)
def kl_divergence(P, Q, epsilon=config.epsilon):
    """Kullback-Leibler divergence KL(P || Q) of two probability tensors.

    Args:
        P: Tensor storing the target probability distribution.
        Q: Tensor storing the model distribution.
        epsilon: lower clip applied inside the log to avoid log(0) and
            division by zero.

    Returns:
        Scalar tensor holding the Kullback-Leibler divergence.
    """
    p_safe = tf.maximum(P, epsilon)
    q_safe = tf.maximum(Q, epsilon)
    return tf.reduce_sum(P * tf.log(p_safe / q_safe))
def get_distributions_div_cost(Px, Qx, loss_func, epsilon=config.epsilon):
    """Divergence between two discrete probability distributions Px and Qx.

    :param Px: tensor holding the first distribution
    :param Qx: tensor holding the second distribution
    :param loss_func: one of the codes.Losses divergence identifiers
        (KLD, RKLD, JS, CHI2, Helling)
    :param epsilon: lower clip used to avoid division by zero
    :return: the selected divergence as a scalar tensor
    :raises AssertionError: if loss_func is not a known loss code
    :raises ValueError: if loss_func is a known loss code without a
        divergence formula here (e.g. MLE); previously this case silently
        returned None.
    """
    assert loss_func in codes.properties(codes.Losses),\
        'Unsupported divergences loss loss_func'
    if loss_func == codes.Losses.KLD:
        return kl_divergence(Px, Qx)
    elif loss_func == codes.Losses.RKLD:
        return -kl_divergence(Qx, Px)
    elif loss_func == codes.Losses.JS:
        return kl_divergence(Px, Qx) * 0.5 + \
               kl_divergence(Qx, Px) * 0.5
    elif loss_func == codes.Losses.CHI2:
        Pxc = tf.maximum(Px, epsilon)
        Qyc = tf.maximum(Qx, epsilon)
        return tf.reduce_sum(Qx * (Pxc / Qyc - 1.) ** 2)
    elif loss_func == codes.Losses.Helling:
        Pxc = tf.maximum(Px, epsilon)
        Qyc = tf.maximum(Qx, epsilon)
        return tf.reduce_sum(Qx * (tf.sqrt(Pxc / Qyc) - 1.) ** 2)
    raise ValueError('Unhandled divergence loss: {}'.format(loss_func))
| import tensorflow as tf
import utils.codes as codes
from utils.configuration import default_config as config
## ------------------- LOSS: EXPECTED LOWER BOUND ----------------------
# tsne_cost loss
def get_reconst_loss(x, x_recons, loss_func, epsilon=config.epsilon):
"""
Returns the reconstuction loss between x and x_recons
two modes:
OLS:
MSE(x, x_recons) Mean error squared
MLE:
Maximum log-likelihood estimator is the expected log-likelihood of the lower bound. For this we use a bernouilli LL.
"""
assert loss_func in codes.properties(codes.Losses), \
'Unsupported reconstuction loss loss_func'
if loss_func == codes.Losses.MLE:
return - tf.reduce_sum((x) * tf.log(x_recons + epsilon) +
(1 - x) * tf.log(1 - x_recons + epsilon), 1)
else:
return tf.losses.mean_pairwise_squared_error(x, x_recons)
### ---------------------------------------------- Divergences --------------------------------------------
### ---------------------------------------------- Divergences --------------------------------------------
def get_self_divergence(meanQ, log_varQ, loss_func):
log_varQ = 2.0*log_varQ
P = tf.distributions.Bernoulli(probs=tf.ones(meanQ.shape[-1]))
meanP = P.mean()
log_varP = P.variance()
return get_divergence(meanQ, log_varQ, meanP, log_varP, loss_func)
def get_QP_kl(meanQ, log_varQ, meanP, log_varP):
"""
KL[Q || P] returns the KL-divergence between the prior p and the variational posterior q.
:param meanQ: vector of means for q
:param log_varQ: vector of log-variances for q
:param meanP: vector of means for p
:param log_varP: vector of log-variances for p
:return: KL divergence between q and p
"""
#meanQ = posterior_mean
#log_varQ = posterior_logvar
#meanP = prior_mean
#log_varP = prior_logvar
return - 0.5 * tf.reduce_sum(
log_varP - log_varQ + (tf.square(meanQ - meanP) / tf.exp(log_varP)) + tf.exp(log_varQ - log_varP) - 1)
def get_divergence(meanQ, log_varQ, meanP, log_varP, div_loss):
assert div_loss in codes.properties(codes.Losses)\
, 'Unsupported divergences loss div_loss'
if div_loss == codes.Losses.KLD:
return get_KL_div(meanQ, log_varQ, meanP, log_varP)
elif div_loss == codes.Losses.RKLD:
return -get_KL_div(meanP, log_varP, meanQ, log_varQ)
elif div_loss == codes.Losses.JS:
return get_KL_div(meanQ, log_varQ, meanP, log_varP) * 0.5 + \
get_KL_div(meanP, log_varP, meanQ, log_varQ) * 0.5
elif div_loss == codes.Losses.CHI2:
return -0.5 * tf.reduce_sum(tf.exp(log_varP) + log_varQ
-(tf.square(meanQ - meanP) / tf.log(log_varP)-1)**2
- tf.exp(log_varQ - log_varP)**2 , 1)
elif div_loss == codes.Losses.Helling:
return -0.5 * tf.reduce_sum(tf.exp(log_varP) + log_varQ
-(tf.square(tf.square(meanQ - meanP) / tf.log(log_varP))-1)**2
- tf.exp(log_varQ - log_varP)**2 , 1)
def get_kl(mu, log_var):
"""
d_kl(q(latent|x)||p(latent)) returns the KL-divergence between the prior p and the variational posterior q.
:return: KL divergence between q and p
"""
# Formula: 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
return - 0.5 * tf.reduce_sum( 1.0 + 2.0 * log_var - tf.square(mu) - tf.exp(2.0 * log_var), 1)
def get_KL_div(meanQ, log_varQ, meanP, log_varP):
"""
KL[Q || P] returns the divergence between the prior p and the variational posterior q.
:param meanQ: vector of means for q
:param log_varQ: vector of log-variances for q
:param meanP: vector of means for p
:param log_varP: vector of log-variances for p
:return: KL divergence between q and p
"""
#meanQ = posterior_mean
#log_varQ = posterior_logvar
#meanP = prior_mean
#log_varP = prior_logvar
return -0.5 * tf.reduce_sum(tf.exp(log_varP) + log_varQ
-(tf.square(meanQ - meanP) / tf.exp(log_varP))
- tf.exp(log_varQ - log_varP) , 1)
def kl_divergence(P, Q, epsilon=config.epsilon):
"""
Compute the Kullback–Leibler divergence between two probability distributions
Args:
P : (tensorflow.placeholder): Tensor storing the target probability distribution
@ : (tensorflow.Variable): Tensor storing the model distribution
Returns:
KLD (tensorflow.Variable): Kullback–Leibler divergence
"""
Pc = tf.maximum(P, epsilon)
Qc = tf.maximum(Q, epsilon)
return tf.reduce_sum(P * tf.log(Pc / Qc))
def get_distributions_div_cost(Px, Qx, loss_func, epsilon=config.epsilon):
assert loss_func in codes.properties(codes.Losses),\
'Unsupported divergences loss loss_func'
if loss_func == codes.Losses.KLD:
return kl_divergence(Px, Qx)
if loss_func == codes.Losses.RKLD:
return -kl_divergence(Qx, Px)
elif loss_func == codes.Losses.JS:
return kl_divergence(Px, Qx) * 0.5 + \
kl_divergence(Qx, Px) * 0.5
elif loss_func == codes.Losses.CHI2:
Pxc = tf.maximum(Px, epsilon)
Qyc = tf.maximum(Qx, epsilon)
return tf.reduce_sum(Qx * (Pxc / Qyc - 1.) ** 2)
elif loss_func == codes.Losses.Helling:
Pxc = tf.maximum(Px, epsilon)
Qyc = tf.maximum(Qx, epsilon)
return tf.reduce_sum(Qx * (tf.sqrt(Pxc / Qyc) - 1.) ** 2) | en | 0.65316 | ## ------------------- LOSS: EXPECTED LOWER BOUND ---------------------- # tsne_cost loss Returns the reconstuction loss between x and x_recons
two modes:
OLS:
MSE(x, x_recons) Mean error squared
MLE:
Maximum log-likelihood estimator is the expected log-likelihood of the lower bound. For this we use a bernouilli LL. ### ---------------------------------------------- Divergences -------------------------------------------- ### ---------------------------------------------- Divergences -------------------------------------------- KL[Q || P] returns the KL-divergence between the prior p and the variational posterior q. :param meanQ: vector of means for q :param log_varQ: vector of log-variances for q :param meanP: vector of means for p :param log_varP: vector of log-variances for p :return: KL divergence between q and p #meanQ = posterior_mean #log_varQ = posterior_logvar #meanP = prior_mean #log_varP = prior_logvar d_kl(q(latent|x)||p(latent)) returns the KL-divergence between the prior p and the variational posterior q.
:return: KL divergence between q and p # Formula: 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KL[Q || P] returns the divergence between the prior p and the variational posterior q.
:param meanQ: vector of means for q
:param log_varQ: vector of log-variances for q
:param meanP: vector of means for p
:param log_varP: vector of log-variances for p
:return: KL divergence between q and p #meanQ = posterior_mean #log_varQ = posterior_logvar #meanP = prior_mean #log_varP = prior_logvar Compute the Kullback–Leibler divergence between two probability distributions
Args:
P : (tensorflow.placeholder): Tensor storing the target probability distribution
@ : (tensorflow.Variable): Tensor storing the model distribution
Returns:
KLD (tensorflow.Variable): Kullback–Leibler divergence | 2.400978 | 2 |
snakegame/setup.py | wilomgfx/PyGameSnake | 0 | 6612597 | __author__ = 'William'
import cx_Freeze
executables = [cx_Freeze.Executable("SnakeGamePyGame.py")]
cx_Freeze.setup(name="SnakeyGame",
options={"build_exe":{"packages":["pygame"],"include_files":["apple.png","snakehead.png"]}},
description = "Snakey game... just eat the apple",
version = "1.0.0",
executables = executables
) | __author__ = 'William'
import cx_Freeze
executables = [cx_Freeze.Executable("SnakeGamePyGame.py")]
cx_Freeze.setup(name="SnakeyGame",
options={"build_exe":{"packages":["pygame"],"include_files":["apple.png","snakehead.png"]}},
description = "Snakey game... just eat the apple",
version = "1.0.0",
executables = executables
) | none | 1 | 1.597829 | 2 | |
old code/utils.py | dll-ncai/AI-ForestWatch | 2 | 6612598 | <reponame>dll-ncai/AI-ForestWatch
# Copyright (c) 2021, Technische Universität Kaiserslautern (TUK) & National University of Sciences and Technology (NUST).
# All rights reserved.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A few helper functions
"""
from __future__ import print_function
from __future__ import division
import os
import numpy as np
import PIL.Image as Image
import scipy.io as sio
def load_weights_from_matfiles(dir_path):
"""
Uses scipy.io to read .mat files and loads weights into torch model
:param path_to_file: path to mat file to read
:return: None, but saves the model dictionary!
"""
import pickle
model_file = 'Unet_pretrained_model.pkl'
if os.path.exists(os.path.join(dir_path, model_file)):
print('loading saved model dictionary...')
with open(os.path.join(dir_path, model_file), 'rb') as handle:
model_dict = pickle.load(handle)
for i, layer in enumerate(model_dict.keys(), 1):
print('{}.'.format(i), layer, model_dict[layer].shape)
else:
model_dict = {}
for file in [x for x in os.listdir(dir_path) if x.endswith('.mat')]:
layer, _ = os.path.splitext(file)
try:
read = sio.loadmat(os.path.join(dir_path, file))
except:
print(layer)
print(layer, read[layer].shape)
model_dict[layer] = read[layer]
pass
os.chdir('/home/annus/Desktop/trainedUnet/weightsforpython/')
with open(model_file, 'wb') as handle:
pickle.dump(model_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
print('Saved model!!!')
def show_image():
def histeq(im):
""" Histogram equalization of a grayscale image. """
nbr_bins = 256
# get image histogram
imhist, bins = np.histogram(im.flatten(), nbr_bins, normed=True)
cdf = imhist.cumsum() # cumulative distribution function
cdf = 255 * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
im2 = np.interp(im.flatten(), bins[:-1], cdf)
return im2.reshape(im.shape)
os.chdir('/home/annus/Desktop/rit18_data/')
train_data = np.load('train_data.npy', mmap_mode='r').transpose((2, 1, 0))
print(train_data.shape)
w, h, patch = 2000, 2000, 1000
image = train_data[w:w + patch, h:h + patch, 4:]
# image = (255 / 65536 * image).astype(np.int8)
r, g, b = map(histeq, [image[:, :, 0], image[:, :, 1], image[:, :, 2]])
image = Image.fromarray(np.dstack((r, g, b)), 'RGB')
# image = cv2.normalize(image, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,
# dtype=cv2.CV_32F).astype(np.int8)
# print(image.shape, image.dtype, np.max(np.max(image)), np.min(np.min(image)), np.mean(np.mean(image)))
# pl.imshow(image)
# pl.axis('off')
# pl.show()
os.chdir('/home/annus/Desktop/')
image.save('image.png')
| # Copyright (c) 2021, Technische Universität Kaiserslautern (TUK) & National University of Sciences and Technology (NUST).
# All rights reserved.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A few helper functions
"""
from __future__ import print_function
from __future__ import division
import os
import numpy as np
import PIL.Image as Image
import scipy.io as sio
def load_weights_from_matfiles(dir_path):
"""
Uses scipy.io to read .mat files and loads weights into torch model
:param path_to_file: path to mat file to read
:return: None, but saves the model dictionary!
"""
import pickle
model_file = 'Unet_pretrained_model.pkl'
if os.path.exists(os.path.join(dir_path, model_file)):
print('loading saved model dictionary...')
with open(os.path.join(dir_path, model_file), 'rb') as handle:
model_dict = pickle.load(handle)
for i, layer in enumerate(model_dict.keys(), 1):
print('{}.'.format(i), layer, model_dict[layer].shape)
else:
model_dict = {}
for file in [x for x in os.listdir(dir_path) if x.endswith('.mat')]:
layer, _ = os.path.splitext(file)
try:
read = sio.loadmat(os.path.join(dir_path, file))
except:
print(layer)
print(layer, read[layer].shape)
model_dict[layer] = read[layer]
pass
os.chdir('/home/annus/Desktop/trainedUnet/weightsforpython/')
with open(model_file, 'wb') as handle:
pickle.dump(model_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
print('Saved model!!!')
def show_image():
def histeq(im):
""" Histogram equalization of a grayscale image. """
nbr_bins = 256
# get image histogram
imhist, bins = np.histogram(im.flatten(), nbr_bins, normed=True)
cdf = imhist.cumsum() # cumulative distribution function
cdf = 255 * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
im2 = np.interp(im.flatten(), bins[:-1], cdf)
return im2.reshape(im.shape)
os.chdir('/home/annus/Desktop/rit18_data/')
train_data = np.load('train_data.npy', mmap_mode='r').transpose((2, 1, 0))
print(train_data.shape)
w, h, patch = 2000, 2000, 1000
image = train_data[w:w + patch, h:h + patch, 4:]
# image = (255 / 65536 * image).astype(np.int8)
r, g, b = map(histeq, [image[:, :, 0], image[:, :, 1], image[:, :, 2]])
image = Image.fromarray(np.dstack((r, g, b)), 'RGB')
# image = cv2.normalize(image, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,
# dtype=cv2.CV_32F).astype(np.int8)
# print(image.shape, image.dtype, np.max(np.max(image)), np.min(np.min(image)), np.mean(np.mean(image)))
# pl.imshow(image)
# pl.axis('off')
# pl.show()
os.chdir('/home/annus/Desktop/')
image.save('image.png') | en | 0.60503 | # Copyright (c) 2021, Technische Universität Kaiserslautern (TUK) & National University of Sciences and Technology (NUST). # All rights reserved. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. A few helper functions Uses scipy.io to read .mat files and loads weights into torch model :param path_to_file: path to mat file to read :return: None, but saves the model dictionary! Histogram equalization of a grayscale image. # get image histogram # cumulative distribution function # normalize # use linear interpolation of cdf to find new pixel values # image = (255 / 65536 * image).astype(np.int8) # image = cv2.normalize(image, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, # dtype=cv2.CV_32F).astype(np.int8) # print(image.shape, image.dtype, np.max(np.max(image)), np.min(np.min(image)), np.mean(np.mean(image))) # pl.imshow(image) # pl.axis('off') # pl.show() | 2.144355 | 2 |
sfsidb/load.py | eng-tools/sfsidb | 1 | 6612599 | import numpy as np
import warnings
from sfsidb import constants
import glob
def deprecation(message):
warnings.warn(message, stacklevel=3)
def create_motion_name(test_name, sensor_code, code_suffix=""):
"""
Builds the full name of the file
:param test_name: str, test name
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:param code_suffix: str, suffix
:return:
"""
return "%s-%s-%s" % (test_name, sensor_code, code_suffix)
def get_sensor_code_by_number(si, mtype, sensor_number, quiet=False):
"""
Given a sensor number, get the full sensor code (e.g. ACCX-UB1-L2C-M)
:param si: dict, sensor index json dictionary
:param mtype: str, sensor type
:param sensor_number: int, number of sensor
:param quiet: bool, if true then return None if not found
:return: str or None, sensor_code: a sensor code (e.g. ACCX-UB1-L2C-M)
"""
try:
if 'Orientation' in si[mtype][sensor_number]:
orientation = si[mtype][sensor_number]['Orientation']
else:
orientation = ""
return "%s%s-%s-%s-%s" % (mtype,
orientation,
si[mtype][sensor_number]['X-CODE'],
si[mtype][sensor_number]['Y-CODE'],
si[mtype][sensor_number]['Z-CODE'])
except KeyError:
if quiet:
return None
raise
def get_mtype_and_number_from_code(si, sensor_code):
"""
Given a sensor sensor_code, get motion type and sensor number
:param si: dict, sensor index json dictionary
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:return:
"""
mtype_and_ory, x, y, z = sensor_code.split("-")
if mtype_and_ory[-1] in "XYZ" and "ACCX" not in si: # Need to support old sensor_file.json files.
mtype = mtype_and_ory[:-1]
else:
mtype = mtype_and_ory
for m_number in si[mtype]:
cc = get_sensor_code_by_number(si, mtype, m_number)
if cc == sensor_code:
return mtype, m_number
return None, None
def get_all_sensor_codes(si, wild_sensor_code):
"""
Get all sensor sensor_codes that match a wild sensor code
:param si: dict, sensor index json dictionary
:param wild_sensor_code: str, a sensor code with "*" for wildcards (e.g. ACCX-*-L2C-*)
:return:
"""
mtype_and_ory, x, y, z = wild_sensor_code.split("-")
if mtype_and_ory == "*":
mtypes = list(si)
elif mtype_and_ory[-1] in "XYZ" and "ACCX" not in si: # Need to support old sensor_file.json files.
mtypes = [mtype_and_ory[:-1]]
else:
mtypes = [mtype_and_ory]
all_sensor_codes = []
for mtype in mtypes:
for m_number in si[mtype]:
if x in ["*", si[mtype][m_number]['X-CODE']] and \
y in ["*", si[mtype][m_number]['Y-CODE']] and \
z in ["*", si[mtype][m_number]['Z-CODE']]:
cc = get_sensor_code_by_number(si, mtype, m_number)
all_sensor_codes.append(cc)
return all_sensor_codes
def load_record(ffp, dbset, quiet=False):
deprecation('Deprecated, switch to load_record_and_time, load_record_and_dt')
# raise Warning("Deprecated, switch to load_record_and_time, load_record_and_dt")
if quiet:
try:
data = np.loadtxt(ffp + dbset.SENSOR_FILE_TYPE,
dtype='float',
delimiter=dbset.SENSOR_DATA_DELIMITER,
skiprows=dbset.SENSOR_DATA_SKIP_ROWS)
except FileNotFoundError:
print("File not found: ", ffp + dbset.SENSOR_FILE_TYPE)
return None, None
except IOError:
print("File not found: ", ffp + dbset.SENSOR_FILE_TYPE)
return None, None
else:
data = np.loadtxt(ffp + dbset.SENSOR_FILE_TYPE,
dtype='float',
delimiter=dbset.SENSOR_DATA_DELIMITER,
skiprows=dbset.SENSOR_DATA_SKIP_ROWS)
time = data[:, 0]
dt = time[1] - time[0]
series = data[:, 1]
return series, time
def get_available_sensor_codes(ffp, local_path_ext, wild_sensor_code, dbset):
file_name = dbset.create_file_name("*", wild_sensor_code)
full_wild_file_path = ffp + local_path_ext + file_name
files = glob.glob(full_wild_file_path)
files.sort()
import re
compiled = re.compile(wild_sensor_code)
for ff in range(len(files)):
ms = compiled.match(files[ff])
# files[ff] = ms
# sname = files[ff].split(local_path_ext)[-1]
# sname = sname.split()
# files[ff].replace(files[ff])
return files
def load_record_only(db_fp, local_path_ext, test_name, sensor_code, dbset, quiet=False, first=True):
"""
Finds the file and returns the time series of values
:param db_fp: str, Database root directory
:param local_path_ext: str, local path to sensor file from database root directory
:param test_name: str, name of test used as prefix of file name
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:param dbset: module, A database set module from sfsidb.sets
:param quiet: bool, if True then return None
:return:
"""
folder_path = db_fp + local_path_ext
rec_and_dt = dbset.wild_load_record_and_dt(folder_path, test_name, sensor_code, quiet, first)
if rec_and_dt is None and quiet:
return None
return rec_and_dt[0]
def load_record_and_time(db_fp, local_path_ext, test_name, sensor_code, dbset, quiet=False, first=True):
"""
Finds the file and returns the time series of values and the time series
:param db_fp: str, Database root directory
:param local_path_ext: str, local path to sensor file from database root directory
:param test_name: str, name of test used as prefix of file name
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:param dbset: module, A database set module from sfsidb.sets
:param quiet: bool, if True then return None
:return:
"""
folder_path = db_fp + local_path_ext
if first:
rec, dt = dbset.wild_load_record_and_dt(folder_path, test_name, sensor_code, quiet, first=first)
if rec is None and quiet:
return None, None
time = np.arange(1, len(rec) + 1) * dt
return rec, time
else:
recs, dts = dbset.wild_load_record_and_dt(folder_path, test_name, sensor_code, quiet, first)
times = []
for i, dt in enumerate(dts):
time = np.arange(1, len(recs[i]) + 1) * dt
times.append(time)
return recs, times
def load_record_and_dt(db_fp, local_path_ext, test_name, sensor_code, dbset, quiet=False, first=True):
"""
Finds the file and returns the time series of values and the time step
:param db_fp: str, Database root directory
:param local_path_ext: str, local path to sensor file from database root directory
:param test_name: str, name of test used as prefix of file name
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:param dbset: module, A database set module from sfsidb.sets
:param quiet: bool, if True then return None
:return:
"""
folder_path = db_fp + local_path_ext
return dbset.wild_load_record_and_dt(folder_path, test_name, sensor_code, quiet, first)
def sensor_code_to_name(sensor_code, part="sensor"):
"""
Converts a sensor code into written english.
E.g. ACCX-UB1-L2C-M = Horizontal acceleration
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:param part: str, what part of the code to convert
:return: str
"""
mtype_and_ory, x, y, z = sensor_code.split("-")
if part == "sensor":
return constants.sensor_type_codes[mtype_and_ory]
elif part == "xloc":
return constants.x_locations[x]
elif part == "yloc":
return constants.y_locations[y]
| import numpy as np
import warnings
from sfsidb import constants
import glob
def deprecation(message):
warnings.warn(message, stacklevel=3)
def create_motion_name(test_name, sensor_code, code_suffix=""):
"""
Builds the full name of the file
:param test_name: str, test name
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:param code_suffix: str, suffix
:return:
"""
return "%s-%s-%s" % (test_name, sensor_code, code_suffix)
def get_sensor_code_by_number(si, mtype, sensor_number, quiet=False):
"""
Given a sensor number, get the full sensor code (e.g. ACCX-UB1-L2C-M)
:param si: dict, sensor index json dictionary
:param mtype: str, sensor type
:param sensor_number: int, number of sensor
:param quiet: bool, if true then return None if not found
:return: str or None, sensor_code: a sensor code (e.g. ACCX-UB1-L2C-M)
"""
try:
if 'Orientation' in si[mtype][sensor_number]:
orientation = si[mtype][sensor_number]['Orientation']
else:
orientation = ""
return "%s%s-%s-%s-%s" % (mtype,
orientation,
si[mtype][sensor_number]['X-CODE'],
si[mtype][sensor_number]['Y-CODE'],
si[mtype][sensor_number]['Z-CODE'])
except KeyError:
if quiet:
return None
raise
def get_mtype_and_number_from_code(si, sensor_code):
"""
Given a sensor sensor_code, get motion type and sensor number
:param si: dict, sensor index json dictionary
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:return:
"""
mtype_and_ory, x, y, z = sensor_code.split("-")
if mtype_and_ory[-1] in "XYZ" and "ACCX" not in si: # Need to support old sensor_file.json files.
mtype = mtype_and_ory[:-1]
else:
mtype = mtype_and_ory
for m_number in si[mtype]:
cc = get_sensor_code_by_number(si, mtype, m_number)
if cc == sensor_code:
return mtype, m_number
return None, None
def get_all_sensor_codes(si, wild_sensor_code):
"""
Get all sensor sensor_codes that match a wild sensor code
:param si: dict, sensor index json dictionary
:param wild_sensor_code: str, a sensor code with "*" for wildcards (e.g. ACCX-*-L2C-*)
:return:
"""
mtype_and_ory, x, y, z = wild_sensor_code.split("-")
if mtype_and_ory == "*":
mtypes = list(si)
elif mtype_and_ory[-1] in "XYZ" and "ACCX" not in si: # Need to support old sensor_file.json files.
mtypes = [mtype_and_ory[:-1]]
else:
mtypes = [mtype_and_ory]
all_sensor_codes = []
for mtype in mtypes:
for m_number in si[mtype]:
if x in ["*", si[mtype][m_number]['X-CODE']] and \
y in ["*", si[mtype][m_number]['Y-CODE']] and \
z in ["*", si[mtype][m_number]['Z-CODE']]:
cc = get_sensor_code_by_number(si, mtype, m_number)
all_sensor_codes.append(cc)
return all_sensor_codes
def load_record(ffp, dbset, quiet=False):
deprecation('Deprecated, switch to load_record_and_time, load_record_and_dt')
# raise Warning("Deprecated, switch to load_record_and_time, load_record_and_dt")
if quiet:
try:
data = np.loadtxt(ffp + dbset.SENSOR_FILE_TYPE,
dtype='float',
delimiter=dbset.SENSOR_DATA_DELIMITER,
skiprows=dbset.SENSOR_DATA_SKIP_ROWS)
except FileNotFoundError:
print("File not found: ", ffp + dbset.SENSOR_FILE_TYPE)
return None, None
except IOError:
print("File not found: ", ffp + dbset.SENSOR_FILE_TYPE)
return None, None
else:
data = np.loadtxt(ffp + dbset.SENSOR_FILE_TYPE,
dtype='float',
delimiter=dbset.SENSOR_DATA_DELIMITER,
skiprows=dbset.SENSOR_DATA_SKIP_ROWS)
time = data[:, 0]
dt = time[1] - time[0]
series = data[:, 1]
return series, time
def get_available_sensor_codes(ffp, local_path_ext, wild_sensor_code, dbset):
file_name = dbset.create_file_name("*", wild_sensor_code)
full_wild_file_path = ffp + local_path_ext + file_name
files = glob.glob(full_wild_file_path)
files.sort()
import re
compiled = re.compile(wild_sensor_code)
for ff in range(len(files)):
ms = compiled.match(files[ff])
# files[ff] = ms
# sname = files[ff].split(local_path_ext)[-1]
# sname = sname.split()
# files[ff].replace(files[ff])
return files
def load_record_only(db_fp, local_path_ext, test_name, sensor_code, dbset, quiet=False, first=True):
"""
Finds the file and returns the time series of values
:param db_fp: str, Database root directory
:param local_path_ext: str, local path to sensor file from database root directory
:param test_name: str, name of test used as prefix of file name
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:param dbset: module, A database set module from sfsidb.sets
:param quiet: bool, if True then return None
:return:
"""
folder_path = db_fp + local_path_ext
rec_and_dt = dbset.wild_load_record_and_dt(folder_path, test_name, sensor_code, quiet, first)
if rec_and_dt is None and quiet:
return None
return rec_and_dt[0]
def load_record_and_time(db_fp, local_path_ext, test_name, sensor_code, dbset, quiet=False, first=True):
"""
Finds the file and returns the time series of values and the time series
:param db_fp: str, Database root directory
:param local_path_ext: str, local path to sensor file from database root directory
:param test_name: str, name of test used as prefix of file name
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:param dbset: module, A database set module from sfsidb.sets
:param quiet: bool, if True then return None
:return:
"""
folder_path = db_fp + local_path_ext
if first:
rec, dt = dbset.wild_load_record_and_dt(folder_path, test_name, sensor_code, quiet, first=first)
if rec is None and quiet:
return None, None
time = np.arange(1, len(rec) + 1) * dt
return rec, time
else:
recs, dts = dbset.wild_load_record_and_dt(folder_path, test_name, sensor_code, quiet, first)
times = []
for i, dt in enumerate(dts):
time = np.arange(1, len(recs[i]) + 1) * dt
times.append(time)
return recs, times
def load_record_and_dt(db_fp, local_path_ext, test_name, sensor_code, dbset, quiet=False, first=True):
"""
Finds the file and returns the time series of values and the time step
:param db_fp: str, Database root directory
:param local_path_ext: str, local path to sensor file from database root directory
:param test_name: str, name of test used as prefix of file name
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:param dbset: module, A database set module from sfsidb.sets
:param quiet: bool, if True then return None
:return:
"""
folder_path = db_fp + local_path_ext
return dbset.wild_load_record_and_dt(folder_path, test_name, sensor_code, quiet, first)
def sensor_code_to_name(sensor_code, part="sensor"):
"""
Converts a sensor code into written english.
E.g. ACCX-UB1-L2C-M = Horizontal acceleration
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:param part: str, what part of the code to convert
:return: str
"""
mtype_and_ory, x, y, z = sensor_code.split("-")
if part == "sensor":
return constants.sensor_type_codes[mtype_and_ory]
elif part == "xloc":
return constants.x_locations[x]
elif part == "yloc":
return constants.y_locations[y]
| en | 0.61958 | Builds the full name of the file :param test_name: str, test name :param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M) :param code_suffix: str, suffix :return: Given a sensor number, get the full sensor code (e.g. ACCX-UB1-L2C-M) :param si: dict, sensor index json dictionary :param mtype: str, sensor type :param sensor_number: int, number of sensor :param quiet: bool, if true then return None if not found :return: str or None, sensor_code: a sensor code (e.g. ACCX-UB1-L2C-M) Given a sensor sensor_code, get motion type and sensor number :param si: dict, sensor index json dictionary :param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M) :return: # Need to support old sensor_file.json files. Get all sensor sensor_codes that match a wild sensor code :param si: dict, sensor index json dictionary :param wild_sensor_code: str, a sensor code with "*" for wildcards (e.g. ACCX-*-L2C-*) :return: # Need to support old sensor_file.json files. # raise Warning("Deprecated, switch to load_record_and_time, load_record_and_dt") # files[ff] = ms # sname = files[ff].split(local_path_ext)[-1] # sname = sname.split() # files[ff].replace(files[ff]) Finds the file and returns the time series of values :param db_fp: str, Database root directory :param local_path_ext: str, local path to sensor file from database root directory :param test_name: str, name of test used as prefix of file name :param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M) :param dbset: module, A database set module from sfsidb.sets :param quiet: bool, if True then return None :return: Finds the file and returns the time series of values and the time series :param db_fp: str, Database root directory :param local_path_ext: str, local path to sensor file from database root directory :param test_name: str, name of test used as prefix of file name :param sensor_code: str, a sensor code (e.g. 
ACCX-UB1-L2C-M) :param dbset: module, A database set module from sfsidb.sets :param quiet: bool, if True then return None :return: Finds the file and returns the time series of values and the time step :param db_fp: str, Database root directory :param local_path_ext: str, local path to sensor file from database root directory :param test_name: str, name of test used as prefix of file name :param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M) :param dbset: module, A database set module from sfsidb.sets :param quiet: bool, if True then return None :return: Converts a sensor code into written english. E.g. ACCX-UB1-L2C-M = Horizontal acceleration :param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M) :param part: str, what part of the code to convert :return: str | 2.386603 | 2 |
sparse/repos/betatim/talk-swiss-python-summit-2018/setup.py | yuvipanda/mybinder.org-analytics | 1 | 6612600 | from setuptools import setup
setup(name='bikes',
version='0.0.1',
description='Zurich bike helpers',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
long_description='Zurich bike helpers',
packages=['bikes'],
install_requires=['pandas', 'matplotlib', 'requests']
)
| from setuptools import setup
setup(name='bikes',
version='0.0.1',
description='Zurich bike helpers',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
long_description='Zurich bike helpers',
packages=['bikes'],
install_requires=['pandas', 'matplotlib', 'requests']
)
| none | 1 | 1.174421 | 1 | |
TensorArtist/tartist/plugins/trainer_enhancer/summary.py | cosmic119/DiscoGAN | 0 | 6612601 | <filename>TensorArtist/tartist/plugins/trainer_enhancer/summary.py
# -*- coding:utf8 -*-
# File : summary.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 2/26/17
#
# This file is part of TensorArtist.
import collections
import json
import math
import os
import os.path as osp
import random
import shutil
import subprocess
import threading
import tensorflow as tf
from tartist.core import get_logger, get_env, io
from tartist.data.rflow.utils import get_addr
from tartist.nn.tfutils import format_summary_name, clean_summary_suffix
logger = get_logger()
summary_async_lock = threading.Lock()
class SummaryHistoryManager(object):
def __init__(self):
self._summaries = {}
self._summaries_type = {}
self._summaries_last_query = {}
@property
def all_summaries(self):
return self.get_all_summaries()
def get_all_summaries(self, type=None):
if type is None:
return list(self._summaries_type.keys())
filt = lambda x: type == x
return [k for k, v in self._summaries_type.items() if filt(v)]
def clear_all(self):
self._summaries = {}
def clear(self, key):
self._summaries[key] = []
def put_scalar(self, key, value):
value = float(value)
self._summaries.setdefault(key, []).append(value)
def put_async_scalar(self, key, value):
value = float(value)
with summary_async_lock:
self._summaries.setdefault(key, []).append(value)
def put_summaries(self, summaries):
for val in summaries.value:
if val.WhichOneof('value') == 'simple_value':
# XXX: do hacks here
val.tag = format_summary_name(val.tag)
self.put_scalar(val.tag, val.simple_value)
self.set_type(val.tag, 'scalar')
def get(self, key):
return self._summaries.get(key, [])
def has(self, key):
return key in self._summaries
def get_type(self, key):
return self._summaries_type.get(key, 'unknown')
def set_type(self, key, value, check=True):
old_value = self.get_type(key)
if old_value != 'unknown' and check:
assert old_value == value, 'summary type mismatched'
self._summaries_type[key] = value
def _do_average(self, values, meth):
assert meth in ['avg', 'max', 'min', 'sum', 'std']
if meth == 'avg':
return sum(values) / (len(values) + 1e-4)
elif meth == 'max':
return max(values)
elif meth == 'min':
return min(values)
elif meth == 'sum':
return sum(values)
elif meth == 'std':
l = len(values) + 1e-4
return math.sqrt(sum([v ** 2 for v in values]) / l - (sum(values) / l) ** 2)
def average(self, key, top_k=None, meth='avg'):
type = self.get_type(key)
if type == 'scalar':
values = self._summaries.get(key, [])
if top_k is None:
top_k = len(values)
values = values[-top_k:]
return self._do_average(values, meth)
elif type == 'async_scalar':
with summary_async_lock:
values = self._summaries.get(key, [])
last_query = self._summaries_last_query.get(key, 0)
values = values[last_query:]
if len(values):
return self._do_average(values, meth)
return 'N/A'
def update_last_query(self, key):
type = self.get_type(key)
values = self._summaries.get(key, [])
assert type.startswith('async_'), (type, key)
self._summaries_last_query[key] = len(values)
def put_summary_history(trainer, summaries):
mgr = trainer.runtime.get('summary_histories', None)
assert mgr is not None, 'you should first enable summary history'
mgr.put_summaries(summaries)
def put_summary_history_scalar(trainer, name, value):
mgr = trainer.runtime.get('summary_histories', None)
assert mgr is not None, 'you should first enable summary history'
mgr.set_type(name, 'scalar')
mgr.put_scalar(name, value)
def enable_summary_history(trainer, extra_summary_types=None):
def check_proto_contains(proto, tag):
if proto is None:
return False
for v in proto.value:
if v.tag == tag:
return True
return False
def summary_history_on_optimization_before(trainer):
trainer.runtime['summary_histories'] = SummaryHistoryManager()
if extra_summary_types is not None:
for k, v in extra_summary_types.items():
trainer.runtime['summary_histories'].set_type(k, v)
def summary_history_on_iter_after(trainer, inp, out):
mgr = trainer.runtime['summary_histories']
if 'summaries' in trainer.runtime:
summaries = trainer.runtime['summaries']
else:
summaries = tf.Summary()
if isinstance(summaries, collections.Iterable):
for s in summaries:
put_summary_history(trainer, s)
else:
if 'loss' in trainer.runtime and not check_proto_contains(summaries, 'train/loss'):
summaries.value.add(tag='train/loss', simple_value=trainer.runtime['loss'])
error_summary_key = trainer.runtime.get('error_summary_key', None)
if mgr.has(error_summary_key):
if not check_proto_contains(summaries, 'train/error'):
for v in summaries.value:
if clean_summary_suffix(v.tag) == error_summary_key:
trainer.runtime['error'] = v.simple_value
summaries.value.add(tag='train/error', simple_value=trainer.runtime['error'])
put_summary_history(trainer, summaries)
trainer.register_event('optimization:before', summary_history_on_optimization_before)
trainer.register_event('iter:after', summary_history_on_iter_after, priority=8)
def put_tensorboard_summary(trainer, summary, use_internal_gs=False):
if use_internal_gs:
gs = trainer.runtime.get('tensorboard_global_step', 0)
gs += 1
trainer.runtime['tensorboard_global_step'] = gs
else:
gs = trainer.runtime.get('global_step', trainer.iter)
if hasattr(trainer, '_tensorboard_writer'):
trainer._tensorboard_writer.add_summary(summary, gs)
def put_summary_json(trainer, data):
with open(trainer.runtime['json_summary_path'], 'a') as f:
f.write(json.dumps(data) + '\n')
def enable_echo_summary_scalar(trainer, summary_spec=None,
enable_json=True, enable_tensorboard=True, enable_tensorboard_web=True,
json_path=None, tensorboard_path=None, tensorboard_web_port=None):
if summary_spec is None:
summary_spec = {}
def summary_history_scalar_on_epoch_after(trainer):
mgr = trainer.runtime['summary_histories']
extra_summary = tf.Summary()
log_strs = ['Summaries: epoch = {}'.format(trainer.epoch)]
log_json = dict(epoch=trainer.epoch)
for k in sorted(mgr.get_all_summaries('scalar')):
spec = summary_spec.get(k, ['avg'])
for meth in spec:
if not k.startswith('inference'): # do hack for inference
avg = mgr.average(k, trainer.epoch_size, meth=meth)
else:
avg = mgr.average(k, trainer.runtime['inference_epoch_size'], meth=meth)
# MJY(20170623): add stat prefix
tag = 'stat/{}/{}'.format(k, meth)
if avg != 'N/A':
extra_summary.value.add(tag=tag, simple_value=avg)
log_strs.append(' {} = {}'.format(tag, avg))
log_json[tag] = avg
for k in sorted(mgr.get_all_summaries('async_scalar')):
spec = summary_spec.get(k, ['avg'])
for meth in spec:
avg = mgr.average(k, meth=meth)
tag = '{}/{}'.format(k, meth)
if avg != 'N/A':
extra_summary.value.add(tag=tag, simple_value=avg)
log_json[tag] = avg
log_strs.append(' {} = {}'.format(tag, avg))
mgr.update_last_query(k)
if len(log_strs) > 1:
logger.info('\n'.join(log_strs))
if enable_tensorboard and not trainer.runtime['zero_iter']:
put_tensorboard_summary(trainer, extra_summary)
if enable_json and not trainer.runtime['zero_iter']:
put_summary_json(trainer, log_json)
if enable_tensorboard and hasattr(trainer, '_tensorboard_webserver'):
logger.info('Open your tensorboard webpage at http://{}:{}'.format(get_addr(),
trainer.runtime['tensorboard_web_port']))
def json_summary_enable(trainer, js_path=json_path):
if js_path is None:
js_path = osp.join(get_env('dir.root'), 'summary.json')
restored = 'restore_snapshot' in trainer.runtime
if osp.exists(js_path) and not restored:
logger.warn('Removing old summary json: {}.'.format(js_path))
os.remove(js_path)
trainer.runtime['json_summary_path'] = js_path
def tensorboard_summary_enable(trainer, tb_path=tensorboard_path):
if tb_path is None:
tb_path = osp.join(get_env('dir.root'), 'tensorboard')
restored = 'restore_snapshot' in trainer.runtime
if osp.exists(tb_path) and not restored:
logger.warn('Removing old tensorboard directory: {}.'.format(tb_path))
shutil.rmtree(tb_path)
io.mkdir(tb_path)
trainer.runtime['tensorboard_summary_path'] = tb_path
trainer._tensorboard_writer = tf.summary.FileWriter(tb_path, graph=trainer.env.graph)
if enable_tensorboard_web:
port = random.randrange(49152, 65536.)
port = trainer.runtime.get('tensorboard_web_port', port)
trainer._tensorboard_webserver = threading.Thread(
target=_tensorboard_webserver_thread, args=['tensorboard', '--logdir', tb_path, '--port', str(port)],
daemon=True)
trainer._tensorboard_webserver.start()
trainer.runtime['tensorboard_web_port'] = port
def tensorboard_summary_write(trainer, inp, out):
if 'summaries' in trainer.runtime and not trainer.runtime['zero_iter']:
summaries = trainer.runtime['summaries']
if isinstance(summaries, collections.Iterable):
for s in summaries:
put_tensorboard_summary(trainer, s, use_internal_gs=True)
else:
put_tensorboard_summary(trainer, summaries)
trainer.register_event('epoch:after', summary_history_scalar_on_epoch_after)
if enable_json:
trainer.register_event('optimization:before', json_summary_enable)
if enable_tensorboard:
trainer.register_event('optimization:before', tensorboard_summary_enable)
trainer.register_event('iter:after', tensorboard_summary_write, priority=9)
def _tensorboard_webserver_thread(*command):
    """Run the tensorboard web server as a child process.

    Intended as a ``threading.Thread`` target: spawns *command* and blocks
    until the child exits, so the (daemon) thread's lifetime tracks the
    server's.  The child is terminated when the interpreter exits.
    """
    import atexit

    def term(p):
        p.terminate()

    p = subprocess.Popen(command)
    atexit.register(term, p)
    # BUG FIX: without waiting, the thread returned immediately after
    # spawning the process, defeating the purpose of running it in a thread.
    p.wait()
def set_error_summary_key(trainer, key):
    """Record which summary tag should be mirrored as ``train/error``.

    The key is normalised to live under the ``train/`` prefix before being
    stored in ``trainer.runtime['error_summary_key']``.
    """
    normalized = key if key.startswith('train/') else 'train/' + key
    trainer.runtime['error_summary_key'] = normalized
# Source file: TensorArtist/tartist/plugins/trainer_enhancer/summary.py
# -*- coding:utf8 -*-
# File : summary.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 2/26/17
#
# This file is part of TensorArtist.
import collections
import collections.abc
import json
import math
import os
import os.path as osp
import random
import shutil
import subprocess
import threading

import tensorflow as tf

from tartist.core import get_logger, get_env, io
from tartist.data.rflow.utils import get_addr
from tartist.nn.tfutils import format_summary_name, clean_summary_suffix
logger = get_logger()
summary_async_lock = threading.Lock()
class SummaryHistoryManager(object):
    """Accumulates scalar summary values over time.

    Two kinds of histories are kept:

    * ``'scalar'`` values, appended synchronously from the training loop;
    * ``'async_scalar'`` values, appended from other threads under a
      module-level lock, with a per-key "last query" cursor so each value
      is only averaged once.
    """

    def __init__(self):
        self._summaries = {}             # key -> list of recorded float values
        self._summaries_type = {}        # key -> 'scalar' / 'async_scalar' / ...
        self._summaries_last_query = {}  # key -> index of first unconsumed value

    @property
    def all_summaries(self):
        """All known summary keys, regardless of type."""
        return self.get_all_summaries()

    def get_all_summaries(self, type=None):
        """Return summary keys, optionally restricted to a given type."""
        if type is None:
            return list(self._summaries_type.keys())
        return [key for key, key_type in self._summaries_type.items() if key_type == type]

    def clear_all(self):
        """Drop every recorded value (registered types are kept)."""
        self._summaries = {}

    def clear(self, key):
        """Drop the recorded values for a single key."""
        self._summaries[key] = []

    def put_scalar(self, key, value):
        """Append one synchronously recorded scalar value."""
        self._summaries.setdefault(key, []).append(float(value))

    def put_async_scalar(self, key, value):
        """Append one scalar value from another thread (lock-protected)."""
        value = float(value)
        with summary_async_lock:
            self._summaries.setdefault(key, []).append(value)

    def put_summaries(self, summaries):
        """Ingest every simple value from a Summary proto as a scalar."""
        for val in summaries.value:
            if val.WhichOneof('value') != 'simple_value':
                continue
            # XXX: do hacks here
            val.tag = format_summary_name(val.tag)
            self.put_scalar(val.tag, val.simple_value)
            self.set_type(val.tag, 'scalar')

    def get(self, key):
        """Return the recorded values for *key* (empty list when unknown)."""
        return self._summaries.get(key, [])

    def has(self, key):
        """Whether any values have ever been recorded for *key*."""
        return key in self._summaries

    def get_type(self, key):
        """Return the registered type for *key*, or ``'unknown'``."""
        return self._summaries_type.get(key, 'unknown')

    def set_type(self, key, value, check=True):
        """Register the type for *key*, verifying consistency when *check*."""
        if check:
            previous = self.get_type(key)
            assert previous in ('unknown', value), 'summary type mismatched'
        self._summaries_type[key] = value

    def _do_average(self, values, meth):
        """Reduce a list of values with the named aggregation method."""
        assert meth in ['avg', 'max', 'min', 'sum', 'std']
        if meth == 'max':
            return max(values)
        if meth == 'min':
            return min(values)
        if meth == 'sum':
            return sum(values)
        # Epsilon in the denominator makes the empty-history average 0
        # instead of raising ZeroDivisionError.
        denom = len(values) + 1e-4
        if meth == 'avg':
            return sum(values) / denom
        # meth == 'std': sqrt(E[x^2] - E[x]^2)
        mean = sum(values) / denom
        return math.sqrt(sum(v ** 2 for v in values) / denom - mean ** 2)

    def average(self, key, top_k=None, meth='avg'):
        """Aggregate the history of *key*.

        For ``'scalar'`` keys only the last *top_k* values are used (all of
        them when *top_k* is None).  For ``'async_scalar'`` keys only the
        values recorded since the last :meth:`update_last_query` call are
        used.  Returns the string ``'N/A'`` when nothing can be aggregated.
        """
        summary_type = self.get_type(key)
        if summary_type == 'scalar':
            values = self._summaries.get(key, [])
            if top_k is not None:
                values = values[-top_k:]
            return self._do_average(values, meth)
        if summary_type == 'async_scalar':
            with summary_async_lock:
                recorded = self._summaries.get(key, [])
                pending = recorded[self._summaries_last_query.get(key, 0):]
            if pending:
                return self._do_average(pending, meth)
        return 'N/A'

    def update_last_query(self, key):
        """Mark every currently recorded value of *key* as consumed."""
        summary_type = self.get_type(key)
        assert summary_type.startswith('async_'), (summary_type, key)
        self._summaries_last_query[key] = len(self._summaries.get(key, []))
def put_summary_history(trainer, summaries):
    """Forward a summary proto to the trainer's history manager.

    Requires :func:`enable_summary_history` to have installed a
    ``SummaryHistoryManager`` in ``trainer.runtime`` beforehand.
    """
    manager = trainer.runtime.get('summary_histories', None)
    assert manager is not None, 'you should first enable summary history'
    manager.put_summaries(summaries)
def put_summary_history_scalar(trainer, name, value):
    """Record a single scalar value under *name* in the summary history.

    Requires :func:`enable_summary_history` to have installed a
    ``SummaryHistoryManager`` in ``trainer.runtime`` beforehand.
    """
    manager = trainer.runtime.get('summary_histories', None)
    assert manager is not None, 'you should first enable summary history'
    manager.set_type(name, 'scalar')
    manager.put_scalar(name, value)
def enable_summary_history(trainer, extra_summary_types=None):
    """Attach summary-history tracking to *trainer*.

    Registers two event handlers:

    * ``optimization:before`` -- creates a fresh :class:`SummaryHistoryManager`
      in ``trainer.runtime['summary_histories']``, optionally pre-seeding
      summary types from *extra_summary_types* (a mapping of tag -> type).
    * ``iter:after`` -- feeds the per-iteration summaries from
      ``trainer.runtime['summaries']`` into the manager, synthesising
      ``train/loss`` and ``train/error`` entries when they are missing.
    """
    def check_proto_contains(proto, tag):
        # True iff the Summary proto already has a value with the given tag.
        if proto is None:
            return False
        for v in proto.value:
            if v.tag == tag:
                return True
        return False

    def summary_history_on_optimization_before(trainer):
        trainer.runtime['summary_histories'] = SummaryHistoryManager()
        if extra_summary_types is not None:
            for k, v in extra_summary_types.items():
                trainer.runtime['summary_histories'].set_type(k, v)

    def summary_history_on_iter_after(trainer, inp, out):
        mgr = trainer.runtime['summary_histories']
        if 'summaries' in trainer.runtime:
            summaries = trainer.runtime['summaries']
        else:
            summaries = tf.Summary()
        # BUG FIX: collections.Iterable was removed in Python 3.10; use the
        # collections.abc alias instead.
        if isinstance(summaries, collections.abc.Iterable):
            for s in summaries:
                put_summary_history(trainer, s)
        else:
            if 'loss' in trainer.runtime and not check_proto_contains(summaries, 'train/loss'):
                summaries.value.add(tag='train/loss', simple_value=trainer.runtime['loss'])
            error_summary_key = trainer.runtime.get('error_summary_key', None)
            if mgr.has(error_summary_key):
                if not check_proto_contains(summaries, 'train/error'):
                    for v in summaries.value:
                        if clean_summary_suffix(v.tag) == error_summary_key:
                            trainer.runtime['error'] = v.simple_value
                    summaries.value.add(tag='train/error', simple_value=trainer.runtime['error'])
            put_summary_history(trainer, summaries)

    trainer.register_event('optimization:before', summary_history_on_optimization_before)
    trainer.register_event('iter:after', summary_history_on_iter_after, priority=8)
def put_tensorboard_summary(trainer, summary, use_internal_gs=False):
    """Write *summary* to the trainer's tensorboard writer, if one exists.

    When *use_internal_gs* is true, a dedicated counter in
    ``trainer.runtime['tensorboard_global_step']`` is advanced and used as
    the global step; otherwise ``trainer.runtime['global_step']`` (falling
    back to ``trainer.iter``) is used.
    """
    if use_internal_gs:
        step = trainer.runtime.get('tensorboard_global_step', 0) + 1
        trainer.runtime['tensorboard_global_step'] = step
    else:
        step = trainer.runtime.get('global_step', trainer.iter)
    if hasattr(trainer, '_tensorboard_writer'):
        trainer._tensorboard_writer.add_summary(summary, step)
def put_summary_json(trainer, data):
    """Append *data* as one JSON line to the trainer's summary json file."""
    record = json.dumps(data) + '\n'
    with open(trainer.runtime['json_summary_path'], 'a') as f:
        f.write(record)
def enable_echo_summary_scalar(trainer, summary_spec=None,
                               enable_json=True, enable_tensorboard=True, enable_tensorboard_web=True,
                               json_path=None, tensorboard_path=None, tensorboard_web_port=None):
    """Echo averaged scalar summaries at the end of every epoch.

    Args:
        trainer: the trainer to attach event handlers to.
        summary_spec: optional mapping of summary key -> list of aggregation
            methods (``'avg'``/``'max'``/``'min'``/``'sum'``/``'std'``);
            defaults to ``['avg']`` per key.
        enable_json: also append the per-epoch statistics to a json file.
        enable_tensorboard: also write summaries via a tensorboard writer.
        enable_tensorboard_web: spawn a tensorboard web server subprocess.
        json_path: override for the json summary file path.
        tensorboard_path: override for the tensorboard log directory.
        tensorboard_web_port: preferred port for the tensorboard web server
            (a random dynamic-range port is chosen when None).
    """
    if summary_spec is None:
        summary_spec = {}

    def summary_history_scalar_on_epoch_after(trainer):
        mgr = trainer.runtime['summary_histories']
        extra_summary = tf.Summary()
        log_strs = ['Summaries: epoch = {}'.format(trainer.epoch)]
        log_json = dict(epoch=trainer.epoch)
        for k in sorted(mgr.get_all_summaries('scalar')):
            spec = summary_spec.get(k, ['avg'])
            for meth in spec:
                if not k.startswith('inference'):  # do hack for inference
                    avg = mgr.average(k, trainer.epoch_size, meth=meth)
                else:
                    avg = mgr.average(k, trainer.runtime['inference_epoch_size'], meth=meth)
                # MJY(20170623): add stat prefix
                tag = 'stat/{}/{}'.format(k, meth)
                if avg != 'N/A':
                    extra_summary.value.add(tag=tag, simple_value=avg)
                    log_strs.append(' {} = {}'.format(tag, avg))
                    log_json[tag] = avg
        for k in sorted(mgr.get_all_summaries('async_scalar')):
            spec = summary_spec.get(k, ['avg'])
            for meth in spec:
                avg = mgr.average(k, meth=meth)
                tag = '{}/{}'.format(k, meth)
                if avg != 'N/A':
                    extra_summary.value.add(tag=tag, simple_value=avg)
                    log_json[tag] = avg
                    log_strs.append(' {} = {}'.format(tag, avg))
            # consume the async values so they are only reported once
            mgr.update_last_query(k)
        if len(log_strs) > 1:
            logger.info('\n'.join(log_strs))
        if enable_tensorboard and not trainer.runtime['zero_iter']:
            put_tensorboard_summary(trainer, extra_summary)
        if enable_json and not trainer.runtime['zero_iter']:
            put_summary_json(trainer, log_json)
        if enable_tensorboard and hasattr(trainer, '_tensorboard_webserver'):
            logger.info('Open your tensorboard webpage at http://{}:{}'.format(get_addr(),
                        trainer.runtime['tensorboard_web_port']))

    def json_summary_enable(trainer, js_path=json_path):
        # Start each (non-restored) run with a clean summary file.
        if js_path is None:
            js_path = osp.join(get_env('dir.root'), 'summary.json')
        restored = 'restore_snapshot' in trainer.runtime
        if osp.exists(js_path) and not restored:
            logger.warn('Removing old summary json: {}.'.format(js_path))
            os.remove(js_path)
        trainer.runtime['json_summary_path'] = js_path

    def tensorboard_summary_enable(trainer, tb_path=tensorboard_path):
        # Start each (non-restored) run with a clean tensorboard directory.
        if tb_path is None:
            tb_path = osp.join(get_env('dir.root'), 'tensorboard')
        restored = 'restore_snapshot' in trainer.runtime
        if osp.exists(tb_path) and not restored:
            logger.warn('Removing old tensorboard directory: {}.'.format(tb_path))
            shutil.rmtree(tb_path)
        io.mkdir(tb_path)
        trainer.runtime['tensorboard_summary_path'] = tb_path
        trainer._tensorboard_writer = tf.summary.FileWriter(tb_path, graph=trainer.env.graph)
        if enable_tensorboard_web:
            # BUG FIX: the explicitly requested port was previously ignored,
            # and randrange() was called with a float bound, which raises on
            # Python 3.12+.  A port restored from runtime still wins.
            if tensorboard_web_port is not None:
                port = tensorboard_web_port
            else:
                port = random.randrange(49152, 65536)
            port = trainer.runtime.get('tensorboard_web_port', port)
            trainer._tensorboard_webserver = threading.Thread(
                target=_tensorboard_webserver_thread,
                args=['tensorboard', '--logdir', tb_path, '--port', str(port)],
                daemon=True)
            trainer._tensorboard_webserver.start()
            trainer.runtime['tensorboard_web_port'] = port

    def tensorboard_summary_write(trainer, inp, out):
        if 'summaries' in trainer.runtime and not trainer.runtime['zero_iter']:
            summaries = trainer.runtime['summaries']
            # BUG FIX: collections.Iterable was removed in Python 3.10.
            if isinstance(summaries, collections.abc.Iterable):
                for s in summaries:
                    put_tensorboard_summary(trainer, s, use_internal_gs=True)
            else:
                put_tensorboard_summary(trainer, summaries)

    trainer.register_event('epoch:after', summary_history_scalar_on_epoch_after)
    if enable_json:
        trainer.register_event('optimization:before', json_summary_enable)
    if enable_tensorboard:
        trainer.register_event('optimization:before', tensorboard_summary_enable)
        trainer.register_event('iter:after', tensorboard_summary_write, priority=9)
def _tensorboard_webserver_thread(*command):
    """Run the tensorboard web server as a child process.

    Intended as a ``threading.Thread`` target: spawns *command* and blocks
    until the child exits, so the (daemon) thread's lifetime tracks the
    server's.  The child is terminated when the interpreter exits.
    """
    import atexit

    def term(p):
        p.terminate()

    p = subprocess.Popen(command)
    atexit.register(term, p)
    # BUG FIX: without waiting, the thread returned immediately after
    # spawning the process, defeating the purpose of running it in a thread.
    p.wait()
def set_error_summary_key(trainer, key):
    """Record which summary tag should be mirrored as ``train/error``.

    The key is normalised to live under the ``train/`` prefix before being
    stored in ``trainer.runtime['error_summary_key']``.
    """
    normalized = key if key.startswith('train/') else 'train/' + key
    trainer.runtime['error_summary_key'] = normalized
| en | 0.500839 | # -*- coding:utf8 -*- # File : summary.py # Author : <NAME> # Email : <EMAIL> # Date : 2/26/17 # # This file is part of TensorArtist. # XXX: do hacks here # do hack for inference # MJY(20170623): add stat prefix | 1.866648 | 2 |
conformer/__init__.py | zhengx18/conformer | 0 | 6612602 | <gh_stars>0
from conformer.conformer import ConformerConvModule
| from conformer.conformer import ConformerConvModule | none | 1 | 1.14558 | 1 | |
lunas/iterator.py | MicrohexHQ/Lunas | 0 | 6612603 | <gh_stars>0
from collections import deque
from typing import List, Dict, Callable, Any
from overrides import overrides
from lunas.batch import Batch, Cache
from lunas.persistable import Persistable
from lunas.readers import BaseReader
from lunas.utils import get_state_dict, load_state_dict
class BaseIterator(Persistable):
def __init__(self) -> None:
"""Initialize the iterator.
Args:
reader: A `Reader` object.
batch_size: A `int` scalar that limits the size of returned batch.
padded_size: A `int` scalar that limits the size of resulting batch tensor.
cache_size: A `int` scalar. Prefetch `cache_size` samples from the `reader` in `self.cache`.
sample_size_fn: (Optional.) A callable function that calculates size for each sample.
The size of each sample will then be summed up as the size of the batch. If not
specified, default to 1 for each sample, which is equivalent to `lambda sample: 1`.
padded_size_fn: (Optional.) A callable function that returns the padded size given a set of samples.
collate_fn: (Optional.) A callable function that converts a list of samples to model inputs.
sort_cache_by: (Optional.) A callable function that returns a sorting key for each sample. If not
specified, leave the cache as it is. The samples will be sorted in ascending order.
sort_batch_by: (Optional.) A callable function that returns a sorting key for each sample. If not
specified, leave the batch as it is. The samples will be sorted in ascending order.
drop_tails: (Optional.) Whether the last samples of the dataset that cannot fill a batch should be dropped.
strip_batch:
"""
super().__init__()
# bookkeeping params
self._step_in_epoch = 0
self._step = 0
self._epoch = 0
self._inclusions = ['_inclusions', '_step', '_step_in_epoch', '_epoch']
@property
def step_in_epoch(self):
return self._step_in_epoch
@property
def step(self):
return self._step
@property
def epoch(self):
return self._epoch
def reset(self):
self._step_in_epoch = 0
self._step = 0
self._epoch = 0
def reset_epoch(self):
self._step_in_epoch = 0
def iter_epoch(self, before_epoch=None, after_epoch=None):
raise NotImplementedError
def while_true(self, predicate: Callable[[], bool], before_epoch=None, after_epoch=None):
"""Iterates through the dataset by a given stopping criteria.
Args:
predicate: A callable function. This function is evaluated to determine
whether iteration should continue or not.
before_epoch:
after_epoch:
Returns:
(batch, inputs): A `Tuple` consists of a `Batch` object and model inputs. When `self.collate_fn`
is None, the returned `inputs` is also None.
"""
epoch_iter = self.iter_epoch(before_epoch, after_epoch)
if predicate is not None:
while predicate():
try:
batch = next(epoch_iter)
except StopIteration:
epoch_iter = self.iter_epoch(before_epoch, after_epoch)
continue
yield batch
else:
for batch in epoch_iter:
yield batch
@overrides
def state_dict(self) -> Dict:
return get_state_dict(self, recursive=True, inclusions=self._inclusions)
@overrides
def load_state_dict(self, state_dict: Dict) -> None:
load_state_dict(self, state_dict)
def __call__(self, while_predicate: Callable[[], bool] = None, before_epoch=None, after_epoch=None):
return self.while_true(while_predicate, before_epoch, after_epoch)
class Iterator(BaseIterator):
"""An iterator that iterates through a `Reader`.
This class performs multi-pass iterations over the dataset and maintains
the iteration state.
"""
def __init__(self, reader: BaseReader, batch_size, padded_size=None, cache_size: int = 1000,
sample_size_fn: Callable[[Any], int] = None,
padded_size_fn: Callable[[List[Any]], int] = None,
collate_fn: Callable[[List[Any]], Any] = lambda x:x, sort_cache_by: Callable[[Any], int] = None,
sort_batch_by: Callable[[Any], int] = None,
drop_tails=False, strip_batch=False):
"""Initialize the iterator.
Args:
reader: A `Reader` object.
batch_size: A `int` scalar that limits the size of returned batch.
padded_size: A `int` scalar that limits the size of resulting batch tensor.
cache_size: A `int` scalar. Prefetch `cache_size` samples from the `reader` in `self.cache`.
sample_size_fn: (Optional.) A callable function that calculates size for each sample.
The size of each sample will then be summed up as the size of the batch. If not
specified, default to 1 for each sample, which is equivalent to `lambda sample: 1`.
padded_size_fn: (Optional.) A callable function that returns the padded size given a set of samples.
collate_fn: (Optional.) A callable function that converts a list of samples to model inputs.
sort_cache_by: (Optional.) A callable function that returns a sorting key for each sample. If not
specified, leave the cache as it is. The samples will be sorted in ascending order.
sort_batch_by: (Optional.) A callable function that returns a sorting key for each sample. If not
specified, leave the batch as it is. The samples will be sorted in ascending order.
drop_tails: (Optional.) Whether the last samples of the dataset that cannot fill a batch should be dropped.
strip_batch:
"""
super().__init__()
self._reader = reader
self._batch_size = batch_size
self._padded_size = padded_size
self._cache_size = cache_size
self._sample_size_fn = sample_size_fn
self._padded_size_fn = padded_size_fn
self._collate_fn = collate_fn
self._sort_cache_by = sort_cache_by
self._sort_batch_by = sort_batch_by
self._drop_tails = drop_tails
self._strip_batch = strip_batch
self._cache = Cache(cache_size, sample_size_fn)
self._remains: deque = deque()
self._stripped: deque = deque()
self._inclusions += ['_reader', '_cache', '_remains','_stripped']
self.check_batch_size(batch_size, cache_size)
self.reset()
@property
def cache_size(self):
return self._cache_size
@property
def batch_size(self):
return self._batch_size
def set_batch_size(self, batch_size) -> None:
"""Allows dynamic batch size at runtime.
Args:
batch_size: A `int` scalar.
"""
self.check_batch_size(batch_size)
self._batch_size = batch_size
def check_batch_size(self, batch_size, cache_size=None) -> None:
"""Checks whether batch_size is < cache_size.
To ensure rationality, batch_size must be < cache_size.
Args:
batch_size: A `int` scalar.
cache_size: A `int` scalar.
"""
cache_size = cache_size or self._cache_size
if batch_size > cache_size:
raise RuntimeError(
f'Batch size ({batch_size}) should be less than cache size ({cache_size}). '
f'Please lower the batch size or increase the cache size.'
)
@overrides
def reset(self):
super().reset()
self._remains.clear()
self._cache.pop_all() # discard
self._reader = iter(self._reader)
@overrides
def reset_epoch(self):
super().reset_epoch()
self._step_in_epoch = 0
self._remains.clear()
self._cache.pop_all()
@overrides
def iter_epoch(self, before_epoch=None, after_epoch=None):
"""Iterate through the dataset for one epoch.
For the last batch, it will be dropped if its size is smaller
than 2/3 of the specified batch size.
"""
# self.reset_epoch()
cache = self._cache
remains = self._remains
stripped = self._stripped
end_of_epoch = False
sort_batch = False
if before_epoch is not None and self.step_in_epoch == 0:
before_epoch()
while True:
batch = Batch(self.batch_size, self._sample_size_fn, self._padded_size, self._padded_size_fn)
if cache.effective_size < self.batch_size * 2 / 3.0:
if end_of_epoch:
# Raise error when the whole dataset cannot form a batch
if self.step == 0:
raise RuntimeError(
f'Size of the dataset ({len(remains)}) '
f'is smaller than batch size ({self.batch_size}). '
f'Please lower the batch size or '
f'check whether the dataset is too small.'
)
self._reader = iter(self._reader)
if self._drop_tails or len(remains) == 0:
break
else:
# The last batch
batch.from_deque(remains, self.batch_size)
batch.from_iter(cache, self.batch_size)
batch.sort(self._sort_batch_by or self._sort_cache_by)
self._step_in_epoch += 1
self._step += 1
yield self._prepare_batch(batch)
break
# Consume samples from cache before filling-in
remains += cache.pop_all()
try:
# Fill cache
cache.from_iter(self._reader, raise_when_stopped=True)
except StopIteration:
# Mark as end
end_of_epoch = True
cache.sort(self._sort_cache_by)
if self.batch_size == self.cache_size:
# Simply return the cache as a batch to avoid sorting again.
batch = cache
cache = Cache(self.cache_size, self._sample_size_fn)
self._cache = cache
else:
if stripped:
batch.from_deque(stripped, self.batch_size)
if remains:
batch.from_deque(remains, self.batch_size)
sort_batch = True
size_diff = batch.from_iter(cache, self.batch_size)
sort_batch = size_diff > 0 or sort_batch
if not batch.filled:
# the cache is exhausted while batch is not filled
# for the last unfilled batch, revert it
if end_of_epoch:
batch.revert()
remains += batch.pop_all()
else:
# filled
# strip batch size
if self._strip_batch:
stripped += batch.strip(self.batch_size)
if sort_batch:
batch.sort(self._sort_batch_by or self._sort_cache_by)
sort_batch = False
self._step_in_epoch += 1
self._step += 1
yield self._prepare_batch(batch)
if after_epoch is not None:
after_epoch()
self._epoch += 1
self._step_in_epoch = 0
def _prepare_batch(self, batch: Batch):
if self._collate_fn:
batch.process(self._collate_fn)
return batch
class GroupIterator(BaseIterator):
def __init__(self,iterator:BaseIterator,size:int) -> None:
super().__init__()
self._iterator=iterator
self._size=size
self._inclusions+=['_iterator','_size']
self.reset()
@overrides
def iter_epoch(self, before_epoch=None, after_epoch=None):
if before_epoch is not None and self.step_in_epoch ==0:
before_epoch()
group=[]
for i,batch in enumerate(self._iterator.iter_epoch(before_epoch, after_epoch), 1):
group.append(batch)
if i % self._size==0:
self._step_in_epoch+=1
self._step+=1
yield group
group=[]
if group:
self._step_in_epoch+=1
self._step+=1
yield group
group=[]
self._epoch+=1
self._step_in_epoch=0
| from collections import deque
from typing import List, Dict, Callable, Any
from overrides import overrides
from lunas.batch import Batch, Cache
from lunas.persistable import Persistable
from lunas.readers import BaseReader
from lunas.utils import get_state_dict, load_state_dict
class BaseIterator(Persistable):
def __init__(self) -> None:
"""Initialize the iterator.
Args:
reader: A `Reader` object.
batch_size: A `int` scalar that limits the size of returned batch.
padded_size: A `int` scalar that limits the size of resulting batch tensor.
cache_size: A `int` scalar. Prefetch `cache_size` samples from the `reader` in `self.cache`.
sample_size_fn: (Optional.) A callable function that calculates size for each sample.
The size of each sample will then be summed up as the size of the batch. If not
specified, default to 1 for each sample, which is equivalent to `lambda sample: 1`.
padded_size_fn: (Optional.) A callable function that returns the padded size given a set of samples.
collate_fn: (Optional.) A callable function that converts a list of samples to model inputs.
sort_cache_by: (Optional.) A callable function that returns a sorting key for each sample. If not
specified, leave the cache as it is. The samples will be sorted in ascending order.
sort_batch_by: (Optional.) A callable function that returns a sorting key for each sample. If not
specified, leave the batch as it is. The samples will be sorted in ascending order.
drop_tails: (Optional.) Whether the last samples of the dataset that cannot fill a batch should be dropped.
strip_batch:
"""
super().__init__()
# bookkeeping params
self._step_in_epoch = 0
self._step = 0
self._epoch = 0
self._inclusions = ['_inclusions', '_step', '_step_in_epoch', '_epoch']
@property
def step_in_epoch(self):
return self._step_in_epoch
@property
def step(self):
return self._step
@property
def epoch(self):
return self._epoch
def reset(self):
self._step_in_epoch = 0
self._step = 0
self._epoch = 0
def reset_epoch(self):
self._step_in_epoch = 0
def iter_epoch(self, before_epoch=None, after_epoch=None):
raise NotImplementedError
def while_true(self, predicate: Callable[[], bool], before_epoch=None, after_epoch=None):
"""Iterates through the dataset by a given stopping criteria.
Args:
predicate: A callable function. This function is evaluated to determine
whether iteration should continue or not.
before_epoch:
after_epoch:
Returns:
(batch, inputs): A `Tuple` consists of a `Batch` object and model inputs. When `self.collate_fn`
is None, the returned `inputs` is also None.
"""
epoch_iter = self.iter_epoch(before_epoch, after_epoch)
if predicate is not None:
while predicate():
try:
batch = next(epoch_iter)
except StopIteration:
epoch_iter = self.iter_epoch(before_epoch, after_epoch)
continue
yield batch
else:
for batch in epoch_iter:
yield batch
@overrides
def state_dict(self) -> Dict:
return get_state_dict(self, recursive=True, inclusions=self._inclusions)
@overrides
def load_state_dict(self, state_dict: Dict) -> None:
load_state_dict(self, state_dict)
def __call__(self, while_predicate: Callable[[], bool] = None, before_epoch=None, after_epoch=None):
return self.while_true(while_predicate, before_epoch, after_epoch)
class Iterator(BaseIterator):
"""An iterator that iterates through a `Reader`.
This class performs multi-pass iterations over the dataset and maintains
the iteration state.
"""
def __init__(self, reader: BaseReader, batch_size, padded_size=None, cache_size: int = 1000,
sample_size_fn: Callable[[Any], int] = None,
padded_size_fn: Callable[[List[Any]], int] = None,
collate_fn: Callable[[List[Any]], Any] = lambda x:x, sort_cache_by: Callable[[Any], int] = None,
sort_batch_by: Callable[[Any], int] = None,
drop_tails=False, strip_batch=False):
"""Initialize the iterator.
Args:
reader: A `Reader` object.
batch_size: A `int` scalar that limits the size of returned batch.
padded_size: A `int` scalar that limits the size of resulting batch tensor.
cache_size: A `int` scalar. Prefetch `cache_size` samples from the `reader` in `self.cache`.
sample_size_fn: (Optional.) A callable function that calculates size for each sample.
The size of each sample will then be summed up as the size of the batch. If not
specified, default to 1 for each sample, which is equivalent to `lambda sample: 1`.
padded_size_fn: (Optional.) A callable function that returns the padded size given a set of samples.
collate_fn: (Optional.) A callable function that converts a list of samples to model inputs.
sort_cache_by: (Optional.) A callable function that returns a sorting key for each sample. If not
specified, leave the cache as it is. The samples will be sorted in ascending order.
sort_batch_by: (Optional.) A callable function that returns a sorting key for each sample. If not
specified, leave the batch as it is. The samples will be sorted in ascending order.
drop_tails: (Optional.) Whether the last samples of the dataset that cannot fill a batch should be dropped.
strip_batch:
"""
super().__init__()
self._reader = reader
self._batch_size = batch_size
self._padded_size = padded_size
self._cache_size = cache_size
self._sample_size_fn = sample_size_fn
self._padded_size_fn = padded_size_fn
self._collate_fn = collate_fn
self._sort_cache_by = sort_cache_by
self._sort_batch_by = sort_batch_by
self._drop_tails = drop_tails
self._strip_batch = strip_batch
self._cache = Cache(cache_size, sample_size_fn)
self._remains: deque = deque()
self._stripped: deque = deque()
self._inclusions += ['_reader', '_cache', '_remains','_stripped']
self.check_batch_size(batch_size, cache_size)
self.reset()
@property
def cache_size(self):
return self._cache_size
@property
def batch_size(self):
return self._batch_size
def set_batch_size(self, batch_size) -> None:
"""Allows dynamic batch size at runtime.
Args:
batch_size: A `int` scalar.
"""
self.check_batch_size(batch_size)
self._batch_size = batch_size
def check_batch_size(self, batch_size, cache_size=None) -> None:
"""Checks whether batch_size is < cache_size.
To ensure rationality, batch_size must be < cache_size.
Args:
batch_size: A `int` scalar.
cache_size: A `int` scalar.
"""
cache_size = cache_size or self._cache_size
if batch_size > cache_size:
raise RuntimeError(
f'Batch size ({batch_size}) should be less than cache size ({cache_size}). '
f'Please lower the batch size or increase the cache size.'
)
@overrides
def reset(self):
super().reset()
self._remains.clear()
self._cache.pop_all() # discard
self._reader = iter(self._reader)
@overrides
def reset_epoch(self):
super().reset_epoch()
self._step_in_epoch = 0
self._remains.clear()
self._cache.pop_all()
@overrides
def iter_epoch(self, before_epoch=None, after_epoch=None):
"""Iterate through the dataset for one epoch.
For the last batch, it will be dropped if its size is smaller
than 2/3 of the specified batch size.
"""
# self.reset_epoch()
cache = self._cache
remains = self._remains
stripped = self._stripped
end_of_epoch = False
sort_batch = False
if before_epoch is not None and self.step_in_epoch == 0:
before_epoch()
while True:
batch = Batch(self.batch_size, self._sample_size_fn, self._padded_size, self._padded_size_fn)
if cache.effective_size < self.batch_size * 2 / 3.0:
if end_of_epoch:
# Raise error when the whole dataset cannot form a batch
if self.step == 0:
raise RuntimeError(
f'Size of the dataset ({len(remains)}) '
f'is smaller than batch size ({self.batch_size}). '
f'Please lower the batch size or '
f'check whether the dataset is too small.'
)
self._reader = iter(self._reader)
if self._drop_tails or len(remains) == 0:
break
else:
# The last batch
batch.from_deque(remains, self.batch_size)
batch.from_iter(cache, self.batch_size)
batch.sort(self._sort_batch_by or self._sort_cache_by)
self._step_in_epoch += 1
self._step += 1
yield self._prepare_batch(batch)
break
# Consume samples from cache before filling-in
remains += cache.pop_all()
try:
# Fill cache
cache.from_iter(self._reader, raise_when_stopped=True)
except StopIteration:
# Mark as end
end_of_epoch = True
cache.sort(self._sort_cache_by)
if self.batch_size == self.cache_size:
# Simply return the cache as a batch to avoid sorting again.
batch = cache
cache = Cache(self.cache_size, self._sample_size_fn)
self._cache = cache
else:
if stripped:
batch.from_deque(stripped, self.batch_size)
if remains:
batch.from_deque(remains, self.batch_size)
sort_batch = True
size_diff = batch.from_iter(cache, self.batch_size)
sort_batch = size_diff > 0 or sort_batch
if not batch.filled:
# the cache is exhausted while batch is not filled
# for the last unfilled batch, revert it
if end_of_epoch:
batch.revert()
remains += batch.pop_all()
else:
# filled
# strip batch size
if self._strip_batch:
stripped += batch.strip(self.batch_size)
if sort_batch:
batch.sort(self._sort_batch_by or self._sort_cache_by)
sort_batch = False
self._step_in_epoch += 1
self._step += 1
yield self._prepare_batch(batch)
if after_epoch is not None:
after_epoch()
self._epoch += 1
self._step_in_epoch = 0
def _prepare_batch(self, batch: Batch):
if self._collate_fn:
batch.process(self._collate_fn)
return batch
class GroupIterator(BaseIterator):
def __init__(self,iterator:BaseIterator,size:int) -> None:
super().__init__()
self._iterator=iterator
self._size=size
self._inclusions+=['_iterator','_size']
self.reset()
@overrides
def iter_epoch(self, before_epoch=None, after_epoch=None):
if before_epoch is not None and self.step_in_epoch ==0:
before_epoch()
group=[]
for i,batch in enumerate(self._iterator.iter_epoch(before_epoch, after_epoch), 1):
group.append(batch)
if i % self._size==0:
self._step_in_epoch+=1
self._step+=1
yield group
group=[]
if group:
self._step_in_epoch+=1
self._step+=1
yield group
group=[]
self._epoch+=1
self._step_in_epoch=0 | en | 0.748119 | Initialize the iterator. Args: reader: A `Reader` object. batch_size: A `int` scalar that limits the size of returned batch. padded_size: A `int` scalar that limits the size of resulting batch tensor. cache_size: A `int` scalar. Prefetch `cache_size` samples from the `reader` in `self.cache`. sample_size_fn: (Optional.) A callable function that calculates size for each sample. The size of each sample will then be summed up as the size of the batch. If not specified, default to 1 for each sample, which is equivalent to `lambda sample: 1`. padded_size_fn: (Optional.) A callable function that returns the padded size given a set of samples. collate_fn: (Optional.) A callable function that converts a list of samples to model inputs. sort_cache_by: (Optional.) A callable function that returns a sorting key for each sample. If not specified, leave the cache as it is. The samples will be sorted in ascending order. sort_batch_by: (Optional.) A callable function that returns a sorting key for each sample. If not specified, leave the batch as it is. The samples will be sorted in ascending order. drop_tails: (Optional.) Whether the last samples of the dataset that cannot fill a batch should be dropped. strip_batch: # bookkeeping params Iterates through the dataset by a given stopping criteria. Args: predicate: A callable function. This function is evaluated to determine whether iteration should continue or not. before_epoch: after_epoch: Returns: (batch, inputs): A `Tuple` consists of a `Batch` object and model inputs. When `self.collate_fn` is None, the returned `inputs` is also None. An iterator that iterates through a `Reader`. This class performs multi-pass iterations over the dataset and maintains the iteration state. Initialize the iterator. Args: reader: A `Reader` object. batch_size: A `int` scalar that limits the size of returned batch. padded_size: A `int` scalar that limits the size of resulting batch tensor. 
cache_size: A `int` scalar. Prefetch `cache_size` samples from the `reader` in `self.cache`. sample_size_fn: (Optional.) A callable function that calculates size for each sample. The size of each sample will then be summed up as the size of the batch. If not specified, default to 1 for each sample, which is equivalent to `lambda sample: 1`. padded_size_fn: (Optional.) A callable function that returns the padded size given a set of samples. collate_fn: (Optional.) A callable function that converts a list of samples to model inputs. sort_cache_by: (Optional.) A callable function that returns a sorting key for each sample. If not specified, leave the cache as it is. The samples will be sorted in ascending order. sort_batch_by: (Optional.) A callable function that returns a sorting key for each sample. If not specified, leave the batch as it is. The samples will be sorted in ascending order. drop_tails: (Optional.) Whether the last samples of the dataset that cannot fill a batch should be dropped. strip_batch: Allows dynamic batch size at runtime. Args: batch_size: A `int` scalar. Checks whether batch_size is < cache_size. To ensure rationality, batch_size must be < cache_size. Args: batch_size: A `int` scalar. cache_size: A `int` scalar. # discard Iterate through the dataset for one epoch. For the last batch, it will be dropped if its size is smaller than 2/3 of the specified batch size. # self.reset_epoch() # Raise error when the whole dataset cannot form a batch # The last batch # Consume samples from cache before filling-in # Fill cache # Mark as end # Simply return the cache as a batch to avoid sorting again. # the cache is exhausted while batch is not filled # for the last unfilled batch, revert it # filled # strip batch size | 2.413343 | 2 |
generated-sources/python/mojang-api/openapi_client/com/github/asyncmc/mojang/api/python/api/skin_operations_api.py | AsyncMC/Mojang-API-Libs | 0 | 6612604 | # coding: utf-8
"""
Mojang API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2020-06-05
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
class SkinOperationsApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    def __init__(self, api_client=None):
        """Create the API facade; falls back to a default ApiClient when none is given."""
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def change_player_skin(self, stripped_uuid, url, **kwargs): # noqa: E501
        """Changes the player skin by URL # noqa: E501
        This will set the skin for the selected profile, but Mojang's servers will fetch the skin from a URL. This will also work for legacy accounts. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.change_player_skin(stripped_uuid, url, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str stripped_uuid: The player UUID without hyphens (required)
        :param str url: The URL which Mojang servers will download and apply the skin (required)
        :param SkinModel model:
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant and return
        # only the response payload (or the request thread when async_req=True).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.change_player_skin_with_http_info(stripped_uuid, url, **kwargs) # noqa: E501
        else:
            (data) = self.change_player_skin_with_http_info(stripped_uuid, url, **kwargs) # noqa: E501
            return data
    def change_player_skin_with_http_info(self, stripped_uuid, url, **kwargs): # noqa: E501
        """Changes the player skin by URL # noqa: E501
        This will set the skin for the selected profile, but Mojang's servers will fetch the skin from a URL. This will also work for legacy accounts. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.change_player_skin_with_http_info(stripped_uuid, url, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str stripped_uuid: The player UUID without hyphens (required)
        :param str url: The URL which Mojang servers will download and apply the skin (required)
        :param SkinModel model:
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        # Snapshot the call arguments via locals() before kwargs are
        # validated and merged into the same dict.
        local_var_params = locals()
        all_params = ['stripped_uuid', 'url', 'model'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method change_player_skin" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'stripped_uuid' is set
        if ('stripped_uuid' not in local_var_params or
                local_var_params['stripped_uuid'] is None):
            raise ValueError("Missing the required parameter `stripped_uuid` when calling `change_player_skin`") # noqa: E501
        # verify the required parameter 'url' is set
        if ('url' not in local_var_params or
                local_var_params['url'] is None):
            raise ValueError("Missing the required parameter `url` when calling `change_player_skin`") # noqa: E501
        # Assemble path, query, header and form parameters for the request.
        collection_formats = {}
        path_params = {}
        if 'stripped_uuid' in local_var_params:
            path_params['stripped_uuid'] = local_var_params['stripped_uuid'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        if 'model' in local_var_params:
            form_params.append(('model', local_var_params['model'])) # noqa: E501
        if 'url' in local_var_params:
            form_params.append(('url', local_var_params['url'])) # noqa: E501
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/x-www-form-urlencoded']) # noqa: E501
        # Authentication setting
        auth_settings = ['PlayerAccessToken'] # noqa: E501
        return self.api_client.call_api(
            '/user/profile/{stripped_uuid}/skin', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None, # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
    def reset_player_skin(self, stripped_uuid, **kwargs): # noqa: E501
        """Resets the player skin to default # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.reset_player_skin(stripped_uuid, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str stripped_uuid: The player UUID without hyphens (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant and return
        # only the response payload (or the request thread when async_req=True).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.reset_player_skin_with_http_info(stripped_uuid, **kwargs) # noqa: E501
        else:
            (data) = self.reset_player_skin_with_http_info(stripped_uuid, **kwargs) # noqa: E501
            return data
    def reset_player_skin_with_http_info(self, stripped_uuid, **kwargs): # noqa: E501
        """Resets the player skin to default # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.reset_player_skin_with_http_info(stripped_uuid, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str stripped_uuid: The player UUID without hyphens (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        # Snapshot the call arguments via locals() before kwargs are
        # validated and merged into the same dict.
        local_var_params = locals()
        all_params = ['stripped_uuid'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method reset_player_skin" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'stripped_uuid' is set
        if ('stripped_uuid' not in local_var_params or
                local_var_params['stripped_uuid'] is None):
            raise ValueError("Missing the required parameter `stripped_uuid` when calling `reset_player_skin`") # noqa: E501
        # Assemble path, query and header parameters for the request.
        collection_formats = {}
        path_params = {}
        if 'stripped_uuid' in local_var_params:
            path_params['stripped_uuid'] = local_var_params['stripped_uuid'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['PlayerAccessToken'] # noqa: E501
        return self.api_client.call_api(
            '/user/profile/{stripped_uuid}/skin', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None, # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
    def upload_player_skin(self, stripped_uuid, file, **kwargs): # noqa: E501
        """Changes the player skin by upload # noqa: E501
        This uploads a skin to Mojang's servers. It also sets the users skin. This works on legacy counts as well. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.upload_player_skin(stripped_uuid, file, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str stripped_uuid: The player UUID without hyphens (required)
        :param file file: The skin image in PNG format (required)
        :param SkinModel model:
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant and return
        # only the response payload (or the request thread when async_req=True).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.upload_player_skin_with_http_info(stripped_uuid, file, **kwargs) # noqa: E501
        else:
            (data) = self.upload_player_skin_with_http_info(stripped_uuid, file, **kwargs) # noqa: E501
            return data
    def upload_player_skin_with_http_info(self, stripped_uuid, file, **kwargs): # noqa: E501
        """Changes the player skin by upload # noqa: E501
        This uploads a skin to Mojang's servers. It also sets the users skin. This works on legacy counts as well. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.upload_player_skin_with_http_info(stripped_uuid, file, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str stripped_uuid: The player UUID without hyphens (required)
        :param file file: The skin image in PNG format (required)
        :param SkinModel model:
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        # Snapshot the call arguments via locals() before kwargs are
        # validated and merged into the same dict.
        local_var_params = locals()
        all_params = ['stripped_uuid', 'file', 'model'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method upload_player_skin" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'stripped_uuid' is set
        if ('stripped_uuid' not in local_var_params or
                local_var_params['stripped_uuid'] is None):
            raise ValueError("Missing the required parameter `stripped_uuid` when calling `upload_player_skin`") # noqa: E501
        # verify the required parameter 'file' is set
        if ('file' not in local_var_params or
                local_var_params['file'] is None):
            raise ValueError("Missing the required parameter `file` when calling `upload_player_skin`") # noqa: E501
        # Assemble path, header and multipart form parameters; the skin image
        # goes into local_var_files so it is sent as a file part.
        collection_formats = {}
        path_params = {}
        if 'stripped_uuid' in local_var_params:
            path_params['stripped_uuid'] = local_var_params['stripped_uuid'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        if 'model' in local_var_params:
            form_params.append(('model', local_var_params['model'])) # noqa: E501
        if 'file' in local_var_params:
            local_var_files['file'] = local_var_params['file'] # noqa: E501
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['multipart/form-data']) # noqa: E501
        # Authentication setting
        auth_settings = ['PlayerAccessToken'] # noqa: E501
        return self.api_client.call_api(
            '/user/profile/{stripped_uuid}/skin', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None, # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| # coding: utf-8
"""
Mojang API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2020-06-05
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
class SkinOperationsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def change_player_skin(self, stripped_uuid, url, **kwargs): # noqa: E501
"""Changes the player skin by URL # noqa: E501
This will set the skin for the selected profile, but Mojang's servers will fetch the skin from a URL. This will also work for legacy accounts. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.change_player_skin(stripped_uuid, url, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str stripped_uuid: The player UUID without hyphens (required)
:param str url: The URL which Mojang servers will download and apply the skin (required)
:param SkinModel model:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.change_player_skin_with_http_info(stripped_uuid, url, **kwargs) # noqa: E501
else:
(data) = self.change_player_skin_with_http_info(stripped_uuid, url, **kwargs) # noqa: E501
return data
def change_player_skin_with_http_info(self, stripped_uuid, url, **kwargs): # noqa: E501
"""Changes the player skin by URL # noqa: E501
This will set the skin for the selected profile, but Mojang's servers will fetch the skin from a URL. This will also work for legacy accounts. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.change_player_skin_with_http_info(stripped_uuid, url, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str stripped_uuid: The player UUID without hyphens (required)
:param str url: The URL which Mojang servers will download and apply the skin (required)
:param SkinModel model:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['stripped_uuid', 'url', 'model'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method change_player_skin" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'stripped_uuid' is set
if ('stripped_uuid' not in local_var_params or
local_var_params['stripped_uuid'] is None):
raise ValueError("Missing the required parameter `stripped_uuid` when calling `change_player_skin`") # noqa: E501
# verify the required parameter 'url' is set
if ('url' not in local_var_params or
local_var_params['url'] is None):
raise ValueError("Missing the required parameter `url` when calling `change_player_skin`") # noqa: E501
collection_formats = {}
path_params = {}
if 'stripped_uuid' in local_var_params:
path_params['stripped_uuid'] = local_var_params['stripped_uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'model' in local_var_params:
form_params.append(('model', local_var_params['model'])) # noqa: E501
if 'url' in local_var_params:
form_params.append(('url', local_var_params['url'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['PlayerAccessToken'] # noqa: E501
return self.api_client.call_api(
'/user/profile/{stripped_uuid}/skin', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def reset_player_skin(self, stripped_uuid, **kwargs): # noqa: E501
"""Resets the player skin to default # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_player_skin(stripped_uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str stripped_uuid: The player UUID without hyphens (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.reset_player_skin_with_http_info(stripped_uuid, **kwargs) # noqa: E501
else:
(data) = self.reset_player_skin_with_http_info(stripped_uuid, **kwargs) # noqa: E501
return data
def reset_player_skin_with_http_info(self, stripped_uuid, **kwargs): # noqa: E501
"""Resets the player skin to default # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_player_skin_with_http_info(stripped_uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str stripped_uuid: The player UUID without hyphens (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['stripped_uuid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method reset_player_skin" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'stripped_uuid' is set
if ('stripped_uuid' not in local_var_params or
local_var_params['stripped_uuid'] is None):
raise ValueError("Missing the required parameter `stripped_uuid` when calling `reset_player_skin`") # noqa: E501
collection_formats = {}
path_params = {}
if 'stripped_uuid' in local_var_params:
path_params['stripped_uuid'] = local_var_params['stripped_uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['PlayerAccessToken'] # noqa: E501
return self.api_client.call_api(
'/user/profile/{stripped_uuid}/skin', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def upload_player_skin(self, stripped_uuid, file, **kwargs): # noqa: E501
"""Changes the player skin by upload # noqa: E501
This uploads a skin to Mojang's servers. It also sets the users skin. This works on legacy counts as well. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_player_skin(stripped_uuid, file, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str stripped_uuid: The player UUID without hyphens (required)
:param file file: The skin image in PNG format (required)
:param SkinModel model:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.upload_player_skin_with_http_info(stripped_uuid, file, **kwargs) # noqa: E501
else:
(data) = self.upload_player_skin_with_http_info(stripped_uuid, file, **kwargs) # noqa: E501
return data
def upload_player_skin_with_http_info(self, stripped_uuid, file, **kwargs): # noqa: E501
"""Changes the player skin by upload # noqa: E501
This uploads a skin to Mojang's servers. It also sets the users skin. This works on legacy counts as well. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_player_skin_with_http_info(stripped_uuid, file, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str stripped_uuid: The player UUID without hyphens (required)
:param file file: The skin image in PNG format (required)
:param SkinModel model:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['stripped_uuid', 'file', 'model'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method upload_player_skin" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'stripped_uuid' is set
if ('stripped_uuid' not in local_var_params or
local_var_params['stripped_uuid'] is None):
raise ValueError("Missing the required parameter `stripped_uuid` when calling `upload_player_skin`") # noqa: E501
# verify the required parameter 'file' is set
if ('file' not in local_var_params or
local_var_params['file'] is None):
raise ValueError("Missing the required parameter `file` when calling `upload_player_skin`") # noqa: E501
collection_formats = {}
path_params = {}
if 'stripped_uuid' in local_var_params:
path_params['stripped_uuid'] = local_var_params['stripped_uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'model' in local_var_params:
form_params.append(('model', local_var_params['model'])) # noqa: E501
if 'file' in local_var_params:
local_var_files['file'] = local_var_params['file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['PlayerAccessToken'] # noqa: E501
return self.api_client.call_api(
'/user/profile/{stripped_uuid}/skin', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| en | 0.713868 | # coding: utf-8 Mojang API No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 OpenAPI spec version: 2020-06-05 Generated by: https://openapi-generator.tech # noqa: F401 # python 2 and python 3 compatibility library NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. # noqa: E501 Changes the player skin by URL # noqa: E501 This will set the skin for the selected profile, but Mojang's servers will fetch the skin from a URL. This will also work for legacy accounts. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.change_player_skin(stripped_uuid, url, async_req=True) >>> result = thread.get() :param async_req bool :param str stripped_uuid: The player UUID without hyphens (required) :param str url: The URL which Mojang servers will download and apply the skin (required) :param SkinModel model: :return: None If the method is called asynchronously, returns the request thread. # noqa: E501 # noqa: E501 # noqa: E501 Changes the player skin by URL # noqa: E501 This will set the skin for the selected profile, but Mojang's servers will fetch the skin from a URL. This will also work for legacy accounts. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.change_player_skin_with_http_info(stripped_uuid, url, async_req=True) >>> result = thread.get() :param async_req bool :param str stripped_uuid: The player UUID without hyphens (required) :param str url: The URL which Mojang servers will download and apply the skin (required) :param SkinModel model: :return: None If the method is called asynchronously, returns the request thread. 
# noqa: E501 # verify the required parameter 'stripped_uuid' is set # noqa: E501 # verify the required parameter 'url' is set # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # HTTP header `Accept` # noqa: E501 # HTTP header `Content-Type` # noqa: E501 # noqa: E501 # Authentication setting # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 Resets the player skin to default # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.reset_player_skin(stripped_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str stripped_uuid: The player UUID without hyphens (required) :return: None If the method is called asynchronously, returns the request thread. # noqa: E501 # noqa: E501 # noqa: E501 Resets the player skin to default # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.reset_player_skin_with_http_info(stripped_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str stripped_uuid: The player UUID without hyphens (required) :return: None If the method is called asynchronously, returns the request thread. # noqa: E501 # verify the required parameter 'stripped_uuid' is set # noqa: E501 # noqa: E501 # HTTP header `Accept` # noqa: E501 # Authentication setting # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 Changes the player skin by upload # noqa: E501 This uploads a skin to Mojang's servers. It also sets the users skin. This works on legacy counts as well. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.upload_player_skin(stripped_uuid, file, async_req=True) >>> result = thread.get() :param async_req bool :param str stripped_uuid: The player UUID without hyphens (required) :param file file: The skin image in PNG format (required) :param SkinModel model: :return: None If the method is called asynchronously, returns the request thread. # noqa: E501 # noqa: E501 # noqa: E501 Changes the player skin by upload # noqa: E501 This uploads a skin to Mojang's servers. It also sets the users skin. This works on legacy counts as well. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.upload_player_skin_with_http_info(stripped_uuid, file, async_req=True) >>> result = thread.get() :param async_req bool :param str stripped_uuid: The player UUID without hyphens (required) :param file file: The skin image in PNG format (required) :param SkinModel model: :return: None If the method is called asynchronously, returns the request thread. # noqa: E501 # verify the required parameter 'stripped_uuid' is set # noqa: E501 # verify the required parameter 'file' is set # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # HTTP header `Accept` # noqa: E501 # HTTP header `Content-Type` # noqa: E501 # noqa: E501 # Authentication setting # noqa: E501 # noqa: E501 # noqa: E501 | 1.890782 | 2 |
7day/re/Re14.py | jsjang93/joony | 0 | 6612605 | import re
# Regex demos: re.search scans anywhere in the string, re.match only matches
# at the beginning.  Raw strings (r"...") are used so backslash sequences like
# \d are not treated as (invalid) string escapes.
print(re.search(r"\d", "우기는 1994년에 입대하였습니다"))    # first single digit
print(re.search(r"\d+", "우기는 1994년에 입대하였습니다"))   # first run of digits
print(re.match(r"\d+", "우기는 1994년에 입대하였습니다"))    # None: string does not start with a digit
print(re.match(r"\d+", "1994년에 우기는 입대하였습니다"))    # matches the leading digits
print(re.findall(r"\d+", "우기는 1994년에 5월 31에 입대하였습니다"))  # every digit run
print(re.split(r"[:]+", "사과 귤 : 포도 토마토"))   # split only on colon runs
print(re.split(r"[: ]+", "사과 귤 : 포도 토마토"))  # split on runs of colons and spaces
print(re.sub("-", "**", "123-456-7890"))            # replace every hyphen
| import re
print(re.search("\d","우기는 1994년에 입대하였습니다"))
print(re.search("\d+","우기는 1994년에 입대하였습니다"))
print(re.match("\d+","우기는 1994년에 입대하였습니다"))
print(re.match("\d+","1994년에 우기는 입대하였습니다"))
print(re.findall("\d+","우기는 1994년에 5월 31에 입대하였습니다"))
print(re.split("[:]+","사과 귤 : 포도 토마토"))
print(re.split("[: ]+","사과 귤 : 포도 토마토"))
print(re.sub("-","**","123-456-7890"))
| none | 1 | 3.110664 | 3 | |
msldap/commons/proxy.py | opexxx/msldap | 7 | 6612606 |
#!/usr/bin/env python3
#
# Author:
# <NAME> (@skelsec)
#
import enum
class LDAPProxyType(enum.Enum):
    """Supported proxy transport types for tunneling the LDAP connection.

    NOTE(review): the ``*_SSL`` members presumably denote the same transport
    with TLS wrapped around the proxied stream -- confirm against the
    connection code that consumes this enum.
    """
    SOCKS5 = 'SOCKS5'
    SOCKS5_SSL = 'SOCKS5_SSL'
    MULTIPLEXOR = 'MULTIPLEXOR'
    MULTIPLEXOR_SSL = 'MULTIPLEXOR_SSL'
class MSLDAPProxy:
    """Container for the proxy configuration of an MSLDAP connection.

    Every field starts out unset (``None`` / ``{}``) except ``port`` (1080,
    the conventional SOCKS port) and ``timeout`` (10 seconds).
    """
    def __init__(self):
        # The insertion order of these attributes matters: __str__ reports
        # the fields in exactly this order via __dict__.
        for attr, default in (
                ('ip', None),
                ('port', 1080),
                ('timeout', 10),
                ('proxy_type', None),
                ('username', None),
                ('domain', None),
                ('secret', None),
                ('secret_type', None),
                ('settings', {}),
        ):
            setattr(self, attr, default)

    def __str__(self):
        """Render all fields as a CRLF-terminated key/value listing."""
        lines = ['==== MSLDAPProxy ====']
        for attr, value in self.__dict__.items():
            lines.append('%s: %s' % (attr, value))
        return '\r\n'.join(lines) + '\r\n'
|
#!/usr/bin/env python3
#
# Author:
# <NAME> (@skelsec)
#
import enum
class LDAPProxyType(enum.Enum):
SOCKS5 = 'SOCKS5'
SOCKS5_SSL = 'SOCKS5_SSL'
MULTIPLEXOR = 'MULTIPLEXOR'
MULTIPLEXOR_SSL = 'MULTIPLEXOR_SSL'
class MSLDAPProxy:
def __init__(self):
self.ip = None
self.port = 1080
self.timeout = 10
self.proxy_type = None
self.username = None
self.domain = None
self.secret = None
self.secret_type = None
self.settings = {}
def __str__(self):
t = '==== MSLDAPProxy ====\r\n'
for k in self.__dict__:
t += '%s: %s\r\n' % (k, self.__dict__[k])
return t
| en | 0.217151 | #!/usr/bin/env python3 # # Author: # <NAME> (@skelsec) # | 2.568233 | 3 |
platform/radio/efr32_multiphy_configurator/pro2_chip_configurator/src/si4440_modem_calc/dict2xml.py | lmnotran/gecko_sdk | 82 | 6612607 | '''
Created on Apr 9, 2013
@author: sesuskic
'''
from xml.dom.minidom import Document
from collections import OrderedDict
__all__ = ["dict2xml"]
class dict2xml(object):
    """Convert a (possibly nested) dict into a pretty-printed XML document.

    The input mapping must contain exactly one top-level key, which becomes
    the document's root element.  Nested dicts become child elements, lists
    become numbered sibling elements (``name_00``, ``name_01``, ...) and any
    other value is rendered as a text node via ``str()``.
    """

    def __init__(self, structure):
        """Build the DOM tree for *structure*.

        Note: if *structure* does not contain exactly one key, no root
        element is created and the resulting document stays empty (this
        mirrors the original behavior).
        """
        self.doc = Document()
        if len(structure) == 1:
            root_name = str(next(iter(structure)))
            self.root = self.doc.createElement(root_name)
            self.doc.appendChild(self.root)
            self.build(self.root, structure[root_name])

    def build(self, father, structure):
        """Recursively attach *structure* underneath the *father* element."""
        # isinstance also accepts dict subclasses such as OrderedDict.
        if isinstance(structure, dict):
            for key in structure:
                tag = self.doc.createElement(key)
                father.appendChild(tag)
                self.build(tag, structure[key])
        elif isinstance(structure, list):
            # Lists expand into numbered siblings: <name_00>, <name_01>, ...
            tag_name = father.tagName
            for idx, item in enumerate(structure):
                tag = self.doc.createElement(tag_name + '_{:02}'.format(idx))
                self.build(tag, item)
                father.appendChild(tag)
        else:
            # Leaf value: render as a text node.
            father.appendChild(self.doc.createTextNode(str(structure)))

    def display(self):
        """Return the document serialized as a pretty-printed XML string."""
        return self.doc.toprettyxml(indent=" ")
| '''
Created on Apr 9, 2013
@author: sesuskic
'''
from xml.dom.minidom import Document
from collections import OrderedDict
__all__ = ["dict2xml"]
class dict2xml(object):
    """Convert a single-rooted dict/OrderedDict structure into an XML doc.

    Nested dicts become child elements, lists become numbered siblings
    (<name_00>, <name_01>, ...) and other values become text nodes.
    """
    def __init__(self, structure):
        self.doc = Document()
        # Only a one-key mapping can map onto a single XML root; otherwise
        # the document silently stays empty.
        if len(structure) == 1:
            k = list(structure.keys())
            rootName = str(k[0])
            self.root = self.doc.createElement(rootName)
            self.doc.appendChild(self.root)
            self.build(self.root, structure[rootName])
    def build(self, father, structure):
        """Recursively append *structure* underneath the *father* element."""
        # NOTE(review): exact type comparisons miss other dict subclasses;
        # isinstance(structure, dict) would cover both cases below.
        if (type(structure) == dict or
            type(structure) == OrderedDict):
            for k in structure:
                tag = self.doc.createElement(k)
                father.appendChild(tag)
                self.build(tag, structure[k])
        elif type(structure) == list:
            tagName = father.tagName
            # NOTE(review): this assignment is dead -- the loop below
            # immediately rebinds `tag` for every list item.
            tag = self.doc.createElement(tagName)
            idx = 0
            # grandFather.removeChild(father)
            for l in structure:
                # Each list item gets a numbered element: <tagName_00> etc.
                tag = self.doc.createElement(tagName + '_{:02}'.format(idx))
                self.build(tag, l)
                father.appendChild(tag)
                idx += 1
        else:
            # Leaf: stringify and attach as a text node.
            data = str(structure)
            tag = self.doc.createTextNode(data)
            father.appendChild(tag)
    def display(self):
        """Return the document pretty-printed with two-space indentation."""
        return self.doc.toprettyxml(indent="  ")
| en | 0.763375 | Created on Apr 9, 2013 @author: sesuskic # grandFather.removeChild(father) | 3.160775 | 3 |
sinewave_plot.py | randbrown/PyWaveTools | 1 | 6612608 | """ Generate sine wave tone and plot the wav and frequency (using FFT) """
import wavelib
import plotlib
DURATION = 1.0  # seconds of audio to generate

def main():
    """Generate a DURATION-long A4 sine tone, save it and plot wave + FFT."""
    sample_times = wavelib.createtimes(DURATION)
    samples = wavelib.normalize(wavelib.sinewave(sample_times, wavelib.FREQ_A4))
    wavelib.write_wave_file('output/sinewave1.wav', samples)
    plotlib.plot_wave_and_fft(sample_times, samples)

main()
| """ Generate sine wave tone and plot the wav and frequency (using FFT) """
import wavelib
import plotlib
DURATION = 1.0 # seconds
def main():
    """Generate a 1-second A4 sine tone, save it, and plot wave + FFT."""
    # times is array of values at each time slot of the whole wav file
    times = wavelib.createtimes(DURATION)
    vals = wavelib.sinewave(times, wavelib.FREQ_A4)
    vals = wavelib.normalize(vals)
    wavelib.write_wave_file('output/sinewave1.wav', vals)
    # wavelib.plot_show(times, vals)
    # wavelib.fft_plot(times, vals)
    plotlib.plot_wave_and_fft(times, vals)
main()
| en | 0.704065 | Generate sine wave tone and plot the wav and frequency (using FFT) # seconds main function # times is array of values at each time slot of the whole wav file # wavelib.plot_show(times, vals) # wavelib.fft_plot(times, vals) | 3.474156 | 3 |
backandforth.py | JohnnyLeibniz/kindling-bot | 0 | 6612609 | <reponame>JohnnyLeibniz/kindling-bot
import discord
from discord.ext import commands
class Add_Remove(commands.Cog):
    """Cog with basic moderation commands: kick, ban, unban (plus a demo)."""
    def __init__(self, client):
        # BUG FIX: was `_init_` (single underscores) -- a plain method, so the
        # constructor never ran and `self.client` was never assigned.
        self.client = client
    @commands.Cog.listener()
    async def on_ready(self):
        # Fired once the bot connection is ready.
        print('(Add & Remove) log is ready.')
    @commands.command()
    async def addbranch(self, ctx):
        """Flavor command: acknowledge in the invoking channel."""
        await ctx.send('A branch has been added to the fire.')
    #----------------------------
    # KICKING/BANNING/UNBANNING
    #----------------------------
    @commands.command()
    async def kick(self, ctx, member: discord.Member, *, reason=None):
        """Kick *member* from the guild, with an optional reason."""
        await member.kick(reason=reason)
        await ctx.send(f'{member.mention} has been kicked.')
    @commands.command()
    async def ban(self, ctx, member: discord.Member, *, reason=None):
        """Ban *member* from the guild, with an optional reason."""
        await member.ban(reason=reason)
        await ctx.send(f'{member.mention} has been banned.')
    @commands.command()
    async def unban(self, ctx, *, member):
        """Unban a user given as 'name#discriminator'."""
        banned_users = await ctx.guild.bans()
        member_name, member_discriminator = member.split('#')
        for ban_entry in banned_users:
            user = ban_entry.user
            if (user.name, user.discriminator) == (member_name, member_discriminator):
                await ctx.guild.unban(user)
                await ctx.send(f'{user.name}#{user.discriminator} has been unbanned.')
#-------
# SETUP
#-------
def setup(client):
    # discord.py extension hook: called by client.load_extension() to
    # register this cog on the bot.
    client.add_cog(Add_Remove(client))
from discord.ext import commands
class Add_Remove(commands.Cog):
    """Cog with basic moderation commands: kick, ban, unban (plus a demo)."""
    def __init__(self, client):
        # BUG FIX: was `_init_` (single underscores) -- a plain method, so the
        # constructor never ran and `self.client` was never assigned.
        self.client = client
    @commands.Cog.listener()
    async def on_ready(self):
        # Fired once the bot connection is ready.
        print('(Add & Remove) log is ready.')
    @commands.command()
    async def addbranch(self, ctx):
        """Flavor command: acknowledge in the invoking channel."""
        await ctx.send('A branch has been added to the fire.')
    #----------------------------
    # KICKING/BANNING/UNBANNING
    #----------------------------
    @commands.command()
    async def kick(self, ctx, member: discord.Member, *, reason=None):
        """Kick *member* from the guild, with an optional reason."""
        await member.kick(reason=reason)
        await ctx.send(f'{member.mention} has been kicked.')
    @commands.command()
    async def ban(self, ctx, member: discord.Member, *, reason=None):
        """Ban *member* from the guild, with an optional reason."""
        await member.ban(reason=reason)
        await ctx.send(f'{member.mention} has been banned.')
    @commands.command()
    async def unban(self, ctx, *, member):
        """Unban a user given as 'name#discriminator'."""
        banned_users = await ctx.guild.bans()
        member_name, member_discriminator = member.split('#')
        for ban_entry in banned_users:
            user = ban_entry.user
            if (user.name, user.discriminator) == (member_name, member_discriminator):
                await ctx.guild.unban(user)
                await ctx.send(f'{user.name}#{user.discriminator} has been unbanned.')
#-------
# SETUP
#-------
def setup(client):
    # discord.py extension hook: called by client.load_extension() to
    # register this cog on the bot.
    client.add_cog(Add_Remove(client))
mychevy/debug.py | pedrorobsonleao/mychevy | 41 | 6612610 | # -*- coding: utf-8 -*-
"""Console script for mychevy."""
import configparser
import logging
import click
from mychevy.mychevy import MyChevy, ServerError
# Shared click settings: accept -h in addition to --help.
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])

@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--config', '-c', type=click.File('r'),
              required=True,
              help="Config file with my.chevy credentials")
@click.option('--verbose', '-v', default=False, is_flag=True,
              help="Run more verbose")
def main(config=None, verbose=False):
    """Console script for mychevy"""
    cfg = configparser.ConfigParser()
    cfg.read_file(config)
    if verbose:
        logging.basicConfig(level=logging.DEBUG)
    account = MyChevy(cfg["default"]["user"], cfg["default"]["passwd"])
    click.echo("Logging in... this takes a bit")
    account.login()
    account.get_cars()
    click.echo("Displaying found cars")
    for car in account.cars:
        click.echo(car)
    click.echo("Updating cars with data")
    try:
        account.update_cars()
        click.echo("Displaying found cars with data")
        for car in account.cars:
            click.echo(car)
    except ServerError as err:
        click.echo("OnStar Network Failure: %s" % err)

if __name__ == "__main__":
    main()
| # -*- coding: utf-8 -*-
"""Console script for mychevy."""
import configparser
import logging
import click
from mychevy.mychevy import MyChevy, ServerError
# Shared click settings: accept -h in addition to --help.
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--config', '-c', type=click.File('r'),
              required=True,
              help="Config file with my.chevy credentials")
@click.option('--verbose', '-v', default=False, is_flag=True,
              help="Run more verbose")
def main(config=None, verbose=False):
    """Console script for mychevy"""
    # Read credentials from the already-opened config file handle.
    cfile = configparser.ConfigParser()
    cfile.read_file(config)
    if verbose:
        logging.basicConfig(level=logging.DEBUG)
    page = MyChevy(cfile["default"]["user"], cfile["default"]["passwd"])
    click.echo("Logging in... this takes a bit")
    page.login()
    page.get_cars()
    click.echo("Displaying found cars")
    for c in page.cars:
        click.echo(c)
    # update_cars() hits the OnStar backend, which may fail server-side.
    click.echo("Updating cars with data")
    try:
        page.update_cars()
        click.echo("Displaying found cars with data")
        for c in page.cars:
            click.echo(c)
    except ServerError as e:
        click.echo("OnStar Network Failure: %s" % e)
if __name__ == "__main__":
    main()
| en | 0.63835 | # -*- coding: utf-8 -*- Console script for mychevy. Console script for mychevy | 2.293796 | 2 |
main.py | Haesky/projetisn | 2 | 6612611 | <filename>main.py
from tkinter import *
def run1():
    # Close the menu window, then start game 1 (presumably `runaway` runs
    # its game loop at import time -- confirm in runaway.py).
    fenetre.destroy()
    import runaway
def run2():
    # Close the menu window, then start game 2 (presumably `fall` runs
    # its game loop at import time -- confirm in fall.py).
    fenetre.destroy()
    import fall
# Main menu window (button labels are in French).
fenetre = Tk()
# Top frame: banner image shown as a button.
Frame1 = Frame(fenetre)
photo = PhotoImage(file="ressources/runaway.png")  # module-level ref keeps the image alive
buttonimg = Button(Frame1, image=photo)
buttonimg.grid()
Frame1.grid()
# Bottom frame: game-1, game-2 and quit buttons.
Frame2 = Frame(fenetre)
bouton1 = Button(Frame2, text="Jouer - 1", font="Arial 30", command=run1)
bouton1.grid(row=2, column=1)
bouton2 = Button(Frame2, text="Jouer - 2", font="Arial 30", command=run2)
bouton2.grid(row=2, column=2)
bouton3 = Button(Frame2, text="Fermer", font="Arial 30",command=fenetre.quit)
bouton3.grid(row=2, column=3)
Frame2.grid()
fenetre.mainloop()
| <filename>main.py
from tkinter import *
def run1():
    # Close the menu window, then start game 1 (presumably `runaway` runs
    # its game loop at import time -- confirm in runaway.py).
    fenetre.destroy()
    import runaway
def run2():
    # Close the menu window, then start game 2 (presumably `fall` runs
    # its game loop at import time -- confirm in fall.py).
    fenetre.destroy()
    import fall
# Main menu window (button labels are in French).
fenetre = Tk()
# Top frame: banner image shown as a button.
Frame1 = Frame(fenetre)
photo = PhotoImage(file="ressources/runaway.png")  # module-level ref keeps the image alive
buttonimg = Button(Frame1, image=photo)
buttonimg.grid()
Frame1.grid()
# Bottom frame: game-1, game-2 and quit buttons.
Frame2 = Frame(fenetre)
bouton1 = Button(Frame2, text="Jouer - 1", font="Arial 30", command=run1)
bouton1.grid(row=2, column=1)
bouton2 = Button(Frame2, text="Jouer - 2", font="Arial 30", command=run2)
bouton2.grid(row=2, column=2)
bouton3 = Button(Frame2, text="Fermer", font="Arial 30",command=fenetre.quit)
bouton3.grid(row=2, column=3)
Frame2.grid()
fenetre.mainloop()
| none | 1 | 3.278812 | 3 | |
tests/quara/interface/qutip/test_api.py | tknrsgym/quara | 3 | 6612612 | <filename>tests/quara/interface/qutip/test_api.py
from quara.protocol.qtomography.standard.standard_qpt import StandardQpt
from quara.protocol.qtomography.standard.standard_povmt import StandardPovmt
from quara.interface.qutip.api import (
estimate_standard_povmt_from_qutip,
estimate_standard_qpt_from_qutip,
estimate_standard_qst_from_qutip,
)
from quara.protocol.qtomography.standard.standard_qst import StandardQst
import numpy as np
import numpy.testing as npt
import pytest
from quara.interface.qutip.conversion import (
convert_state_quara_to_qutip,
convert_povm_quara_to_qutip,
convert_gate_quara_to_qutip,
)
from quara.objects.composite_system_typical import generate_composite_system
from quara.objects.state_typical import generate_state_from_name
from quara.objects.povm_typical import generate_povm_from_name
from quara.objects.gate_typical import generate_gate_from_gate_name
def get_tester_state_names_1qubit():
    """Names of the tester states used for single-qubit tomography."""
    return "x0 y0 z0 z1".split()
def get_tester_state_names_1qutrit():
    """Names of the tester states used for single-qutrit tomography."""
    names = "01z0 12z0 02z1 01x0 01y0 12x0 12y0 02x0 02y0"
    return names.split()
def get_tester_povm_names_1qubit():
    """Names of the tester POVMs used for single-qubit tomography."""
    return list("xyz")
def get_tester_povm_names_1qutrit():
    """Names of the tester POVMs used for single-qutrit tomography."""
    return "01x3 01y3 z3 12x3 12y3 02x3 02y3".split()
@pytest.mark.qutip
@pytest.mark.parametrize(
    ("mode", "num", "true_state_name", "decimal"),
    [("qubit", 1, "z0", 4), ("qutrit", 1, "01z0", 4)],
)
def test_estimate_standard_qst_from_qutip(mode, num, true_state_name, decimal):
    """QST round-trip: estimating from exact probability distributions must
    reproduce the true state (linear and least-squares estimators)."""
    c_sys = generate_composite_system(mode, num)
    true_state = generate_state_from_name(c_sys, true_state_name)
    true_state_qutip = convert_state_quara_to_qutip(true_state)
    # Look the tester-name helper up by name; globals() is safer than eval().
    get_tester_povm_names_method = globals()[
        f"get_tester_povm_names_{int(num)}{mode}"
    ]
    tester_povm_names = get_tester_povm_names_method()
    tester_povms = []
    tester_povms_qutip = []
    for tester_povm_name in tester_povm_names:
        tester_povm = generate_povm_from_name(tester_povm_name, c_sys)
        tester_povms.append(tester_povm)
        tester_povms_qutip.append(convert_povm_quara_to_qutip(tester_povm))
    seed = 7896
    qst = StandardQst(
        tester_povms, on_para_eq_constraint=True, schedules="all", seed_data=seed
    )
    # Exact (weight 1) empirical distributions computed from the true state.
    prob_dists = [(1, np.array(pd)) for pd in qst.calc_prob_dists(true_state)]
    for estimator_name in ["linear", "least_squares"]:
        estimated_state_qutip = estimate_standard_qst_from_qutip(
            mode,
            num,
            tester_povms=tester_povms_qutip,
            empi_dists=prob_dists,
            estimator_name=estimator_name,
            schedules="all",
        )
        npt.assert_array_almost_equal(
            estimated_state_qutip.data.toarray(),
            true_state_qutip.data.toarray(),
            decimal=decimal,
        )
@pytest.mark.qutip
@pytest.mark.parametrize(
    ("mode", "num", "true_povm_name", "decimal"),
    [("qubit", 1, "z", 4), ("qutrit", 1, "z3", 4)],
)
def test_estimate_standard_povmt_from_qutip(mode, num, true_povm_name, decimal):
    """POVMT round-trip: estimating from exact probability distributions must
    reproduce the true POVM (linear and least-squares estimators)."""
    c_sys = generate_composite_system(mode, num)
    true_povm = generate_povm_from_name(true_povm_name, c_sys)
    true_povm_qutip = convert_povm_quara_to_qutip(true_povm)
    # Look the tester-name helper up by name; globals() is safer than eval().
    get_tester_state_names_method = globals()[
        f"get_tester_state_names_{int(num)}{mode}"
    ]
    tester_state_names = get_tester_state_names_method()
    tester_states = []
    tester_states_qutip = []
    for tester_state_name in tester_state_names:
        tester_state = generate_state_from_name(c_sys, tester_state_name)
        tester_states.append(tester_state)
        tester_states_qutip.append(convert_state_quara_to_qutip(tester_state))
    seed = 7896
    povmt = StandardPovmt(
        tester_states,
        true_povm.num_outcomes,
        on_para_eq_constraint=True,
        schedules="all",
        seed_data=seed,
    )
    # Exact (weight 1) empirical distributions computed from the true POVM.
    prob_dists = [(1, np.array(pd)) for pd in povmt.calc_prob_dists(true_povm)]
    for estimator_name in ["linear", "least_squares"]:
        estimated_povm_qutip = estimate_standard_povmt_from_qutip(
            mode,
            num,
            tester_states=tester_states_qutip,
            num_outcomes=true_povm.num_outcomes,
            empi_dists=prob_dists,
            estimator_name=estimator_name,
            schedules="all",
        )
        for estimated_item, true_item in zip(estimated_povm_qutip, true_povm_qutip):
            npt.assert_array_almost_equal(
                estimated_item.data.toarray(),
                true_item.data.toarray(),
                decimal=decimal,
            )
@pytest.mark.qutip
@pytest.mark.parametrize(
    ("mode", "num", "true_gate_name", "decimal"),
    [("qubit", 1, "identity", 4), ("qutrit", 1, "identity", 4)],
)
def test_estimate_standard_qpt_from_qutip(mode, num, true_gate_name, decimal):
    """QPT round-trip: estimating from exact probability distributions must
    reproduce the true gate (linear and least-squares estimators)."""
    c_sys = generate_composite_system(mode, num)
    true_gate = generate_gate_from_gate_name(true_gate_name, c_sys)
    true_gate_qutip = convert_gate_quara_to_qutip(true_gate)
    # Look the tester-name helpers up by name; globals() is safer than eval().
    tester_povm_names = globals()[f"get_tester_povm_names_{int(num)}{mode}"]()
    tester_povms = []
    tester_povms_qutip = []
    for tester_povm_name in tester_povm_names:
        tester_povm = generate_povm_from_name(tester_povm_name, c_sys)
        tester_povms.append(tester_povm)
        tester_povms_qutip.append(convert_povm_quara_to_qutip(tester_povm))
    tester_state_names = globals()[f"get_tester_state_names_{int(num)}{mode}"]()
    tester_states = []
    tester_states_qutip = []
    for tester_state_name in tester_state_names:
        tester_state = generate_state_from_name(c_sys, tester_state_name)
        tester_states.append(tester_state)
        tester_states_qutip.append(convert_state_quara_to_qutip(tester_state))
    seed = 7896
    qpt = StandardQpt(
        states=tester_states,
        povms=tester_povms,
        on_para_eq_constraint=True,
        schedules="all",
        seed_data=seed,
    )
    # Exact (weight 1) empirical distributions computed from the true gate.
    prob_dists = [(1, np.array(pd)) for pd in qpt.calc_prob_dists(true_gate)]
    for estimator_name in ["linear", "least_squares"]:
        estimated_gate_qutip = estimate_standard_qpt_from_qutip(
            mode,
            num,
            tester_states=tester_states_qutip,
            tester_povms=tester_povms_qutip,
            empi_dists=prob_dists,
            estimator_name=estimator_name,
            schedules="all",
        )
        npt.assert_array_almost_equal(
            estimated_gate_qutip.data.toarray(),
            true_gate_qutip.data.toarray(),
            decimal=decimal,
        )
| <filename>tests/quara/interface/qutip/test_api.py
from quara.protocol.qtomography.standard.standard_qpt import StandardQpt
from quara.protocol.qtomography.standard.standard_povmt import StandardPovmt
from quara.interface.qutip.api import (
estimate_standard_povmt_from_qutip,
estimate_standard_qpt_from_qutip,
estimate_standard_qst_from_qutip,
)
from quara.protocol.qtomography.standard.standard_qst import StandardQst
import numpy as np
import numpy.testing as npt
import pytest
from quara.interface.qutip.conversion import (
convert_state_quara_to_qutip,
convert_povm_quara_to_qutip,
convert_gate_quara_to_qutip,
)
from quara.objects.composite_system_typical import generate_composite_system
from quara.objects.state_typical import generate_state_from_name
from quara.objects.povm_typical import generate_povm_from_name
from quara.objects.gate_typical import generate_gate_from_gate_name
def get_tester_state_names_1qubit():
    """Names of the tester states used for single-qubit tomography."""
    return "x0 y0 z0 z1".split()
def get_tester_state_names_1qutrit():
    """Names of the tester states used for single-qutrit tomography."""
    names = "01z0 12z0 02z1 01x0 01y0 12x0 12y0 02x0 02y0"
    return names.split()
def get_tester_povm_names_1qubit():
    """Names of the tester POVMs used for single-qubit tomography."""
    return list("xyz")
def get_tester_povm_names_1qutrit():
    """Names of the tester POVMs used for single-qutrit tomography."""
    return "01x3 01y3 z3 12x3 12y3 02x3 02y3".split()
@pytest.mark.qutip
@pytest.mark.parametrize(
    ("mode", "num", "true_state_name", "decimal"),
    [("qubit", 1, "z0", 4), ("qutrit", 1, "01z0", 4)],
)
def test_estimate_standard_qst_from_qutip(mode, num, true_state_name, decimal):
    """QST round-trip: estimating from exact probability distributions must
    reproduce the true state (linear and least-squares estimators)."""
    c_sys = generate_composite_system(mode, num)
    true_state = generate_state_from_name(c_sys, true_state_name)
    true_state_qutip = convert_state_quara_to_qutip(true_state)
    # Look the tester-name helper up by name; globals() is safer than eval().
    get_tester_povm_names_method = globals()[
        f"get_tester_povm_names_{int(num)}{mode}"
    ]
    tester_povm_names = get_tester_povm_names_method()
    tester_povms = []
    tester_povms_qutip = []
    for tester_povm_name in tester_povm_names:
        tester_povm = generate_povm_from_name(tester_povm_name, c_sys)
        tester_povms.append(tester_povm)
        tester_povms_qutip.append(convert_povm_quara_to_qutip(tester_povm))
    seed = 7896
    qst = StandardQst(
        tester_povms, on_para_eq_constraint=True, schedules="all", seed_data=seed
    )
    # Exact (weight 1) empirical distributions computed from the true state.
    prob_dists = [(1, np.array(pd)) for pd in qst.calc_prob_dists(true_state)]
    for estimator_name in ["linear", "least_squares"]:
        estimated_state_qutip = estimate_standard_qst_from_qutip(
            mode,
            num,
            tester_povms=tester_povms_qutip,
            empi_dists=prob_dists,
            estimator_name=estimator_name,
            schedules="all",
        )
        npt.assert_array_almost_equal(
            estimated_state_qutip.data.toarray(),
            true_state_qutip.data.toarray(),
            decimal=decimal,
        )
@pytest.mark.qutip
@pytest.mark.parametrize(
    ("mode", "num", "true_povm_name", "decimal"),
    [("qubit", 1, "z", 4), ("qutrit", 1, "z3", 4)],
)
def test_estimate_standard_povmt_from_qutip(mode, num, true_povm_name, decimal):
    """POVMT round-trip: estimating from exact probability distributions must
    reproduce the true POVM (linear and least-squares estimators)."""
    c_sys = generate_composite_system(mode, num)
    true_povm = generate_povm_from_name(true_povm_name, c_sys)
    true_povm_qutip = convert_povm_quara_to_qutip(true_povm)
    # Look the tester-name helper up by name; globals() is safer than eval().
    get_tester_state_names_method = globals()[
        f"get_tester_state_names_{int(num)}{mode}"
    ]
    tester_state_names = get_tester_state_names_method()
    tester_states = []
    tester_states_qutip = []
    for tester_state_name in tester_state_names:
        tester_state = generate_state_from_name(c_sys, tester_state_name)
        tester_states.append(tester_state)
        tester_states_qutip.append(convert_state_quara_to_qutip(tester_state))
    seed = 7896
    povmt = StandardPovmt(
        tester_states,
        true_povm.num_outcomes,
        on_para_eq_constraint=True,
        schedules="all",
        seed_data=seed,
    )
    # Exact (weight 1) empirical distributions computed from the true POVM.
    prob_dists = [(1, np.array(pd)) for pd in povmt.calc_prob_dists(true_povm)]
    for estimator_name in ["linear", "least_squares"]:
        estimated_povm_qutip = estimate_standard_povmt_from_qutip(
            mode,
            num,
            tester_states=tester_states_qutip,
            num_outcomes=true_povm.num_outcomes,
            empi_dists=prob_dists,
            estimator_name=estimator_name,
            schedules="all",
        )
        for estimated_item, true_item in zip(estimated_povm_qutip, true_povm_qutip):
            npt.assert_array_almost_equal(
                estimated_item.data.toarray(),
                true_item.data.toarray(),
                decimal=decimal,
            )
@pytest.mark.qutip
@pytest.mark.parametrize(
    ("mode", "num", "true_gate_name", "decimal"),
    [("qubit", 1, "identity", 4), ("qutrit", 1, "identity", 4)],
)
def test_estimate_standard_qpt_from_qutip(mode, num, true_gate_name, decimal):
    """QPT round-trip: estimating from exact probability distributions must
    reproduce the true gate (linear and least-squares estimators)."""
    c_sys = generate_composite_system(mode, num)
    true_gate = generate_gate_from_gate_name(true_gate_name, c_sys)
    true_gate_qutip = convert_gate_quara_to_qutip(true_gate)
    # Look the tester-name helpers up by name; globals() is safer than eval().
    tester_povm_names = globals()[f"get_tester_povm_names_{int(num)}{mode}"]()
    tester_povms = []
    tester_povms_qutip = []
    for tester_povm_name in tester_povm_names:
        tester_povm = generate_povm_from_name(tester_povm_name, c_sys)
        tester_povms.append(tester_povm)
        tester_povms_qutip.append(convert_povm_quara_to_qutip(tester_povm))
    tester_state_names = globals()[f"get_tester_state_names_{int(num)}{mode}"]()
    tester_states = []
    tester_states_qutip = []
    for tester_state_name in tester_state_names:
        tester_state = generate_state_from_name(c_sys, tester_state_name)
        tester_states.append(tester_state)
        tester_states_qutip.append(convert_state_quara_to_qutip(tester_state))
    seed = 7896
    qpt = StandardQpt(
        states=tester_states,
        povms=tester_povms,
        on_para_eq_constraint=True,
        schedules="all",
        seed_data=seed,
    )
    # Exact (weight 1) empirical distributions computed from the true gate.
    prob_dists = [(1, np.array(pd)) for pd in qpt.calc_prob_dists(true_gate)]
    for estimator_name in ["linear", "least_squares"]:
        estimated_gate_qutip = estimate_standard_qpt_from_qutip(
            mode,
            num,
            tester_states=tester_states_qutip,
            tester_povms=tester_povms_qutip,
            empi_dists=prob_dists,
            estimator_name=estimator_name,
            schedules="all",
        )
        npt.assert_array_almost_equal(
            estimated_gate_qutip.data.toarray(),
            true_gate_qutip.data.toarray(),
            decimal=decimal,
        )
| none | 1 | 1.833479 | 2 | |
jel/utils/common.py | izuna385/jel | 6 | 6612613 | <filename>jel/utils/common.py
import json
import spacy
import logging
from typing import Tuple, List, Dict
# Module-level logger for this utility module.
logger = logging.getLogger(__name__)
logger.debug(msg='loading ja_core_news_md')
# Japanese spaCy pipeline, loaded once at import time (slow; adds import cost).
nlp = spacy.load('ja_core_news_md')
logger.debug(msg='loading ja_core_news_md finished.')
def jopen(file_path: str):
    """Read *file_path* as JSON and return the parsed object."""
    with open(file_path, 'r') as handle:
        return json.load(handle)
def return_ner_span(text: str) -> List[Dict]:
    '''
    Run the module-level spaCy pipeline over *text* and collect one dict
    per recognized entity: its surface form, label and character span.

    :param text: raw input string
    :return: list of {'text', 'label', 'span'} dicts
    '''
    analyzed = nlp(text=text)
    return [
        {'text': entity.text,
         'label': entity.label_,
         'span': (entity.start_char, entity.end_char)}
        for entity in analyzed.ents
    ]
import json
import spacy
import logging
from typing import Tuple, List, Dict
# Module-level logger for this utility module.
logger = logging.getLogger(__name__)
logger.debug(msg='loading ja_core_news_md')
# Japanese spaCy pipeline, loaded once at import time (slow; adds import cost).
nlp = spacy.load('ja_core_news_md')
logger.debug(msg='loading ja_core_news_md finished.')
def jopen(file_path: str):
    """Load a JSON file and return the parsed object."""
    with open(file_path, 'r') as f:
        j = json.load(f)
    return j
def return_ner_span(text: str) -> List[Dict]:
    '''
    Run the module-level spaCy pipeline over *text*.

    :param text: raw input string
    :return: list of dicts with keys 'text', 'label' and
        'span' (start_char, end_char) for each recognized entity
    '''
    doc = nlp(text=text)
    ents = [{'text': ent.text,
             'label': ent.label_,
             'span': (ent.start_char, ent.end_char)} for ent in doc.ents]
    return ents
dark_visual/genCode/gen.py | pylixm/darker | 1 | 6612614 | <filename>dark_visual/genCode/gen.py
# coding=utf-8
__author__ = 'fang'
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import random
# Random uppercase letter for the captcha text.
def rndChar():
    """Return a single random letter in 'A'..'Z'."""
    return chr(random.randrange(ord('A'), ord('Z') + 1))
# Random light color used for the noisy background pixels.
def rndBgColor():
    """Return an (r, g, b) tuple with every channel in 64..255."""
    return tuple(random.randint(64, 255) for _ in range(3))
# Random dark color used for the captcha letters.
def rndFontColor():
    """Return an (r, g, b) tuple with every channel in 32..127."""
    return tuple(random.randint(32, 127) for _ in range(3))
# Generate the captcha image. (The original comment said "QR code", but the
# code below actually draws a 5-letter CAPTCHA.)
def genTDCode():
    """Render a blurred 240x60 captcha of 5 random letters to code.jpg."""
    width = 60 *4
    height = 60
    img = Image.new('RGB', (width, height), 0xffffff)
    font = ImageFont.truetype('Libian.ttc', 50) # create the Font object (requires Libian.ttc on the font path)
    draw = ImageDraw.Draw(img) # create the Draw object
    # fill every pixel with random light background noise
    for w in range(width):
        for h in range(height):
            draw.point((w, h), fill=rndBgColor())
    # draw the letters, roughly one per 50px column
    for t in range(5):
        draw.text(
            (50 * t + 10, 0),
            rndChar(),
            font=font,
            fill=rndFontColor()
            # fill=0x000000 # solid black
        )
    # blur the whole image
    img = img.filter(ImageFilter.BLUR)
    img.save('code.jpg', 'jpeg')
    img.show()
# Render one captcha when run as a script.
if __name__ == '__main__':
    genTDCode()
# coding=utf-8
__author__ = 'fang'
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import random
# Random uppercase letter for the captcha text.
def rndChar():
    """Return a single random letter in 'A'..'Z'."""
    return chr(random.randrange(ord('A'), ord('Z') + 1))
# Random light color used for the noisy background pixels.
def rndBgColor():
    """Return an (r, g, b) tuple with every channel in 64..255."""
    return tuple(random.randint(64, 255) for _ in range(3))
# Random dark color used for the captcha letters.
def rndFontColor():
    """Return an (r, g, b) tuple with every channel in 32..127."""
    return tuple(random.randint(32, 127) for _ in range(3))
# Generate the captcha image. (The original comment said "QR code", but the
# code below actually draws a 5-letter CAPTCHA.)
def genTDCode():
    """Render a blurred 240x60 captcha of 5 random letters to code.jpg."""
    width = 60 *4
    height = 60
    img = Image.new('RGB', (width, height), 0xffffff)
    font = ImageFont.truetype('Libian.ttc', 50) # create the Font object (requires Libian.ttc on the font path)
    draw = ImageDraw.Draw(img) # create the Draw object
    # fill every pixel with random light background noise
    for w in range(width):
        for h in range(height):
            draw.point((w, h), fill=rndBgColor())
    # draw the letters, roughly one per 50px column
    for t in range(5):
        draw.text(
            (50 * t + 10, 0),
            rndChar(),
            font=font,
            fill=rndFontColor()
            # fill=0x000000 # solid black
        )
    # blur the whole image
    img = img.filter(ImageFilter.BLUR)
    img.save('code.jpg', 'jpeg')
    img.show()
# Render one captcha when run as a script.
if __name__ == '__main__':
    genTDCode()
tools/combine.py | after5cst/BG2AI | 1 | 6612615 | <reponame>after5cst/BG2AI<filename>tools/combine.py
#! /usr/bin/env python3
import argparse
import collections
from copy import deepcopy
import json
import logging
import os
from pprint import pprint, pformat
import re
import shutil
import sys
from substituter import Substituter
from globals import tools_dir, project_name
def replace_single_quotes_with_double_outside_comment(data: str) -> str:
    """
    Replace single quotes with double quotes, leaving text after a line's
    last ``//`` (the comment) untouched.  Returns the converted string.
    """
    logging.debug("rsq: {}".format(pformat(data)))
    converted = []
    for line in data.split('\n'):
        head, marker, comment = line.rpartition('//')
        if marker:
            # Everything before the LAST '//' is code (matching the greedy
            # regex the previous implementation used); the rest is comment.
            converted.append(head.replace("'", '"') + marker + comment)
        else:
            # rpartition puts the whole line in `comment` when no '//' exists.
            converted.append(comment.replace("'", '"'))
    return '\n'.join(converted)
def convert_actions_to_text(weight: int, actions: list, fields_in: dict) -> list:
    """
    Convert a list of actions into a list of strings.
    :param weight: The weight of the response block.
    :param actions: The list of actions for that weight.  NOTE: dict entries
        are consumed via popitem(), so callers pass deep copies.
    :param fields_in: A dict of field values substituted into each action.
    :return: a list of strings.
    """
    lines = ["RESPONSE #{}".format(weight)]
    for action in actions:
        if isinstance(action, dict):
            # Dict action: {template_name: extra_fields}; expanded through a
            # Substituter template.
            assert 1 == len(action), "Detected dict with multiple trigger keys"
            key, value = action.popitem()
            # NOTE(review): when value is falsy, fields_in itself is passed
            # to the template (aliased, not copied).
            if value:
                value.update(fields_in)
            else:
                value = fields_in
            template = Substituter(key)
            template_lines = template.expand(value)
            for template_line in template_lines:
                lines.append('\t' + template_line)
        elif isinstance(action, str):
            # Plain string action: substitute every <field> placeholder.
            for key, value in fields_in.items():
                search_term = "<{}>".format(key)
                # logging.debug("Replacing '{}' with '{}' in '{}'".format(
                #     search_term, value, action))
                action = action.replace(search_term, value)
            lines.append('\t' + action)
        else:
            assert False, "Action contains unknown type"
    # Indent every line one tab and normalize quotes outside '//' comments.
    out = list()
    for line in lines:
        line = '\t' + replace_single_quotes_with_double_outside_comment(line)
        out.append(line)
    return out
def convert_triggers_to_text(source_in: list, fields_in: dict, in_or: bool=False) -> list:
    """
    Convert a list of triggers into a list of strings.
    :param source_in: The list of triggers from the JSON.
    :param fields_in: A dict of substitutable fields.
    :param in_or: If True, then processing statements from an OR
    :return: a list of strings.
    """
    lines = list()
    # Work queue: expanded items are pushed back on the LEFT so they are
    # re-processed in their original order.
    # NOTE(review): the local name shadows collections.deque.
    deque = collections.deque(source_in)
    while deque:
        item = deque.popleft()
        if isinstance(item, list):
            logging.debug("T2T: LIST {}".format(pformat(item)))
            logging.debug("Converting OR block to text")
            assert not in_or, "Nested OR block found"
            # A list within a list is an OR block.
            # Expand it recursively, then re-queue the expanded body behind
            # its OR(n) header so it flows through the str branch below.
            or_lines = convert_triggers_to_text(item, fields_in, True)
            or_statement = "OR({})".format(len(or_lines))
            while or_lines:
                deque.appendleft(or_lines.pop())
            deque.appendleft(or_statement)
        elif isinstance(item, dict):
            logging.debug("T2T: DICT {}".format(pformat(item)))
            assert 1 == len(item), "Detected dict with multiple trigger keys"
            key, value = item.popitem()
            # NOTE(review): when value is falsy, fields_in itself is passed
            # to the template (aliased, not copied).
            if value:
                value.update(fields_in)
            else:
                value = fields_in
            # Expand the template and re-queue its lines for substitution.
            data = Substituter(key).expand(value)
            while data:
                deque.appendleft(data.pop())
        elif isinstance(item, str):
            logging.debug("T2T: STR {}".format(pformat(item)))
            # Plain string trigger: substitute every <field> placeholder.
            for key, value in fields_in.items():
                search_term = "<{}>".format(key)
                # logging.debug("Replacing '{}' with '{}' in '{}'".format(
                #     search_term, value, action))
                item = item.replace(search_term, value)
            lines += [item]
        else:
            assert False, "Trigger contains unknown type"
        logging.debug("T2T: END {}".format(pformat(item)))
    # Indent every line one tab and normalize quotes outside '//' comments.
    out = list()
    for line in lines:
        line = '\t' + replace_single_quotes_with_double_outside_comment(line)
        out.append(line)
    return out
def convert_json_to_baf(source: dict) -> str:
    """
    Return a BAF string that represents the JSON provided.

    *source* holds an "IF" trigger list, a "THEN" mapping of weight->actions,
    a list of field dicts under "fields", and optionally a "name".  One
    IF/THEN/END block is emitted per entry in "fields".
    """
    part_count = len(source["fields"])
    # Collapse the previous four duplicated logging branches into one call;
    # the emitted message text is unchanged.
    logging.info("Combining {} {} ({})".format(
        "multi-part" if part_count > 1 else "single-part",
        source.get("name", "<unnamed>"),
        part_count,
    ))
    result = ""
    for fields in source["fields"]:
        # Deep copies everywhere: the converters mutate their inputs
        # (popitem()/update()).
        fields = deepcopy(fields)
        logging.debug("Handling fields {}".format(pformat(fields)))
        out = ["IF"] + convert_triggers_to_text(deepcopy(source["IF"]), fields)
        out.append("THEN")
        for item in source["THEN"]:
            item = deepcopy(item)
            assert 1 == len(item), "Detected dict with multiple action keys"
            key, value = item.popitem()
            out += convert_actions_to_text(int(key), value, fields)
        out.append("END")
        result = result + '\n'.join(out) + '\n\n'
    return result
def combine_file(source_dir: str, target_file: str):
    """Take snippets and put them back together"""
    logging.info("Sorting directory '{}'".format(source_dir))
    # Collect the JSON snippet files in this directory.
    json_files = []
    for entry in os.listdir(source_dir):
        path = os.path.join(source_dir, entry)
        logging.debug("Examining '{}'".format(path))
        if path.endswith(".json") and os.path.isfile(path):
            json_files.append(path)
    if not json_files:
        logging.warning("No files found for combine")
        return
    # Deterministic order: snippets are numbered by filename.
    json_files.sort()
    logging.debug("Writing file '{}'".format(target_file))
    with open(target_file, "w") as fout:
        for path in json_files:
            with open(path) as fin:
                logging.info("Processing file '{}'".format(path))
                fout.write(convert_json_to_baf(json.load(fin)))
if __name__ == "__main__":
    # Default search location: <tools_dir>/../<project_name>.
    search_dir = os.path.join(tools_dir, "..", project_name)
    parser = argparse.ArgumentParser()
    # NOTE(review): --auto_delete is parsed but never read below.
    parser.add_argument('--auto_delete', action='store_true', default=True)
    parser.add_argument('-v', '--verbose', action='count', default=0)
    parser.add_argument('-d', '--search_dir', default=search_dir)
    args = parser.parse_args()
    # Map -v count to a logging level: 0 -> WARNING, 1 -> INFO, 2+ -> DEBUG.
    if args.verbose == 0:
        level = logging.WARNING
    elif args.verbose == 1:
        level = logging.INFO
    else:
        level = logging.DEBUG
    logging.basicConfig(stream=sys.stdout, level=level)
    logging.info("Verbosity = {}".format(logging.getLevelName(level)))
    # NOTE(review): this logs the default search_dir, not args.search_dir.
    logging.info("SearchDir = '{}'".format(search_dir))
    # Every .baf file becomes a combine target; its snippet directory is the
    # same path with the extension stripped.
    targets = []
    for file_name in os.listdir(args.search_dir):
        if file_name.lower().endswith('.baf'):
            file_path = os.path.realpath(os.path.join(args.search_dir, file_name))
            targets.append(file_path)
    for target in targets:
        source = os.path.splitext(target)[0]
        logging.info("Source = '{}'".format(source))
        logging.info("Target = '{}'".format(target))
        combine_file(source, target)
| #! /usr/bin/env python3
import argparse
import collections
from copy import deepcopy
import json
import logging
import os
from pprint import pprint, pformat
import re
import shutil
import sys
from substituter import Substituter
from globals import tools_dir, project_name
def replace_single_quotes_with_double_outside_comment(data: str) -> str:
    """
    Replace single quotes with double quotes, leaving text after a line's
    last ``//`` (the comment) untouched.  Returns the converted string.
    """
    logging.debug("rsq: {}".format(pformat(data)))
    converted = []
    for line in data.split('\n'):
        head, marker, comment = line.rpartition('//')
        if marker:
            # Everything before the LAST '//' is code (matching the greedy
            # regex the previous implementation used); the rest is comment.
            converted.append(head.replace("'", '"') + marker + comment)
        else:
            # rpartition puts the whole line in `comment` when no '//' exists.
            converted.append(comment.replace("'", '"'))
    return '\n'.join(converted)
def convert_actions_to_text(weight: int, actions: list, fields_in: dict) -> list:
    """
    Convert a list of actions into a list of strings.
    :param weight: The weight of the response block.
    :param actions: The list of actions for that weight.
    :param fields_in: A dict of field values.
    :return: a list of strings.

    Each action is either a dict (a single template name mapped to its
    template fields, expanded via Substituter) or a plain string in which
    ``<field>`` placeholders are substituted from *fields_in*.  All lines
    receive one extra tab of indentation in the final normalisation pass,
    so action lines end up two tabs deep under the one-tab RESPONSE header.
    """
    lines = ["RESPONSE #{}".format(weight)]
    for action in actions:
        if isinstance(action, dict):
            # NOTE(review): the message says "trigger keys" although this
            # function handles actions -- presumably copied from the
            # trigger path; confirm before changing.
            assert 1 == len(action), "Detected dict with multiple trigger keys"
            # popitem() empties the action dict -- callers deepcopy upstream.
            key, value = action.popitem()
            if value:
                # Template-local fields are overridden by *fields_in*.
                value.update(fields_in)
            else:
                # No template-local fields: substitute straight from
                # fields_in (this aliases the caller's dict).
                value = fields_in
            template = Substituter(key)
            template_lines = template.expand(value)
            for template_line in template_lines:
                lines.append('\t' + template_line)
        elif isinstance(action, str):
            # Plain action string: replace every <field> placeholder.
            for key, value in fields_in.items():
                search_term = "<{}>".format(key)
                # logging.debug("Replacing '{}' with '{}' in '{}'".format(
                #     search_term, value, action))
                action = action.replace(search_term, value)
            lines.append('\t' + action)
        else:
            assert False, "Action contains unknown type"
    out = list()
    for line in lines:
        # Indent once more and normalise quoting outside // comments.
        line = '\t' + replace_single_quotes_with_double_outside_comment(line)
        out.append(line)
    return out
def convert_triggers_to_text(source_in: list, fields_in: dict, in_or: bool=False) -> list:
    """
    Convert a list of triggers into a list of strings.
    :param source_in: The list of triggers from the JSON.
    :param fields_in: A dict of substitutable fields.
    :param in_or: If True, then processing statements from an OR
    :return: a list of strings.

    Triggers are drained from a work queue; expanded items (OR blocks,
    Substituter templates) are pushed back onto the front so they are
    re-examined until only plain strings remain.
    """
    lines = list()
    # NOTE: this local name shadows collections.deque for the remainder
    # of the function.
    deque = collections.deque(source_in)
    while deque:
        item = deque.popleft()
        if isinstance(item, list):
            logging.debug("T2T: LIST {}".format(pformat(item)))
            logging.debug("Converting OR block to text")
            # Only one level of OR nesting is supported.
            assert not in_or, "Nested OR block found"
            # A list within a list is an OR block.
            or_lines = convert_triggers_to_text(item, fields_in, True)
            or_statement = "OR({})".format(len(or_lines))
            # Re-queue the expanded lines, preserving their order,
            # behind an OR(n) header line.
            while or_lines:
                deque.appendleft(or_lines.pop())
            deque.appendleft(or_statement)
        elif isinstance(item, dict):
            logging.debug("T2T: DICT {}".format(pformat(item)))
            assert 1 == len(item), "Detected dict with multiple trigger keys"
            # popitem() empties the (caller-deepcopied) dict.
            key, value = item.popitem()
            if value:
                # Template-local fields are overridden by *fields_in*.
                value.update(fields_in)
            else:
                value = fields_in
            data = Substituter(key).expand(value)
            # Push expanded template lines back to the front, in order,
            # for another round of processing.
            while data:
                deque.appendleft(data.pop())
        elif isinstance(item, str):
            logging.debug("T2T: STR {}".format(pformat(item)))
            # Plain trigger string: replace every <field> placeholder.
            for key, value in fields_in.items():
                search_term = "<{}>".format(key)
                # logging.debug("Replacing '{}' with '{}' in '{}'".format(
                #     search_term, value, action))
                item = item.replace(search_term, value)
            lines += [item]
        else:
            assert False, "Trigger contains unknown type"
        logging.debug("T2T: END {}".format(pformat(item)))
    out = list()
    for line in lines:
        # Tab-indent and normalise quoting outside // comments.
        line = '\t' + replace_single_quotes_with_double_outside_comment(line)
        out.append(line)
    return out
def convert_json_to_baf(source: dict) -> str:
    """
    Return a BAF string that represents the JSON provided.

    *source* carries "IF" (triggers), "THEN" (weight -> actions dicts),
    "fields" (one dict per part) and optionally "name".  One
    IF/THEN/END block is emitted per entry of source["fields"].
    """
    # Collapse the four near-identical logging branches of the original
    # (multi/single part x named/unnamed) into a single message.
    plurality = "multi-part" if len(source["fields"]) > 1 else "single-part"
    logging.info("Combining {} {} ({})".format(
        plurality, source.get("name", "<unnamed>"), len(source["fields"])))
    result = ""
    for fields in source["fields"]:
        # deepcopy: the converters below mutate their inputs (popitem).
        fields = deepcopy(fields)
        logging.debug("Handling fields {}".format(pformat(fields)))
        out = ["IF"] + convert_triggers_to_text(deepcopy(source["IF"]), fields)
        out.append("THEN")
        for item in source["THEN"]:
            item = deepcopy(item)
            assert 1 == len(item), "Detected dict with multiple action keys"
            key, value = item.popitem()
            # The single key is the numeric response weight.
            out = out + convert_actions_to_text(int(key), value, fields)
        out.append("END")
        result = result + '\n'.join(out) + '\n\n'
    return result
def combine_file(source_dir: str, target_file: str):
    """Take snippets and put them back together.

    Reads every ``*.json`` snippet in *source_dir* (sorted by name),
    converts each to BAF text and concatenates the results into
    *target_file* (which is overwritten).
    """
    logging.info("Sorting directory '{}'".format(source_dir))
    paths = []
    for entry in os.listdir(source_dir):
        path = os.path.join(source_dir, entry)
        logging.debug("Examining '{}'".format(path))
        if os.path.isfile(path) and path.endswith(".json"):
            paths.append(path)
    if not paths:
        logging.warning("No files found for combine")
        return
    # Deterministic output order regardless of listdir order.
    paths.sort()
    logging.debug("Writing file '{}'".format(target_file))
    with open(target_file, "w") as fout:
        for path in paths:
            with open(path) as fin:
                logging.info("Processing file '{}'".format(path))
                fout.write(convert_json_to_baf(json.load(fin)))
if __name__ == "__main__":
    search_dir = os.path.join(tools_dir, "..", project_name)
    parser = argparse.ArgumentParser()
    # NOTE(review): 'store_true' combined with default=True means this flag
    # can never be turned off from the command line; the value is also
    # unused in this script -- confirm intent before relying on it.
    parser.add_argument('--auto_delete', action='store_true', default=True)
    parser.add_argument('-v', '--verbose', action='count', default=0)
    parser.add_argument('-d', '--search_dir', default=search_dir)
    args = parser.parse_args()
    # Map -v occurrences to a logging level: none -> WARNING, -v -> INFO,
    # -vv or more -> DEBUG.
    if args.verbose == 0:
        level = logging.WARNING
    elif args.verbose == 1:
        level = logging.INFO
    else:
        level = logging.DEBUG
    logging.basicConfig(stream=sys.stdout, level=level)
    logging.info("Verbosity = {}".format(logging.getLevelName(level)))
    # Bug fix: report the directory that is actually searched (it may be
    # overridden with -d), not the computed default.
    logging.info("SearchDir = '{}'".format(args.search_dir))
    # Collect every *.baf target in the search directory; each target's
    # snippets live in a sibling directory named after the file sans suffix.
    targets = []
    for file_name in os.listdir(args.search_dir):
        if file_name.lower().endswith('.baf'):
            targets.append(os.path.realpath(os.path.join(args.search_dir, file_name)))
    for target in targets:
        source = os.path.splitext(target)[0]
        logging.info("Source = '{}'".format(source))
        logging.info("Target = '{}'".format(target))
        combine_file(source, target)
technical_indicators2.py | M5era/CNN-for-trading | 1 | 6612616 |
import time

from ta.trend import *
from ta.volatility import *
from ta.momentum import ROCIndicator
from ta.momentum import RSIIndicator
from ta.momentum import WilliamsRIndicator
from ta.volatility import BollingerBands
from ta.volume import MFIIndicator
from ta.volume import ChaikinMoneyFlowIndicator
from ta.trend import WMAIndicator
from ta.trend import TRIXIndicator
from ta.trend import DPOIndicator
from ta.trend import KSTIndicator
from ta.trend import ADXIndicator
from ta.volume import ForceIndexIndicator
from ta.volume import EaseOfMovementIndicator
from ta.volatility import AverageTrueRange
from stockstats import StockDataFrame as sdf
from tqdm.auto import tqdm
import numpy as np
import pandas as pd
# Class setup indicators with ta library:
class TechnicalIndicator():
    """Append technical-analysis indicator columns to ``self.df`` in place.

    Thin wrappers around the ``ta`` library plus a few hand-rolled
    indicators (CMO, WMA/HMA via stockstats/pandas).  Column names
    encode the indicator and its window, e.g. ``rsi_14_close``, ``cci_20``.

    NOTE(review): the ``ta`` indicator constructors expect pandas Series,
    yet several wrappers annotate their price parameters as ``str`` --
    confirm callers pass Series (e.g. ``self.df['close']``) despite the
    annotations.
    """

    def __init__(self, df):
        # Expects OHLCV-style columns; 'high', 'low' and 'close' are read
        # directly by get_williamR and get_CCI.
        self.df = df
        # TODO initialize df here
        self.get_MACD()

    def get_roc(self, col_name: str, window: int):
        """Rate of change over *window* periods."""
        indicator_roc = ROCIndicator(col_name, window)
        self.df['roc_{}_{}'.format(window, col_name)] = indicator_roc.roc()

    def get_rsi(self, col_name: str, window: int):
        """Relative strength index over *window* periods."""
        indicator_rsi = RSIIndicator(col_name, window)
        self.df['rsi_{}_{}'.format(window, col_name)] = indicator_rsi.rsi()

    def get_mfi(self, high: str, low: str, close: str, volume: str, window: int):
        """Money flow index."""
        indicator_mfi = MFIIndicator(high, low, close, volume, window)
        self.df['mfi_{}'.format(window)] = indicator_mfi.money_flow_index()

    def get_cmf(self, high: str, low: str, close: str, volume: str, window: int):
        """Chaikin money flow."""
        indicator_cmf = ChaikinMoneyFlowIndicator(high, low, close, volume, window)
        self.df['cmf_{}'.format(window)] = indicator_cmf.chaikin_money_flow()

    def get_wma(self, col_name: str, window: int):
        """Weighted moving average (ta implementation; see also get_WMA)."""
        indicator_wma = WMAIndicator(col_name, window)
        self.df['wma_{}_{}'.format(window, col_name)] = indicator_wma.wma()

    def get_trix(self, close: str, window: int):
        """TRIX (rate of change of a triple-smoothed EMA)."""
        indicator_trix = TRIXIndicator(close, window)
        self.df['trix_{}'.format(window)] = indicator_trix.trix()

    def get_dpo(self, close: str, window: int):
        """Detrended price oscillator."""
        indicator_dpo = DPOIndicator(close, window)
        self.df['dpo_{}'.format(window)] = indicator_dpo.dpo()

    def get_kst(self, close: str, roc1: int, roc2: int, roc3: int, roc4: int,
                window1: int, window2: int, window3: int, window4: int, nsig: int):
        """Know-sure-thing oscillator."""
        indicator_kst = KSTIndicator(close, roc1, roc2, roc3, roc4,
                                     window1, window2, window3, window4, nsig)
        self.df['kst'] = indicator_kst.kst()

    def get_adx(self, high: str, low: str, close: str, window: int):
        """Average directional index."""
        indicator_adx = ADXIndicator(high, low, close, window)
        self.df['adx_{}'.format(window)] = indicator_adx.adx()

    def get_fi(self, close: str, volume: str, window: int):
        """Force index."""
        indicator_fi = ForceIndexIndicator(close, volume, window)
        self.df['fi_{}'.format(window)] = indicator_fi.force_index()

    def get_emv(self, high: str, low: str, volume: str, window: int):
        """Ease of movement."""
        indicator_emv = EaseOfMovementIndicator(high, low, volume, window)
        self.df['emv_{}'.format(window)] = indicator_emv.ease_of_movement()

    def get_bb(self, close: str, window: int):
        """Bollinger bands plus derived indicator/width/percent columns.

        Bug fix: the original assigned ``bb_bbhi`` twice -- first the
        high-band *indicator*, then accidentally overwrote it with the
        raw high band (already stored as ``bb_bbh``).  The stray second
        assignment is removed so ``bb_bbhi`` keeps the indicator values.
        """
        indicator_bb = BollingerBands(close, window)
        self.df['bb_bbm'] = indicator_bb.bollinger_mavg()
        self.df['bb_bbh'] = indicator_bb.bollinger_hband()
        self.df['bb_bbl'] = indicator_bb.bollinger_lband()
        self.df['bb_bbhi'] = indicator_bb.bollinger_hband_indicator()
        self.df['bb_bbli'] = indicator_bb.bollinger_lband_indicator()
        self.df['bb_bbw'] = indicator_bb.bollinger_wband()
        self.df['bb_bbp'] = indicator_bb.bollinger_pband()

    def get_atr(self, high: str, low: str, close: str, window: int):
        """Average true range."""
        indicator_atr = AverageTrueRange(high, low, close, window)
        self.df['atr_{}'.format(window)] = indicator_atr.average_true_range()

    def get_williamR(self, col_name: str, intervals: int):
        """
        both libs gave same result
        Momentum indicator
        """
        print("Calculating WilliamR")
        for i in tqdm(intervals):
            self.df["wr_" + str(i)] = WilliamsRIndicator(
                self.df['high'], self.df['low'], self.df['close'], i,
                fillna=True).williams_r()

    def get_MACD(self):
        """
        Same result for both libs.
        Calculated for the fixed 12/26 periods on close only; creates the
        'macd' column.
        """
        print("Calculating MACD")
        df_ss = sdf.retype(self.df)
        self.df['macd'] = df_ss['macd']
        # Drop the intermediate EMA columns stockstats adds as a side effect.
        del self.df['close_12_ema']
        del self.df['close_26_ema']

    def get_SMA(self, col_name: str, intervals: int):
        """Simple moving average of *col_name* for each window in *intervals*."""
        print("Calculating SMA")
        df_ss = sdf.retype(self.df)
        for i in tqdm(intervals):
            # Copy stockstats' column under our naming scheme, then drop it.
            self.df[col_name + '_sma_' + str(i)] = df_ss[col_name + '_' + str(i) + '_sma']
            del self.df[col_name + '_' + str(i) + '_sma']

    def get_EMA(self, col_name: str, intervals: int):
        """Exponential moving average for each window in *intervals*.

        NOTE(review): marked "not working?" / "Needs validation" in the
        original -- verify output before relying on it.
        """
        print("Calculating EMA")
        df_ss = sdf.retype(self.df)
        for i in tqdm(intervals):
            self.df['ema_' + str(i)] = df_ss[col_name + '_' + str(i) + '_ema']
            del self.df[col_name + '_' + str(i) + '_ema']

    def get_CMO(self, col_name: str, intervals: int):
        """
        Chande Momentum Oscillator
        As per https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/cmo
        CMO = 100 * ((Sum(ups) - Sum(downs))/ ( (Sum(ups) + Sum(downs) ) )
        range = +100 to -100
        params: df -> dataframe with financial instrument history
                col_name -> column name for which CMO is to be calculated
                intervals -> list of periods for which to calculated
        return: None (adds the result in a column)
        """
        print("Calculating CMO")

        def calculate_CMO(series, period):
            sum_gains = series[series >= 0].sum()
            sum_losses = np.abs(series[series < 0].sum())
            cmo = 100 * ((sum_gains - sum_losses) / (sum_gains + sum_losses))
            return np.round(cmo, 3)

        diff = self.df[col_name].diff()[1:]  # skip na
        for period in tqdm(intervals):
            # Direct aligned assignment replaces the original chained
            # ``df[col][1:] = res`` (a pandas SettingWithCopy pattern);
            # the first row, absent from ``diff``, becomes NaN.
            self.df['cmo_' + str(period)] = diff.rolling(period).apply(
                calculate_CMO, args=(period,), raw=False)

    def get_WMA(self, col_name, intervals, hma_step=0):
        """Weighted moving average (momentum indicator).

        Also used internally as a building block of the Hull moving
        average: hma_step 1 stores doubled half-period WMAs as
        ``hma_wma_*`` columns, hma_step 3 stores the finished ``hma_*``
        columns.  See get_HMA.

        Bug fix: ``wavg`` uses ``pd.Series`` but pandas was never
        imported by this module (NameError at runtime); ``import pandas
        as pd`` is added to the module imports.
        """
        if (hma_step == 0):
            # don't show progress for internal WMA calculation for HMA
            print("Calculating WMA")

        def wavg(rolling_prices, period):
            # Linear weights 1..period, newest value weighted highest.
            weights = pd.Series(range(1, period + 1))
            return np.multiply(rolling_prices.values, weights.values).sum() / weights.sum()

        temp_col_count_dict = {}
        for i in tqdm(intervals, disable=(hma_step != 0)):
            res = self.df[col_name].rolling(i).apply(wavg, args=(i,), raw=False)
            if hma_step == 0:
                self.df['wma_' + str(i)] = res
            elif hma_step == 1:
                if 'hma_wma_' + str(i) in temp_col_count_dict.keys():
                    temp_col_count_dict['hma_wma_' + str(i)] = temp_col_count_dict['hma_wma_' + str(i)] + 1
                else:
                    temp_col_count_dict['hma_wma_' + str(i)] = 0
                # after halving the periods and rounding, there may be two intervals
                # with same value e.g. 2.6 & 2.8 both would lead to same value (3)
                # after rounding. So save as diff columns
                self.df['hma_wma_' + str(i) + '_' + str(temp_col_count_dict['hma_wma_' + str(i)])] = 2 * res
            elif hma_step == 3:
                import re
                expr = r"^hma_[0-9]{1}"
                columns = list(self.df.columns)
                # Name the finished HMA column after the count of hma_*
                # columns already present.
                self.df['hma_' + str(len(list(filter(re.compile(expr).search, columns))))] = res

    def get_HMA(self, col_name: str, intervals: int):
        """Hull moving average, built from three WMA passes."""
        import re
        print("Calculating HMA")
        expr = r"^wma_.*"
        if len(list(filter(re.compile(expr).search, list(self.df.columns)))) > 0:
            print("WMA calculated already. Proceed with HMA")
        else:
            print("Need WMA first...")
            self.get_WMA(col_name, intervals)
        intervals_half = np.round([i / 2 for i in intervals]).astype(int)
        # step 1 = WMA for interval/2; creates columns prefixed 'hma_wma_*'
        self.get_WMA(col_name, intervals_half, 1)
        # step 2 = step 1 - WMA
        columns = list(self.df.columns)
        expr = r"^hma_wma.*"
        hma_wma_cols = list(filter(re.compile(expr).search, columns))
        rest_cols = [x for x in columns if x not in hma_wma_cols]
        expr = r"^wma.*"
        wma_cols = list(filter(re.compile(expr).search, rest_cols))
        self.df[hma_wma_cols] = self.df[hma_wma_cols].sub(self.df[wma_cols].values,
                                                          fill_value=0)
        # step 3 = WMA(step 2, interval = sqrt(n))
        intervals_sqrt = np.round([np.sqrt(i) for i in intervals]).astype(int)
        for i, col in tqdm(enumerate(hma_wma_cols)):
            self.get_WMA(col, [intervals_sqrt[i]], 3)
        self.df.drop(columns=hma_wma_cols, inplace=True)

    def get_CCI(self, col_name: str, intervals: int):
        """Commodity channel index (``cci`` comes from the ta star import)."""
        print("Calculating CCI")
        for i in tqdm(intervals):
            self.df['cci_' + str(i)] = cci(self.df['high'], self.df['low'], self.df['close'], i, fillna=True)
|
from ta.trend import *
from ta.volatility import *
from ta.momentum import ROCIndicator
from ta.momentum import RSIIndicator
from ta.momentum import WilliamsRIndicator
from ta.volatility import BollingerBands
from ta.volume import MFIIndicator
from ta.volume import ChaikinMoneyFlowIndicator
from ta.trend import WMAIndicator
from ta.trend import TRIXIndicator
from ta.trend import DPOIndicator
from ta.trend import KSTIndicator
from ta.trend import ADXIndicator
from ta.volume import ForceIndexIndicator
from ta.volume import EaseOfMovementIndicator
from ta.volatility import AverageTrueRange
import time
from stockstats import StockDataFrame as sdf
from tqdm.auto import tqdm
import numpy as np
# Class setup indicators with ta library:
class TechnicalIndicator():
def __init__(self, df):
self.df = df
# TODO initialize df here
self.get_MACD()
def get_roc(self, col_name: str, window: int):
indicator_roc = ROCIndicator(col_name, window)
self.df['roc_{}_{}'.format(window, col_name)] = indicator_roc.roc()
def get_rsi(self, col_name: str, window: int):
indicator_rsi = RSIIndicator(col_name, window)
self.df['rsi_{}_{}'.format(window, col_name)] = indicator_rsi.rsi()
def get_mfi(self, high: str, low: str, close: str, volume: str, window: int):
indicator_mfi = MFIIndicator(high, low, close, volume, window)
self.df['mfi_{}'.format(window)] = indicator_mfi.money_flow_index()
def get_cmf(self, high: str, low: str, close: str, volume: str, window: int):
indicator_cmf = ChaikinMoneyFlowIndicator(high, low, close, volume, window)
self.df['cmf_{}'.format(window)] = indicator_cmf.chaikin_money_flow()
def get_wma(self, col_name: str, window: int):
indicator_wma = WMAIndicator(col_name, window)
self.df['wma_{}_{}'.format(window, col_name)] = indicator_wma.wma()
def get_trix(self, close: str, window: int):
indicator_trix = TRIXIndicator(close, window)
self.df['trix_{}'.format(window)] = indicator_trix.trix()
def get_dpo(self, close: str, window: int):
indicator_dpo = DPOIndicator(close, window)
self.df['dpo_{}'.format(window)] = indicator_dpo.dpo()
def get_kst(self, close: str, roc1: int, roc2: int, roc3: int, roc4: int, window1: int, window2: int, window3: int,
window4: int, nsig: int):
indicator_kst = KSTIndicator(close, roc1, roc2, roc3, roc4, window1, window2, window3, window4, nsig)
self.df['kst'] = indicator_kst.kst()
def get_adx(self, high: str, low: str, close: str, window: int):
indicator_adx = ADXIndicator(high, low, close, window)
self.df['adx_{}'.format(window)] = indicator_adx.adx()
def get_fi(self, close: str, volume: str, window: int):
indicator_fi = ForceIndexIndicator(close, volume, window)
self.df['fi_{}'.format(window)] = indicator_fi.force_index()
def get_emv(self, high: str, low: str, volume: str, window: int):
indicator_emv = EaseOfMovementIndicator(high, low, volume, window)
self.df['emv_{}'.format(window)] = indicator_emv.ease_of_movement()
def get_bb(self, close: str, window: int):
indicator_bb = BollingerBands(close, window)
self.df['bb_bbm'] = indicator_bb.bollinger_mavg()
self.df['bb_bbh'] = indicator_bb.bollinger_hband()
self.df['bb_bbl'] = indicator_bb.bollinger_lband()
self.df['bb_bbhi'] = indicator_bb.bollinger_hband_indicator()
self.df['bb_bbli'] = indicator_bb.bollinger_lband_indicator()
self.df['bb_bbhi'] = indicator_bb.bollinger_hband()
self.df['bb_bbw'] = indicator_bb.bollinger_wband()
self.df['bb_bbp'] = indicator_bb.bollinger_pband()
def get_atr(self, high: str, low: str, close: str, window: int):
indicator_atr = AverageTrueRange(high, low, close, window)
self.df['atr_{}'.format(window)] = indicator_atr.average_true_range()
def get_williamR(self, col_name: str, intervals: int):
"""
both libs gave same result
Momentum indicator
"""
stime = time.time()
print("Calculating WilliamR")
# df_ss = sdf.retype(df)
for i in tqdm(intervals):
# df['wr_'+str(i)] = df_ss['wr_'+str(i)]
self.df["wr_" + str(i)] = WilliamsRIndicator(self.df['high'], self.df['low'], self.df['close'], i, fillna=True).williams_r()
def get_MACD(self):
"""
Not used
Same for both
calculated for same 12 and 26 periods on close only. Not different periods.
creates colums macd, macds, macdh
"""
print("Calculating MACD")
df_ss = sdf.retype(self.df)
self.df['macd'] = df_ss['macd']
del self.df['close_12_ema']
del self.df['close_26_ema']
def get_SMA(self, col_name: str, intervals: int):
"""
Momentum indicator
"""
stime = time.time()
print("Calculating SMA")
df_ss = sdf.retype(self.df)
for i in tqdm(intervals):
self.df[col_name + '_sma_' + str(i)] = df_ss[col_name + '_' + str(i) + '_sma']
del self.df[col_name + '_' + str(i) + '_sma']
def get_EMA(self, col_name: str, intervals: int): # not working?
"""
Needs validation
Momentum indicator
"""
stime = time.time()
print("Calculating EMA")
df_ss = sdf.retype(self.df)
for i in tqdm(intervals):
self.df['ema_' + str(i)] = df_ss[col_name + '_' + str(i) + '_ema']
del self.df[col_name + '_' + str(i) + '_ema']
# df["ema_"+str(intervals[0])+'_1'] = ema_indicator(df['close'], i, fillna=True)
def get_CMO(self, col_name: str, intervals: int):
"""
Chande Momentum Oscillator
As per https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/cmo
CMO = 100 * ((Sum(ups) - Sum(downs))/ ( (Sum(ups) + Sum(downs) ) )
range = +100 to -100
params: df -> dataframe with financial instrument history
col_name -> column name for which CMO is to be calculated
intervals -> list of periods for which to calculated
return: None (adds the result in a column)
"""
print("Calculating CMO")
stime = time.time()
def calculate_CMO(series, period):
# num_gains = (series >= 0).sum()
# num_losses = (series < 0).sum()
sum_gains = series[series >= 0].sum()
sum_losses = np.abs(series[series < 0].sum())
cmo = 100 * ((sum_gains - sum_losses) / (sum_gains + sum_losses))
return np.round(cmo, 3)
diff = self.df[col_name].diff()[1:] # skip na
for period in tqdm(intervals):
self.df['cmo_' + str(period)] = np.nan
res = diff.rolling(period).apply(calculate_CMO, args=(period,), raw=False)
self.df['cmo_' + str(period)][1:] = res
def get_WMA(self, col_name, intervals, hma_step=0):
"""
Momentum indicator
"""
stime = time.time()
if (hma_step == 0):
# don't show progress for internal WMA calculation for HMA
print("Calculating WMA")
def wavg(rolling_prices, period):
weights = pd.Series(range(1, period + 1))
return np.multiply(rolling_prices.values, weights.values).sum() / weights.sum()
temp_col_count_dict = {}
for i in tqdm(intervals, disable=(hma_step != 0)):
res = self.df[col_name].rolling(i).apply(wavg, args=(i,), raw=False)
# print("interval {} has unique values {}".format(i, res.unique()))
if hma_step == 0:
self.df['wma_' + str(i)] = res
elif hma_step == 1:
if 'hma_wma_' + str(i) in temp_col_count_dict.keys():
temp_col_count_dict['hma_wma_' + str(i)] = temp_col_count_dict['hma_wma_' + str(i)] + 1
else:
temp_col_count_dict['hma_wma_' + str(i)] = 0
# after halving the periods and rounding, there may be two intervals with same value e.g.
# 2.6 & 2.8 both would lead to same value (3) after rounding. So save as diff columns
self.df['hma_wma_' + str(i) + '_' + str(temp_col_count_dict['hma_wma_' + str(i)])] = 2 * res
elif hma_step == 3:
import re
expr = r"^hma_[0-9]{1}"
columns = list(self.df.columns)
# print("searching", expr, "in", columns, "res=", list(filter(re.compile(expr).search, columns)))
self.df['hma_' + str(len(list(filter(re.compile(expr).search, columns))))] = res
def get_HMA(self, col_name: str, intervals: int):
import re
stime = time.time()
print("Calculating HMA")
expr = r"^wma_.*"
if len(list(filter(re.compile(expr).search, list(self.df.columns)))) > 0:
print("WMA calculated already. Proceed with HMA")
else:
print("Need WMA first...")
self.get_WMA(col_name, intervals)
intervals_half = np.round([i / 2 for i in intervals]).astype(int)
# step 1 = WMA for interval/2
# this creates cols with prefix 'hma_wma_*'
self.get_WMA(col_name, intervals_half, 1)
# print("step 1 done", list(df.columns))
# step 2 = step 1 - WMA
columns = list(self.df.columns)
expr = r"^hma_wma.*"
hma_wma_cols = list(filter(re.compile(expr).search, columns))
rest_cols = [x for x in columns if x not in hma_wma_cols]
expr = r"^wma.*"
wma_cols = list(filter(re.compile(expr).search, rest_cols))
self.df[hma_wma_cols] = self.df[hma_wma_cols].sub(self.df[wma_cols].values,
fill_value=0) # .rename(index=str, columns={"close": "col1", "rsi_6": "col2"})
# df[0:10].copy().reset_index(drop=True).merge(temp.reset_index(drop=True), left_index=True, right_index=True)
# step 3 = WMA(step 2, interval = sqrt(n))
intervals_sqrt = np.round([np.sqrt(i) for i in intervals]).astype(int)
for i, col in tqdm(enumerate(hma_wma_cols)):
# print("step 3", col, intervals_sqrt[i])
self.get_WMA(col, [intervals_sqrt[i]], 3)
self.df.drop(columns=hma_wma_cols, inplace=True)
def get_CCI(self, col_name: str, intervals: int):
print("Calculating CCI")
for i in tqdm(intervals):
self.df['cci_' + str(i)] = cci(self.df['high'], self.df['low'], self.df['close'], i, fillna=True)
| en | 0.64224 | # Class setup indicators with ta library: # TODO initialize df here both libs gave same result Momentum indicator # df_ss = sdf.retype(df) # df['wr_'+str(i)] = df_ss['wr_'+str(i)] Not used Same for both calculated for same 12 and 26 periods on close only. Not different periods. creates colums macd, macds, macdh Momentum indicator # not working? Needs validation Momentum indicator # df["ema_"+str(intervals[0])+'_1'] = ema_indicator(df['close'], i, fillna=True) Chande Momentum Oscillator As per https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/cmo CMO = 100 * ((Sum(ups) - Sum(downs))/ ( (Sum(ups) + Sum(downs) ) ) range = +100 to -100 params: df -> dataframe with financial instrument history col_name -> column name for which CMO is to be calculated intervals -> list of periods for which to calculated return: None (adds the result in a column) # num_gains = (series >= 0).sum() # num_losses = (series < 0).sum() # skip na Momentum indicator # don't show progress for internal WMA calculation for HMA # print("interval {} has unique values {}".format(i, res.unique())) # after halving the periods and rounding, there may be two intervals with same value e.g. # 2.6 & 2.8 both would lead to same value (3) after rounding. So save as diff columns # print("searching", expr, "in", columns, "res=", list(filter(re.compile(expr).search, columns))) # step 1 = WMA for interval/2 # this creates cols with prefix 'hma_wma_*' # print("step 1 done", list(df.columns)) # step 2 = step 1 - WMA # .rename(index=str, columns={"close": "col1", "rsi_6": "col2"}) # df[0:10].copy().reset_index(drop=True).merge(temp.reset_index(drop=True), left_index=True, right_index=True) # step 3 = WMA(step 2, interval = sqrt(n)) # print("step 3", col, intervals_sqrt[i]) | 2.062874 | 2 |
workbaskets/migrations/0001_initial.py | uktrade/tamato | 14 | 6612617 | <gh_stars>10-100
# Generated by Django 3.1 on 2021-01-06 15:33
import django.db.models.deletion
import django_fsm
from django.conf import settings
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Initial migration: create the ``WorkBasket`` table."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="WorkBasket",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                (
                    "title",
                    models.CharField(
                        db_index=True,
                        help_text="Short name for this workbasket",
                        max_length=255,
                        unique=True,
                    ),
                ),
                (
                    "reason",
                    models.TextField(
                        blank=True, help_text="Reason for the changes to the tariff"
                    ),
                ),
                (
                    "status",
                    # Workflow state field (django-fsm); the allowed
                    # transitions are defined on the model, not here.
                    django_fsm.FSMField(
                        choices=[
                            ("NEW_IN_PROGRESS", "New - in progress"),
                            ("EDITING", "Editing"),
                            ("AWAITING_APPROVAL", "Awaiting approval"),
                            ("APPROVAL_REJECTED", "Failed approval"),
                            ("READY_FOR_EXPORT", "Ready for export"),
                            (
                                "AWAITING_CDS_UPLOAD_CREATE_NEW",
                                "Awaiting CDS upload - create new",
                            ),
                            ("AWAITING_CDS_UPLOAD_EDIT", "Awaiting CDS upload - edit"),
                            (
                                "AWAITING_CDS_UPLOAD_OVERWRITE",
                                "Awaiting CDS upload - overwrite",
                            ),
                            (
                                "AWAITING_CDS_UPLOAD_DELETE",
                                "Awaiting CDS upload - delete",
                            ),
                            ("SENT_TO_CDS", "Sent to CDS"),
                            ("SENT_TO_CDS_DELETE", "Sent to CDS - delete"),
                            ("PUBLISHED", "Published"),
                            ("CDS_ERROR", "CDS error"),
                        ],
                        db_index=True,
                        default="NEW_IN_PROGRESS",
                        max_length=50,
                    ),
                ),
                (
                    "approver",
                    # PROTECT: users who approved a workbasket cannot be
                    # deleted while the workbasket exists.
                    models.ForeignKey(
                        editable=False,
                        null=True,
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="approved_workbaskets",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "author",
                    models.ForeignKey(
                        editable=False,
                        on_delete=django.db.models.deletion.PROTECT,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
    ]
| # Generated by Django 3.1 on 2021-01-06 15:33
import django.db.models.deletion
import django_fsm
from django.conf import settings
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="WorkBasket",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"title",
models.CharField(
db_index=True,
help_text="Short name for this workbasket",
max_length=255,
unique=True,
),
),
(
"reason",
models.TextField(
blank=True, help_text="Reason for the changes to the tariff"
),
),
(
"status",
django_fsm.FSMField(
choices=[
("NEW_IN_PROGRESS", "New - in progress"),
("EDITING", "Editing"),
("AWAITING_APPROVAL", "Awaiting approval"),
("APPROVAL_REJECTED", "Failed approval"),
("READY_FOR_EXPORT", "Ready for export"),
(
"AWAITING_CDS_UPLOAD_CREATE_NEW",
"Awaiting CDS upload - create new",
),
("AWAITING_CDS_UPLOAD_EDIT", "Awaiting CDS upload - edit"),
(
"AWAITING_CDS_UPLOAD_OVERWRITE",
"Awaiting CDS upload - overwrite",
),
(
"AWAITING_CDS_UPLOAD_DELETE",
"Awaiting CDS upload - delete",
),
("SENT_TO_CDS", "Sent to CDS"),
("SENT_TO_CDS_DELETE", "Sent to CDS - delete"),
("PUBLISHED", "Published"),
("CDS_ERROR", "CDS error"),
],
db_index=True,
default="NEW_IN_PROGRESS",
max_length=50,
),
),
(
"approver",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="approved_workbaskets",
to=settings.AUTH_USER_MODEL,
),
),
(
"author",
models.ForeignKey(
editable=False,
on_delete=django.db.models.deletion.PROTECT,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"abstract": False,
},
),
] | en | 0.816033 | # Generated by Django 3.1 on 2021-01-06 15:33 | 1.671608 | 2 |
src/kafka_protobuf/protobuf_transform.py | ZhangShuoAlreadyExists/ProtocolMessage | 2 | 6612618 | from src.libproto.protobuf_general_pb2 import *
from google.protobuf import message as message
import zlib
def encode_message(msg, metadata, compress):
    """Wrap *msg* in a proto_general envelope and return the serialized bytes.

    The envelope records the inner message's type URL, the supplied
    *metadata*, and whether the payload was zlib-compressed.
    """
    envelope = proto_general(meta=metadata)
    envelope.msg.type_url = "/" + msg.DESCRIPTOR.full_name
    payload = msg.SerializeToString()
    if compress:
        envelope.compressed = True
        envelope.msg.value = zlib.compress(payload, -1)
    else:
        envelope.compressed = False
        envelope.msg.value = payload
    return envelope.SerializeToString()
def decode_message(msg_bytes):
    """Decode a proto_general envelope back into its inner message.

    Returns ``[inner_message, metadata]`` on success, or *msg_bytes*
    unchanged if the bytes do not parse as a proto_general envelope.
    """
    outer = proto_general()
    try:
        outer.ParseFromString(msg_bytes)
    except message.DecodeError:
        # Bug fix: the original tested ``isinstance(outer, message.Message)``
        # after parsing -- always true, since ``outer`` is constructed above
        # (its own comment admitted the check "may not work").  Parse
        # failures raise DecodeError, so catch that instead.
        return msg_bytes
    # type_url looks like "/pkg.Type"; keep only the bare message type name.
    inner_type = outer.msg.type_url.split('/')[-1].split('.')[-1]
    # fromlist must be a sequence of names for __import__ to return the
    # submodule (the original passed the non-standard ``fromlist=True``,
    # which CPython merely treats as truthy).
    mod = __import__('src.libproto.%s_pb2' % inner_type, fromlist=[inner_type])
    msg = getattr(mod, inner_type)()
    payload = zlib.decompress(outer.msg.value) if outer.compressed else outer.msg.value
    msg.ParseFromString(payload)
    return [msg, outer.meta]
| from src.libproto.protobuf_general_pb2 import *
from google.protobuf import message as message
import zlib
def encode_message(msg, metadata, compress):
    """Wrap *msg* in a proto_general envelope and return the serialized bytes.

    The envelope records the inner message's type URL, the supplied
    *metadata*, and whether the payload was zlib-compressed.
    """
    envelope = proto_general(meta=metadata)
    envelope.msg.type_url = "/" + msg.DESCRIPTOR.full_name
    payload = msg.SerializeToString()
    if compress:
        envelope.compressed = True
        envelope.msg.value = zlib.compress(payload, -1)
    else:
        envelope.compressed = False
        envelope.msg.value = payload
    return envelope.SerializeToString()
def decode_message(msg_bytes):
    """
    Decode a proto_general-wrapped payload back into its inner message.

    Returns ``[inner_message, metadata]`` on success, or the raw
    ``msg_bytes`` unchanged when the input is not a parseable
    proto_general envelope.
    """
    outer = proto_general()
    # ParseFromString raises DecodeError on malformed input; it never
    # changes outer's type, so the original isinstance() guard was dead code.
    try:
        outer.ParseFromString(msg_bytes)
    except message.DecodeError:
        return msg_bytes

    # type_url looks like "/package.MessageName"; keep only the message name.
    inner_type = outer.msg.type_url.split('/')[-1].split('.')[-1]
    # fromlist must be a sequence; naming the attribute is the idiomatic form.
    mod = __import__('src.libproto.%s_pb2' % inner_type, fromlist=[inner_type])
    msg = getattr(mod, inner_type)()

    if outer.compressed:
        msg.ParseFromString(zlib.decompress(outer.msg.value))
    else:
        msg.ParseFromString(outer.msg.value)

    return [msg, outer.meta]
| en | 0.512931 | # Not sure if parser will raise exception. This if may not work #print outer.msg.type_url # only keep message type, trim others | 2.368749 | 2 |
classes/MOSOSC.py | davidreeder/Python-MOSToolkit | 0 | 6612619 | <reponame>davidreeder/Python-MOSToolkit
# -o--
"""
MOSOSC.py (class)
Wrapper for https://pypi.org/project/python-osc, version 1.8.0.
Backwards compatible to (at least), version 1.7.4.
Provides control over creation and management of...
* OSC client and server
* incrementally aggregated messages and bundles
* sending to OSC paths
* receiving with custom OSC path handlers
* automated OSC path logging on send and receive
* function hook for default path processing
Choices for this initial API are in the service of a simple, unified
interface to the larger offering of pythonosc. MOSOSC does not
comprehensively represent the whole of pythonosc.
Resources:
* https://en.wikipedia.org/wiki/Open_Sound_Control
* opensoundcontrol.org
* https://web.archive.org/web/20030914224904/http://cnmat.berkeley.edu/OSC/OSC-spec.html
* https://www.linuxjournal.com/content/introduction-osc
"""
#---------------------------------------------------------------------
# Copyright (C) <NAME> 2021. <EMAIL>
# Distributed under the Boost Software License, Version 1.0.
# (See ./LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
#---------------------------------------------------------------------
version :str = "0.2"    #RELEASE

# Constructor argument summary, used by usage/error messages below.
USAGE :str = "[hostname:str], [port:int]"

#----------------------------------------- -o--
# Modules.

from typing import Any, List, Tuple, Union
from types import FunctionType

#
from pythonosc import udp_client
from pythonosc import osc_server
from pythonosc import dispatcher
from pythonosc.osc_message_builder import OscMessageBuilder
from pythonosc.osc_bundle_builder import OscBundleBuilder
from pythonosc import osc_message
from pythonosc import osc_bundle

#
import MOSLog
log = MOSLog.MOSLog(logTime=True, logDate=False)
    # NB  Suggested invocation of MOSLog for logging MOSLog.osc().

import MOSZ as z
import MOSDump as dump
class MOSOSC:
    """
    Unified wrapper over pythonosc for OSC client and server management.

    SHARED ATTRIBUTES--
        hostname
        port
        enablePathLogging

    CLIENT METHODS--
        createClient()
        destroyClient()
        message()
        messageAdd()
        messageSend()
        bundle()
        bundleAdd()
        bundleSend()
        send()
        postOSCArgs()

    SERVER METHODS--
        createServer()
        destroyServer()
        startServer()
        stopServer()
        addPathHandler()
        removePathHandler()
        listPathHandlers()
        parseEventArgs()

    SERVER ATTRIBUTES--
        enablePathHandlerDefault
        pathHandlerDefaultFunction
        enableSourceAddrLogging

    NB  All OSC paths must begin with slash and be at least
        one character long.  ("/?")

    NB  Message and bundle creation is composable...
            message() + [messageAdd()] + send()
        ...or just one call: messageSend().  Bundles are similar.

    NB
      * Incoming OSC path will match all valid handlers.
      * Use globbing in OSC path names to match multiple incoming OSC paths.
      * Optionally use default handler function to capture unmatched OSC paths.
        Redirect stderr to squelch DEBUG messages from default handler.

    ASSUME  Each MOSOSC instance is used ONLY as client or as server.

    See class header and pydoc for full details.
    """

    #=============================================== -o--
    # Shared public attributes.

    # NB  hostname and port are effectively read-only.
    #     Set them via input to the class constructor,
    #     createServer() or createClient().
    #
    hostname :str = None
    port :int = None

    enablePathLogging :bool = True      #DEFAULT
        # Log the oscPath and associated arguments with log.osc().
        # Use this attribute in custom oscPath handlers to unify logging
        # control across all handlers.


    #----------------------------------------------- -o--
    # Shared protected attributes.

    _hostnameDefault :str = "localhost"     #DEFAULT
    _portDefault :int = 50001               #DEFAULT


    #----------------------------------------------- -o--
    # Lifecycle.

    # -o-
    def __init__(self, hostname:str=None, port:int=None):
        """
        hostname and port define the server target.
        Public attributes hostname and port are shared between client and server.

        ASSUME  Each MOSOSC instance is used ONLY as client or as server.
        """
        self._validateHostnameAndPort(hostname, port)


    #----------------------------------------------- -o--
    # Shared protected methods.

    # -o-
    # NB   Checks for type and syntax.
    # XXX  No checks for connectivity.
    #
    def _validateHostnameAndPort( self,
                                  hostname :str = None,
                                  port :int = None,
                                  exitValue :int = 1
                                ) -> None:
        # Fall back to class defaults when either value is unset.
        if not hostname:  hostname = self._hostnameDefault
        if not port:      port = self._portDefault

        #
        if not isinstance(hostname, str) or not isinstance(port, int):
            z.postDefUsage(log.className(), USAGE)
            return

        if (len(hostname) <= 0):
            z.postAndExit("%s(): hostname is EMPTY." % log.className(), exitValue=exitValue)
            return

        # Refuse well-known/privileged ports.
        if port < 1024:
            z.postAndExit( "%s(): port MUST BE GREATER than 1024. (%s)" % (log.className(), port),
                           exitValue=exitValue )
            return

        #
        self.hostname = hostname
        self.port = port
    #ENDDEF -- _validateHostnameAndPort()


    # -o-
    # OSC paths must begin with slash ("/") and be at least two characters long.
    # NB  log.critical() is expected to halt execution -- TODO confirm.
    #
    def _validateOSCPath(self, oscPath) -> None:
        if len(oscPath) < 2 \
             or (oscPath[0] != "/"):
            log.critical(f"OSC path is malformed. ({oscPath})")


    #=============================================== -o--
    # Client protected attributes.

    _client :udp_client.UDPClient = None


    #----------------------------------------------- -o--
    # Client public methods.

    # -o-
    # Client runs as UDPClient.  pythonosc also offers SimpleUDPClient.
    #
    def createClient( self,
                      hostname :str = None,
                      port :int = None,
                      enableBroadcast :bool = False,
                    ) -> None:
        """
        One client per instance.  Client sends to server at hostname:port.
        """
        if self._client:
            log.critical("Client is ALREADY CREATED.")

        self._validateHostnameAndPort(hostname, port)
        self._client = udp_client.UDPClient(self.hostname, self.port, enableBroadcast)

        #
        enableBroadcastString = ""
        if enableBroadcast:
            enableBroadcastString = " Broadcast IS ENABLED."

        log.info(f"Created client to {self.hostname}:{self.port}.{enableBroadcastString}")


    # -o-
    def destroyClient(self) -> None:
        if not self._client:
            log.warning("Client is already UNDEFINED.")
            return

        self._client = None
        log.info(f"Destroyed client to {self.hostname}:{self.port}.")


    # -o-
    def message( self,
                 oscPath :str,
                 *messageArgs :Tuple[Any],
                 sendMessageNow :bool = False,
               ) -> OscMessageBuilder:
        """
        Begin building a message for oscPath; optionally send immediately.

        NB  Removes instances of None from messageArgs.
        """
        self._validateClientSetup()
        self._validateOSCPath(oscPath)

        #
        messageBuilder = OscMessageBuilder(oscPath)

        for arg in messageArgs:
            if None is arg:  continue
            messageBuilder.add_arg(arg)

        if sendMessageNow:
            self.send(messageBuilder)

        #
        return messageBuilder


    # -o-
    def messageAdd( self,
                    messageBuilder :OscMessageBuilder,
                    *messageArgs :Tuple[Any],
                  ) -> OscMessageBuilder:
        """
        Append arguments to an existing message builder.

        NB  Removes instances of None from messageArgs.
        """
        self._validateClientSetup()

        if not isinstance(messageBuilder, OscMessageBuilder) \
             or (len(messageArgs) <= 0):
            log.critical("One or more input ARGUMENTS ARE INVALID.")

        #
        for arg in messageArgs:
            if None is arg:  continue
            messageBuilder.add_arg(arg)

        return messageBuilder


    # -o-
    def messageSend(self, oscPath:str, *messageArgs:Tuple[Any]) -> OscMessageBuilder:
        # Convenience: build and send in one call.
        return self.message(oscPath, *messageArgs, sendMessageNow=True)


    # -o-
    def bundle( self,
                *bundleArgs :Tuple[Union[ OscMessageBuilder, OscBundleBuilder ]],
                delayTimeInSeconds :float = 0,      #NB osc_bundle_builder.IMMEDIATELY,
                sendBundleNow :bool = False,
              ) -> OscBundleBuilder:
        """
        Begin building a bundle from messages and/or nested bundles.

        When delayTimeInSeconds is zero (0), the received OSC message
        is executed immediately.  Otherwise, delay execution for N seconds.
        Per OSC standard.
        """
        self._validateClientSetup()

        if (delayTimeInSeconds < 0):
            log.critical(f"delayTimeInSeconds IS INVALID. ({delayTimeInSeconds})")

        #
        timestamp = 0
        if delayTimeInSeconds > 0:
            timestamp = z.timeNowInSeconds(delayTimeInSeconds)

        bundleBuilder = OscBundleBuilder(timestamp)

        #
        for arg in bundleArgs:
            bundleBuilder.add_content(arg.build())

        if sendBundleNow:
            if len(bundleArgs) <= 0:        # XXX  Never reached.
                log.critical("Cannot send BUNDLE WITH NO CONTENT.")
            self.send(bundleBuilder)

        #
        return bundleBuilder


    # -o-
    def bundleAdd( self,
                   bundleBuilder :OscBundleBuilder,
                   *bundleArgs :Tuple[Union[ OscMessageBuilder, OscBundleBuilder ]],
                 ) -> OscBundleBuilder:
        # Append built messages/bundles to an existing bundle builder.
        self._validateClientSetup()

        if not isinstance(bundleBuilder, OscBundleBuilder) \
             or (len(bundleArgs) <= 0):
            log.critical("One or more input ARGUMENTS ARE INVALID.")

        for arg in bundleArgs:
            bundleBuilder.add_content(arg.build())

        return bundleBuilder


    # -o-
    def bundleSend( self,
                    bundleArgs :Tuple[Union[ OscMessageBuilder, OscBundleBuilder ]],
                    delayTimeInSeconds :float = 0,      #NB osc_bundle_builder.IMMEDIATELY
                  ) -> OscBundleBuilder:
        """
        NB  bundleSend() with no bundleArgs will fail.
            Use send() directly if bundle content is already added.

        NOTE(review): bundleArgs is passed as ONE positional argument to
        bundle(), whose *bundleArgs then iterates it with arg.build() --
        a tuple of builders will fail here.  Looks like the intended
        usage is a single builder; confirm and consider *bundleArgs.
        """
        return self.bundle(bundleArgs, delayTimeInSeconds=delayTimeInSeconds, sendBundleNow=True)


    # -o-
    def send( self,
              messageOrBundleBuilder :Union[OscMessageBuilder, OscBundleBuilder],
            ) -> None:
        # Build and transmit a message or bundle; optionally log it.
        self._validateClientSetup()

        if not isinstance(messageOrBundleBuilder, OscMessageBuilder) \
             and not isinstance(messageOrBundleBuilder, OscBundleBuilder):
            log.critical("messageOrBundleBuilder IS INVALID.")

        #
        self._client.send(messageOrBundleBuilder.build())

        if self.enablePathLogging:
            self.postOSCArgs(messageOrBundleBuilder)


    # -o-
    def postOSCArgs( self,
                     messageOrBundleBuilder :Union[OscMessageBuilder, OscBundleBuilder],
                   ) -> None:
        """
        Post OSC args via log.osc() for any OscMessageBuilder or OscBundleBuilder.
        Occurs automatically when enablePathLogging is True.
        """
        # Log one message, annotated with any remaining bundle delay.
        def postOSC(message:osc_message.OscMessage, atTimestamp:float=0) -> None:
            delayString :str = ""

            if atTimestamp > 0:
                delayRemaining = atTimestamp - z.timeNowInSeconds()
                delayString = f" :: remaining delay {delayRemaining:.3f} @ time {atTimestamp:.3f}"

            log.osc(f"{message.address} {z.c2s(message._parameters)}{delayString}")
        #ENDDEF -- postOSC()

        # Recursively unwrap bundles until individual messages are found.
        def findMessageInBundle( bundleOrMessage:Union[osc_message.OscMessage,osc_bundle.OscBundle],
                                 atTimestamp :float = 0,
                               ) -> None:
            #
            if isinstance(bundleOrMessage, osc_message.OscMessage):
                postOSC(bundleOrMessage, atTimestamp)

            # Unwrap bundle to find messages.
            # NB  Getter bug: OscBundle.timestamp()->int !
            #
            else:
                for _ in bundleOrMessage._contents:
                    if isinstance(_, osc_message.OscMessage):
                        postOSC(_, bundleOrMessage._timestamp)
                    else:
                        findMessageInBundle(_, _._timestamp)
        #ENDDEF -- findMessageInBundle()

        #
        mos = messageOrBundleBuilder.build()
        findMessageInBundle(mos)
    #ENDDEF -- postOSCArgs()


    #----------------------------------------------- -o--
    # Client protected methods.

    # -o-
    def _validateClientSetup(self) -> None:
        if not self._client:
            log.critical("Client is UNDEFINED.")


    #=============================================== -o--
    # Server public attributes.

    enablePathHandlerDefault :bool = True   #DEFAULT
        # createServer() automatically defines a method to capture oscPaths
        # that are not named by a custom handler.
        #
        # If False, the oscPath handler default returns before taking action.
        # If set False before calling createServer(), the oscPath handler
        # default will not be created.

    pathHandlerDefaultFunction :FunctionType = None     #DEFAULT
        # Run a function for every oscPath captured by the default handler.
        # See _pathHandlerDefault() for function signature.

    enableSourceAddrLogging :bool = True    #DEFAULT
        # Log the source hostname and port.  In the oscPath default
        # handler, this is logged with oscPath.


    #----------------------------------------------- -o--
    # Server protected attributes.

    _server :osc_server.ThreadingOSCUDPServer = None
    _dispatcher :dispatcher.Dispatcher = None

    #
    _pathHandlersReceiveSourceAddr :bool = True     #DEFAULT
        # NB  This value is used when the Dispatcher creates a handler.
        #     See createServer() and addPathHandler().
        #
        # By DEFAULT, all handlers receive the OSC path source address
        # information.  To prevent the logging of source address, set
        # enableSourceAddrLogging to False.

    _isServerRunning :bool = False
        # True if server is running.

    _willDestroyServer :bool = False
        # Indicate that server is scheduled for destruction.
        # In this state, it shall not be restarted.


    #----------------------------------------------- -o--
    # Server public methods.
    #
    # One server and one dispatcher per class instance.
    # Dispatcher can be updated, even after server is running.
    #
    # Server instance runs as ThreadingOSCUDPServer.
    # pythonosc also offers:
    #   . AsyncIOOSCUDPServer
    #   . BlockingOSCUDPServer
    #   . ForkingOSCUDPServer
    #

    # -o-
    def createServer( self,
                      hostname :str = None,
                      port :int = None,
                    ) -> None:
        """
        Create server without starting it.

        Server is always created with a dispatcher.
        Dispatcher is created by DEFAULT and set to default oscPath
        handler, which user may choose to disable.
        """
        if self._server:
            log.critical("Server is ALREADY CREATED.", exitValue=1)

        self._validateHostnameAndPort(hostname, port)

        #
        self._dispatcher = dispatcher.Dispatcher()

        if self.enablePathHandlerDefault:
            self._dispatcher.set_default_handler(
                    self._pathHandlerDefault,
                    needs_reply_address=self._pathHandlersReceiveSourceAddr )

        #
        try:
            self._server = osc_server.ThreadingOSCUDPServer(
                                (self.hostname, self.port), self._dispatcher )
        except Exception as e:
            # NOTE(review): errno 48 is EADDRINUSE on macOS only; on Linux
            # it is 98.  Consider errno.EADDRINUSE.  Also assumes e carries
            # an errno attribute -- TODO confirm for non-OSError exceptions.
            if 48 == e.errno:
                log.critical( "Server ALREADY RUNNING on " +
                                  f"{self.hostname}:{self.port}.",
                              exitValue=1 )
            else:
                log.critical(e, exitValue=1)
    #ENDDEF -- createServer()


    # -o-
    def destroyServer(self) -> None:
        """
        Destroy server, dispatcher, all oscPath handlers and default
        handler function.
        """
        self._validateServerSetup()

        self._willDestroyServer = True
        self.stopServer()

        self._dispatcher.set_default_handler(None)
        self._dispatcher = None
        self._server = None

        self._willDestroyServer = False


    # -o-
    def startServer(self) -> None:
        # Blocks in serve_forever() until stopServer() is invoked elsewhere.
        self._validateServerSetup()

        #
        if self._isServerRunning:
            log.warning("Server is ALREADY RUNNING at %s:%s..." % (self.hostname, self.port))
            return

        if self._willDestroyServer:
            log.warning("Server at %s:%s is SCHEDULED FOR DESTRUCTION..." % (self.hostname, self.port))
            return

        #
        log.info("Server STARTING at %s:%s..." % (self.hostname, self.port))

        self._isServerRunning = True
        self._server.serve_forever()
        self._isServerRunning = False


    # -o-
    def stopServer(self) -> None:
        self._validateServerSetup()

        if self._isServerRunning:
            self._server.shutdown()
            self._isServerRunning = False
            log.info("...Server at %s:%s is STOPPED." % (self.hostname, self.port))
        else:
            log.info("Server at %s:%s is ALREADY STOPPED." % (self.hostname, self.port))


    # -o-
    def addPathHandler( self,
                        oscPath :str,
                        oscPathHandler :FunctionType,
                        *userArgs :List[Any]
                      ) -> None:
        """
        Give OSC path handlers a simple signature, and use parseEventArgs()
        to resolve essential parameters:

            def handlerFunction(*eventArgs):
                sourceHostname, sourcePort, oscPath, oscArgs, userArgs = \\
                        self.parseEventArgs(eventArgs, postOSCPath=True)
                ...

        userArgs -- Arbitrary parameters or (function) pointers defined by
        addPathHandler() invocation.

        NB--
          * Incoming OSC path will match all valid handlers.
          * Use globbing in OSC path names to match multiple incoming OSC paths.
          * Optionally use default handler function to capture unmatched OSC paths.
            Redirect stderr to squelch DEBUG messages from default handler.
        """
        self._validateServerSetup()
        self._validateOSCPath(oscPath)

        if self._isServerRunning:
            log.error(f"CANNOT add or remove OSC path handlers while SERVER IS RUNNING. ({oscPath})")
            return

        #
        self._dispatcher.map( oscPath,
                              oscPathHandler,
                              userArgs,
                              needs_reply_address=self._pathHandlersReceiveSourceAddr )

        log.info(f"Added OSC path handler \"{oscPath}\".")


    # -o-
    def removePathHandler(self, oscPath:str) -> None:
        self._validateServerSetup()
        self._validateOSCPath(oscPath)

        if self._isServerRunning:
            log.error(f"CANNOT add or remove OSC path handlers while SERVER IS RUNNING. ({oscPath})")
            return

        # NB  Reaches into Dispatcher internals (_map) to unmap a path.
        try:
            self._dispatcher._map.pop(oscPath)
            log.info(f"Removed OSC path handler \"{oscPath}\".")
        except KeyError:
            log.error(f"oscPath DOES NOT EXIST. ({oscPath})")
        except Exception as e:
            log.critical(e, exitValue=1)


    # -o-
    def listPathHandlers(self) -> None:
        self._validateServerSetup()

        registeredOSCPaths :List[str] = list(self._dispatcher._map.keys())
        log.info(dump.listo(registeredOSCPaths, title="OSC Path Handlers", sort=True))


    # -o-
    def parseEventArgs( self,
                        eventArgs :Tuple[Any],
                        expectUserArgs :bool = True,
                        postOSCPath :bool = True,
                      ) -> Tuple[str, int, str, List[Any], List[Any]]:
        """
        RETURNS:  Tuple[str, int, str, List[Any], List[Any]]
                    :: (sourceHostname, sourcePort, oscPath, oscArgs, userArgs)

        Optionally post oscPath via log.osc().
        Returns components of OSC event in a tuple.

        expectUserArgs -- When True (DEFAULT), expect additional arguments
            from custom OSC path handler.
        postOSCPath -- Local toggle, override global toggle, for posting OSC path.

        See also public attributes: enablePathLogging, enableSourceAddrLogging.

        NB  Whether MOSOSC returns source hostname/port to every handler
            is determined by MOSOSC._pathHandlersReceiveSourceAddr (DEFAULT:True).
        """
        sourceHostname :str = None
        sourcePort :int = None
        oscPath :str = None
        userArgs :List[Any] = []
        oscArgs :List[Any] = []

        eventList :List[Any] = list(eventArgs)
        sourceAddrString :str = ""

        # ASSUME  eventArgs tuple is of the form...
        #
        #     ( [sourceAddrTuple], oscPath, [userArgsTuple], oscArgsTuple )
        #
        # ...where:
        #   * sourceAddrTuple exists if _pathHandlersReceiveSourceAddr is True;
        #   * userArgs exists if called from a custom oscPath handler.
        #
        if isinstance(eventList[0], tuple):
            sourceHostname, sourcePort = eventList.pop(0)
            if self.enableSourceAddrLogging:
                sourceAddrString = f" :: {sourceHostname}:{sourcePort}"

        # Trailing space separates path from args in the log line;
        # stripped again before returning.
        oscPath = eventList.pop(0) + " "

        if expectUserArgs:
            userArgs = list(eventList.pop(0)[0])

        oscArgs = eventList

        #
        if self.enablePathLogging and postOSCPath:      # Global and local toggles.
            log.osc(f"{oscPath}{z.c2s(oscArgs)}{sourceAddrString}")

        return (sourceHostname, sourcePort, oscPath.strip(), oscArgs, userArgs)


    #----------------------------------------------- -o--
    # Server protected methods.

    # -o-
    # ASSUME  If Server is defined, then so also is all Server support,
    #         including Dispatcher and default oscPath handler.
    #
    def _validateServerSetup(self) -> None:
        if not self._server:
            log.critical("Server is UNDEFINED.")


    # -o-
    # NB  First argument represents working instance of this class,
    #     passed in by calling environment.
    #
    # Q  Impossible to get same result by passing default handler into
    #    class?  Handlers fail to recognize postSourceAddr, and lose further
    #    information when postSourceAddr is not enabled.
    #
    def _pathHandlerDefault( mososc,
                             *eventArgs :Tuple[Any]
                           ) -> None:
        """
        If pathHandlerDefaultFunction is defined as a function, it will be
        called if enablePathHandlerDefault is True.

        pathHandlerDefaultFunction() REQUIRES the following signature:

            pathHandlerDefaultFunction( mososc,
                                        sourceHostname :str,
                                        sourcePort :int,
                                        oscPath :str,
                                        oscArgs :List[Any],
                                      ) -> None

        mososc -- Same instance of MOSOSC as contains all other methods.

        sourceHostname / sourcePort -- Network origin of the oscPath sent
            to the server.  Available when _pathHandlersReceiveSourceAddr
            is True.

        oscPath / oscArgs -- OSC pathname and associated arguments.
            oscArgs is List of zero (0) or more elements.

        See also public attributes: enablePathHandlerDefault, pathHandlerDefaultFunction.
        """
        if not mososc.enablePathHandlerDefault:  return

        sourceHostname, sourcePort, oscPath, oscArgs, _ = \
                mososc.parseEventArgs(eventArgs, expectUserArgs=False)

        if mososc.pathHandlerDefaultFunction:
            mososc.pathHandlerDefaultFunction(mososc, sourceHostname, sourcePort, oscPath, oscArgs)

#ENDCLASS -- MOSOSC()
| # -o--
"""
MOSOSC.py (class)
Wrapper for https://pypi.org/project/python-osc, version 1.8.0.
Backwards compatible to (at least), version 1.7.4.
Provides control over creation and management of...
* OSC client and server
* incrementally aggregated messages and bundles
* sending to OSC paths
* receiving with custom OSC path handlers
* automated OSC path logging on send and receive
* function hook for default path processing
Choices for this initial API are in the service of a simple, unified
interface to the larger offering of pythonosc. MOSOSC does not
comprehensively represent the whole of pythonosc.
Resources:
* https://en.wikipedia.org/wiki/Open_Sound_Control
* opensoundcontrol.org
* https://web.archive.org/web/20030914224904/http://cnmat.berkeley.edu/OSC/OSC-spec.html
* https://www.linuxjournal.com/content/introduction-osc
"""
#---------------------------------------------------------------------
# Copyright (C) <NAME> 2021. <EMAIL>
# Distributed under the Boost Software License, Version 1.0.
# (See ./LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
#---------------------------------------------------------------------
version :str = "0.2" #RELEASE
USAGE :str = "[hostname:str], [port:int]"
#----------------------------------------- -o--
# Modules.
from typing import Any, List, Tuple, Union
from types import FunctionType
#
from pythonosc import udp_client
from pythonosc import osc_server
from pythonosc import dispatcher
from pythonosc.osc_message_builder import OscMessageBuilder
from pythonosc.osc_bundle_builder import OscBundleBuilder
from pythonosc import osc_message
from pythonosc import osc_bundle
#
import MOSLog
log = MOSLog.MOSLog(logTime=True, logDate=False)
# NB Suggested invocation of MOSLog for logging MOSLog.osc().
import MOSZ as z
import MOSDump as dump
#----------------------------------------- -o--
class MOSOSC:
"""
SHARED ATTRIBUTES--
hostname
port
enablePathLogging
CLIENT METHODS--
createClient()
destroyClient()
message()
messageAdd()
messageSend()
bundle()
bundleAdd()
bundleSend()
send()
postOSCArgs()
SERVER METHODS--
createServer()
destroyServer()
startServer()
stopServer()
addPathHandler()
removePathHandler()
listPathHandlers()
parseEventArgs()
SERVER ATTRIBUTES--
enablePathHandlerDefault
pathHandlerDefaultFunction
enableSourceAddrLogging
NB All OSC paths must begin with slash and be at least
one character long. ("/?")
NB Message and bundle creation is composable...
message() + [messageAdd()] + send()
...or just one call: messageSend(). Bundles are similar.
NB
* Incoming OSC path will match all valid handlers.
* Use globbing in OSC path names to match multiple incoming OSC paths.
* Optionally use default handler function to capture unmatched OSC paths.
Redirect stderr to squelch DEBUG messages from default handler.
ASSUME Each MOSOSC instance is used ONLY as client or as server.
See class header and pydoc for full details.
"""
#=============================================== -o--
# Shared public attributes.
# NB hostname and port are effectively read-only.
# Set them is via input to the class constructor,
# createServer() or createClient().
#
hostname :str = None
port :int = None
enablePathLogging :bool = True #DEFAULT
# Log the oscPath and associated arguments with log.osc().
# Use this attributes in custom oscPath handlers to unify logging
# control across all handlers.
#----------------------------------------------- -o--
# Shared protected attributes.
_hostnameDefault :str = "localhost" #DEFAULT
_portDefault :int = 50001 #DEFAULT
#----------------------------------------------- -o--
# Lifecycle.
# -o-
def __init__(self, hostname:str=None, port:int=None):
"""
hostname and port define server target.
Public attributes hostname and port shared between client and server.
ASSUME Each MOSOSC instance is used ONLY as client or as server.
"""
self._validateHostnameAndPort(hostname, port)
#----------------------------------------------- -o--
# Shared protected methods.
# -o-
# NB Checks for type and syntax.
# XXX No checks for connectivity.
#
def _validateHostnameAndPort( self,
hostname :str = None,
port :int = None,
exitValue :int = 1
) -> None:
if not hostname: hostname = self._hostnameDefault
if not port: port = self._portDefault
#
if not isinstance(hostname, str) or not isinstance(port, int):
z.postDefUsage(log.className(), USAGE)
return
if (len(hostname) <= 0):
z.postAndExit("%s(): hostname is EMPTY." % log.className(), exitValue=exitValue)
return
if port < 1024:
z.postAndExit( "%s(): port MUST BE GREATER than 1024. (%s)" % (log.className(), port),
exitValue=exitValue )
return
#
self.hostname = hostname
self.port = port
#ENDDEF -- _validateHostnameAndPort()
# -o-
# OSC paths must begin with slash ("/") and be at least two characters long.
#
def _validateOSCPath(self, oscPath) -> bool:
if len(oscPath) < 2 \
or (oscPath[0] != "/"):
log.critical(f"OSC path is malformed. ({oscPath})")
#=============================================== -o--
# Client protected attributes.
_client :udp_client.UDPClient = None
#----------------------------------------------- -o--
# Client public methods.
# -o-
# Client runs as UDPClient. pythonosc also offers SimpleUDPClient.
#
def createClient( self,
hostname :str = None,
port :int = None,
enableBroadcast :bool = False,
) -> None:
"""
One client per instance. Client sends to server at hostname:port.
"""
if self._client:
log.critical("Client is ALREADY CREATED.")
self._validateHostnameAndPort(hostname, port)
self._client = udp_client.UDPClient(self.hostname, self.port, enableBroadcast)
#
enableBroadcastString = ""
if enableBroadcast:
enableBroadcastString = " Broadcast IS ENABLED."
log.info(f"Created client to {self.hostname}:{self.port}.{enableBroadcastString}")
# -o-
def destroyClient(self) -> None:
if not self._client:
log.warning("Client is already UNDEFINED.")
return
self._client = None
log.info(f"Destroyed client to {self.hostname}:{self.port}.")
# -o-
def message( self,
oscPath :str,
*messageArgs :Tuple[Any],
sendMessageNow :bool = False,
) -> OscMessageBuilder:
"""
NB Removes instances of None from messageArgs.
"""
self._validateClientSetup()
self._validateOSCPath(oscPath)
#
messageBuilder = OscMessageBuilder(oscPath)
for arg in messageArgs:
if None is arg: continue
messageBuilder.add_arg(arg)
if sendMessageNow:
self.send(messageBuilder)
#
return messageBuilder
# -o-
def messageAdd( self,
messageBuilder :OscMessageBuilder,
*messageArgs :Tuple[Any],
) -> OscMessageBuilder:
"""
NB Removes instances of None from messageArgs.
"""
self._validateClientSetup()
if not isinstance(messageBuilder, OscMessageBuilder) \
or (len(messageArgs) <= 0):
log.critical("One or more input ARGUMENTS ARE INVALID.")
#
for arg in messageArgs:
if None is arg: continue
messageBuilder.add_arg(arg)
return messageBuilder
# -o-
def messageSend(self, oscPath:str, *messageArgs:Tuple[Any]) -> OscMessageBuilder:
return self.message(oscPath, *messageArgs, sendMessageNow=True)
# -o-
def bundle( self,
*bundleArgs :Tuple[Union[ OscMessageBuilder, OscBundleBuilder ]],
delayTimeInSeconds :float = 0, #NB osc_bundle_builder.IMMEDIATELY,
sendBundleNow :bool = False,
) -> OscBundleBuilder:
"""
When delayTimeInSeconds is zero (0), the received OSC message
is executed immediately. Otherwise, delay execution for N seconds.
Per OSC standard.
"""
self._validateClientSetup()
if (delayTimeInSeconds < 0):
log.critical(f"delayTimeInSeconds IS INVALID. ({delayTimeInSeconds})")
#
timestamp = 0
if delayTimeInSeconds > 0:
timestamp = z.timeNowInSeconds(delayTimeInSeconds)
bundleBuilder = OscBundleBuilder(timestamp)
#
for arg in bundleArgs:
bundleBuilder.add_content(arg.build())
if sendBundleNow:
if len(bundleArgs) <= 0: # XXX Never reached.
log.critical("Cannot send BUNDLE WITH NO CONTENT.")
self.send(bundleBuilder)
#
return bundleBuilder
# -o-
def bundleAdd( self,
bundleBuilder :OscBundleBuilder,
*bundleArgs :Tuple[Union[ OscMessageBuilder, OscBundleBuilder ]],
) -> OscBundleBuilder:
self._validateClientSetup()
if not isinstance(bundleBuilder, OscBundleBuilder) \
or (len(bundleArgs) <= 0):
log.critical("One or more input ARGUMENTS ARE INVALID.")
for arg in bundleArgs:
bundleBuilder.add_content(arg.build())
return bundleBuilder
# -o-
def bundleSend( self,
bundleArgs :Tuple[Union[ OscMessageBuilder, OscBundleBuilder ]],
delayTimeInSeconds :float = 0, #NB osc_bundle_builder.IMMEDIATELY
) -> OscBundleBuilder:
"""
NB bundleSend() with no bundleArgs will fail.
Use send() directly if bundle content is already added.
"""
return self.bundle(bundleArgs, delayTimeInSeconds=delayTimeInSeconds, sendBundleNow=True)
# -o-
def send( self,
messageOrBundleBuilder :Union[OscMessageBuilder, OscBundleBuilder],
) -> None:
self._validateClientSetup()
if not isinstance(messageOrBundleBuilder, OscMessageBuilder) \
and not isinstance(messageOrBundleBuilder, OscBundleBuilder):
log.critical("messageOrBundleBuilder IS INVALID.")
#
self._client.send(messageOrBundleBuilder.build())
if self.enablePathLogging:
self.postOSCArgs(messageOrBundleBuilder)
# -o-
def postOSCArgs( self,
messageOrBundleBuilder :Union[OscMessageBuilder, OscBundleBuilder],
) -> None:
"""
Post OSC args via log.osc() for any OscMessageBuilder or OscBundleBuilder.
Occurs automatically when enablePathLogging is True.
"""
def postOSC(message:osc_message.OscMessage, atTimestamp:float=0) -> None:
delayString :str = ""
if atTimestamp > 0:
delayRemaining = atTimestamp - z.timeNowInSeconds()
delayString = f" :: remaining delay {delayRemaining:.3f} @ time {atTimestamp:.3f}"
log.osc(f"{message.address} {z.c2s(message._parameters)}{delayString}")
#ENDDEF -- postOSC()
#
def findMessageInBundle( bundleOrMessage:Union[osc_message.OscMessage,osc_bundle.OscBundle],
atTimestamp :float = 0,
) -> None:
#
if isinstance(bundleOrMessage, osc_message.OscMessage):
postOSC(bundleOrMessage, atTimestamp)
# Unwrap bundle to find messages.
# NB Getter bug: OscBundle.timestamp()->int !
#
else:
for _ in bundleOrMessage._contents:
if isinstance(_, osc_message.OscMessage):
postOSC(_, bundleOrMessage._timestamp)
else:
findMessageInBundle(_, _._timestamp)
#ENDDEF -- findMessageInBundle()
#
mos = messageOrBundleBuilder.build()
findMessageInBundle(mos)
#ENDDEF -- postOSCArgs()
#----------------------------------------------- -o--
# Client protected methods.
# -o-
def _validateClientSetup(self):
if not self._client:
log.critical("Client is UNDEFINED.")
#=============================================== -o--
# Server public attributes.
enablePathHandlerDefault :bool = True #DEFAULT
# createServer() automatically defines a method to capture oscPaths
# that are not named by a custom handler.
#
# If False, the oscPath handler default returns before taking action.
# If set False before calling createServer(), the oscPath handler
# default will not be created.
pathHandlerDefaultFunction :FunctionType = None #DEFAULT
# Run a function for every oscPath captured by the default handler.
# See _pathHandlerDefault() for function signature.
enableSourceAddrLogging :bool = True #DEFAULT
# Log the source hostname and port. In the oscPath default
# handler, this is logged with oscPath.
#----------------------------------------------- -o--
# Server protected attributes.
_server :osc_server.ThreadingOSCUDPServer = None
_dispatcher :dispatcher.Dispatcher = None
#
_pathHandlersReceiveSourceAddr :bool = True #DEFAULT
# NB This value is used when the Dispatcher creates a handler.
# See createServer() and addPathHandler().
#
# By DEFAULT, all handlers receive the OSC path source address
# information. To prevent the logging of source address, set
# enableSourceAddrLogging to False.
_isServerRunning :bool = False
# True if server is running.
_willDestroyServer :bool = False
# Indicate that server is schedule for destruction.
# In this state, it shall not be restarted.
#----------------------------------------------- -o--
# Server public methods.
#
# One server and one dispatcher per class instance.
# Dispatcher can be updated, even after server is running.
#
# Server instance runs as ThreadingOSCUDPServer.
# pythonosc also offers:
# . AsyncIOOSCUDPServer
# . BlockingOSCUDPServer
# . ForkingOSCUDPServer
#
# -o-
def createServer( self,
                  hostname :str = None,
                  port :int = None,
                ) -> None:
    """
    Create server without starting it.

    Server is always created with a dispatcher.  The dispatcher is
    created by DEFAULT and set to the default oscPath handler, which
    the user may choose to disable (see enablePathHandlerDefault).

    hostname / port -- server bind address; checked (and presumably
        stored on self -- _validateHostnameAndPort is outside this
        chunk, confirm there) before the server is constructed.
    """
    import errno  # stdlib; local import keeps the module header untouched

    if self._server:
        log.critical("Server is ALREADY CREATED.", exitValue=1)

    self._validateHostnameAndPort(hostname, port)

    self._dispatcher = dispatcher.Dispatcher()
    if self.enablePathHandlerDefault:
        self._dispatcher.set_default_handler(
            self._pathHandlerDefault,
            needs_reply_address=self._pathHandlersReceiveSourceAddr )

    try:
        self._server = osc_server.ThreadingOSCUDPServer(
            (self.hostname, self.port), self._dispatcher )
    except Exception as e:
        # BUG FIX: the original tested "48 == e.errno" on ANY exception:
        #   * exceptions other than OSError have no .errno, so the test
        #     itself raised AttributeError;
        #   * 48 is EADDRINUSE only on BSD/macOS (Linux uses 98).
        # Use getattr() plus the symbolic errno constant instead.
        if getattr(e, "errno", None) == errno.EADDRINUSE:
            log.critical( "Server ALREADY RUNNING on " +
                          f"{self.hostname}:{self.port}.",
                          exitValue=1 )
        else:
            log.critical(e, exitValue=1)
#ENDDEF -- createServer()
# -o-
def destroyServer(self) -> None:
"""
Destroy server, dispatcher, all oscPath handlers and default
handler function.
After this call createServer() must be invoked again before use.
"""
self._validateServerSetup()
# Flag destruction in progress so startServer() refuses to restart
# the server while it is being torn down.
self._willDestroyServer = True
self.stopServer()
# Detach the default handler, then drop dispatcher and server references.
self._dispatcher.set_default_handler(None)
self._dispatcher = None
self._server = None
self._willDestroyServer = False
# -o-
def startServer(self) -> None:
"""
Start the previously created server.
BLOCKS in serve_forever() until stopServer() -- which calls
shutdown() -- is invoked from another thread or a handler.
"""
self._validateServerSetup()
#
if self._isServerRunning:
log.warning("Server is ALREADY RUNNING at %s:%s..." % (self.hostname, self.port))
return
if self._willDestroyServer:
log.warning("Server at %s:%s is SCHEDULED FOR DESTRUCTION..." % (self.hostname, self.port))
return
#
log.info("Server STARTING at %s:%s..." % (self.hostname, self.port))
self._isServerRunning = True
self._server.serve_forever()  # blocking; returns only after shutdown()
# Reached once stopServer()/shutdown() unblocks serve_forever().
self._isServerRunning = False
# -o-
def stopServer(self) -> None:
"""Shut down a running server; logs (and does nothing) if already stopped."""
self._validateServerSetup()
if self._isServerRunning:
# shutdown() unblocks the serve_forever() loop in startServer().
self._server.shutdown()
self._isServerRunning = False
log.info("...Server at %s:%s is STOPPED." % (self.hostname, self.port))
else:
log.info("Server at %s:%s is ALREADY STOPPED." % (self.hostname, self.port))
# -o-
def addPathHandler( self,
oscPath :str,
oscPathHandler :FunctionType,
*userArgs :List[Any]
) -> None:
"""
Give OSC path handlers a simple signature, and use parseEventArgs()
to resolve essential parameters:
def handlerFunction(*eventArgs):
sourceHostname, sourcePort, oscPath, oscArgs, userArgs = \\
self.parseEventArgs(eventArgs, postOSCPath=True)
...
userArgs -- Arbitrary parameters or (function) pointers defined by
addPathHandler() invocation.
NB--
* Incoming OSC path will match all valid handlers.
* Use globbing in OSC path names to match multiple incoming OSC paths.
* Optionally use default handler function to capture unmatched OSC paths.
Redirect stderr to squelch DEBUG messages from default handler.
* Handlers may only be added or removed while the server is stopped.
"""
self._validateServerSetup()
self._validateOSCPath(oscPath)
if self._isServerRunning:
log.error(f"CANNOT add or remove OSC path handlers while SERVER IS RUNNING. ({oscPath})")
return
#
# NB userArgs is passed to Dispatcher.map() as a SINGLE tuple argument,
# so handlers receive it wrapped one level deep; parseEventArgs()
# compensates by unwrapping with [0].  Intentional -- do not "fix"
# one side without the other.
self._dispatcher.map( oscPath,
oscPathHandler,
userArgs,
needs_reply_address=self._pathHandlersReceiveSourceAddr )
log.info(f"Added OSC path handler \"{oscPath}\".")
# -o-
def removePathHandler(self, oscPath:str) -> None:
"""Remove the handler registered for oscPath.  Server must be stopped."""
self._validateServerSetup()
self._validateOSCPath(oscPath)
if self._isServerRunning:
log.error(f"CANNOT add or remove OSC path handlers while SERVER IS RUNNING. ({oscPath})")
return
#
try:
# NOTE(review): reaches into Dispatcher's private _map attribute;
# confirm whether the installed pythonosc version offers a public
# unmap API that could be used instead.
self._dispatcher._map.pop(oscPath)
log.info(f"Removed OSC path handler \"{oscPath}\".")
except KeyError:
log.error(f"oscPath DOES NOT EXIST. ({oscPath})")
except Exception as e:
log.critical(e, exitValue=1)
# -o-
def listPathHandlers(self) -> None:
"""Log a sorted listing of all registered OSC path handlers."""
self._validateServerSetup()
# Keys of the dispatcher's (private) _map are the registered OSC paths.
registeredOSCPaths :List[str] = list(self._dispatcher._map.keys())
log.info(dump.listo(registeredOSCPaths, title="OSC Path Handlers", sort=True))
# -o-
def parseEventArgs( self,
eventArgs :Tuple[Any],
expectUserArgs :bool = True,
postOSCPath :bool = True,
) -> Tuple[str, int, str, List[Any], List[Any]]:
"""
RETURNS: Tuple[str, int, str, List[Any], List[Any]]
:: (sourceHostname, sourcePort, oscPath, oscArgs, userArgs)
Optionally post oscPath via log.osc().
Returns components of OSC event in a tuple.
expectUserArgs -- When True (DEFAULT), expect additional arguments
from custom OSC path handler.
postOSCPath -- Local toggle, override global toggle, for posting OSC path.
See also public attributes: enablePathLogging, enableSourceAddrLogging.
NB Whether MOSOSC returns source hostname/port to every handler
is determined by MOSOSC._pathHandlersReceiveSourceAddr (DEFAULT:True).
"""
sourceHostname :str = None
sourcePort :int = None
oscPath :str = None
userArgs :List[Any] = []
oscArgs :List[Any] = []
eventList :List[Any] = list(eventArgs)
sourceAddrString :str = ""
# ASSUME eventArgs tuple is of the form...
#
# ( [sourceAddrTuple], oscPath, [userArgsTuple], oscArgsTuple )
#
# ...where:
# * sourceAddrTuple exists if _pathHandlersReceiveSourceAddr is True;
# * userArgs exists if called from a custom oscPath handler.
#
if isinstance(eventList[0], tuple):
sourceHostname, sourcePort = eventList.pop(0)
if self.enableSourceAddrLogging:
sourceAddrString = f" :: {sourceHostname}:{sourcePort}"
# Trailing space is only for the log.osc() formatting below;
# it is stripped again in the return statement.
oscPath = eventList.pop(0) + " "
if expectUserArgs:
# [0] unwraps the extra tuple layer added when addPathHandler()
# passed userArgs to Dispatcher.map() as a single argument.
userArgs = list(eventList.pop(0)[0])
oscArgs = eventList
#
if self.enablePathLogging and postOSCPath: # Global and local toggles.
log.osc(f"{oscPath}{z.c2s(oscArgs)}{sourceAddrString}")
return (sourceHostname, sourcePort, oscPath.strip(), oscArgs, userArgs)
#----------------------------------------------- -o--
# Server protected methods.
# -o-
# ASSUME If Server is defined, then so also is all Server support,
# including Dispatcher and default oscPath handler.
#
def _validateServerSetup(self):
"""Guard for server methods: report a fatal error if createServer() was never called."""
if not self._server:
log.critical("Server is UNDEFINED.")
# -o-
# NB First argument represents working instance of this class,
# passed in by calling environment.
#
# Q Impossible to get same result by passing default handler into
# class? Handlers fail to recognize postSourceAddr, and lose further
# information when postSourceAddr is not enabled.
#
def _pathHandlerDefault( mososc,
*eventArgs :Tuple[Any]
) -> None:
"""
If pathHandlerDefaultFunction is defined as a function, it will be
called if enablePathHandlerDefault is True.
pathHandlerDefaultFunction() REQUIRES the following signature:
pathHandlerDefaultFunction( mososc,
sourceHostname :str,
sourcePort :int,
oscPath :str,
oscArgs :List[Any],
) -> None
mososc -- Same instance of MOSOSC as contains all other methods.
sourceHostname / sourcePort -- Network origin of the oscPath sent
to the server. Available when _pathHandlersReceiveSourceAddr
is True.
oscPath / oscArgs -- OSC pathname and associated arguments.
oscArgs is List of zero (0) or more elements.
See also public attributes: enablePathHandlerDefault, pathHandlerDefaultFunction.
"""
# Registered via set_default_handler(self._pathHandlerDefault) in
# createServer(): that is a BOUND method, so the instance arrives as
# the first parameter (mososc) even though it is not named "self".
if not mososc.enablePathHandlerDefault: return
# expectUserArgs=False: the default handler is registered without
# userArgs, so there is no wrapped tuple to unwrap.
sourceHostname, sourcePort, oscPath, oscArgs, _ = \
mososc.parseEventArgs(eventArgs, expectUserArgs=False)
if mososc.pathHandlerDefaultFunction:
mososc.pathHandlerDefaultFunction(mososc, sourceHostname, sourcePort, oscPath, oscArgs)
#ENDCLASS -- MOSOSC() | en | 0.566771 | # -o-- MOSOSC.py (class) Wrapper for https://pypi.org/project/python-osc, version 1.8.0. Backwards compatible to (at least), version 1.7.4. Provides control over creation and management of... * OSC client and server * incrementally aggregated messages and bundles * sending to OSC paths * receiving with custom OSC path handlers * automated OSC path logging on send and receive * function hook for default path processing Choices for this initial API are in the service of a simple, unified interface to the larger offering of pythonosc. MOSOSC does not comprehensively represent the whole of pythonosc. Resources: * https://en.wikipedia.org/wiki/Open_Sound_Control * opensoundcontrol.org * https://web.archive.org/web/20030914224904/http://cnmat.berkeley.edu/OSC/OSC-spec.html * https://www.linuxjournal.com/content/introduction-osc #--------------------------------------------------------------------- # Copyright (C) <NAME> 2021. <EMAIL> # Distributed under the Boost Software License, Version 1.0. # (See ./LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) #--------------------------------------------------------------------- #RELEASE #----------------------------------------- -o-- # Modules. # # # NB Suggested invocation of MOSLog for logging MOSLog.osc(). #----------------------------------------- -o-- SHARED ATTRIBUTES-- hostname port enablePathLogging CLIENT METHODS-- createClient() destroyClient() message() messageAdd() messageSend() bundle() bundleAdd() bundleSend() send() postOSCArgs() SERVER METHODS-- createServer() destroyServer() startServer() stopServer() addPathHandler() removePathHandler() listPathHandlers() parseEventArgs() SERVER ATTRIBUTES-- enablePathHandlerDefault pathHandlerDefaultFunction enableSourceAddrLogging NB All OSC paths must begin with slash and be at least one character long. ("/?") NB Message and bundle creation is composable... message() + [messageAdd()] + send() ...or just one call: messageSend(). 
Bundles are similar. NB * Incoming OSC path will match all valid handlers. * Use globbing in OSC path names to match multiple incoming OSC paths. * Optionally use default handler function to capture unmatched OSC paths. Redirect stderr to squelch DEBUG messages from default handler. ASSUME Each MOSOSC instance is used ONLY as client or as server. See class header and pydoc for full details. #=============================================== -o-- # Shared public attributes. # NB hostname and port are effectively read-only. # Set them is via input to the class constructor, # createServer() or createClient(). # #DEFAULT # Log the oscPath and associated arguments with log.osc(). # Use this attributes in custom oscPath handlers to unify logging # control across all handlers. #----------------------------------------------- -o-- # Shared protected attributes. #DEFAULT #DEFAULT #----------------------------------------------- -o-- # Lifecycle. # -o- hostname and port define server target. Public attributes hostname and port shared between client and server. ASSUME Each MOSOSC instance is used ONLY as client or as server. #----------------------------------------------- -o-- # Shared protected methods. # -o- # NB Checks for type and syntax. # XXX No checks for connectivity. # # # #ENDDEF -- _validateHostnameAndPort() # -o- # OSC paths must begin with slash ("/") and be at least two characters long. # #=============================================== -o-- # Client protected attributes. #----------------------------------------------- -o-- # Client public methods. # -o- # Client runs as UDPClient. pythonosc also offers SimpleUDPClient. # One client per instance. Client sends to server at hostname:port. # # -o- # -o- NB Removes instances of None from messageArgs. # # # -o- NB Removes instances of None from messageArgs. # # -o- # -o- #NB osc_bundle_builder.IMMEDIATELY, When delayTimeInSeconds is zero (0), the received OSC message is executed immediately. 
Otherwise, delay execution for N seconds. Per OSC standard. # # # XXX Never reached. # # -o- # -o- #NB osc_bundle_builder.IMMEDIATELY NB bundleSend() with no bundleArgs will fail. Use send() directly if bundle content is already added. # -o- # # -o- Post OSC args via log.osc() for any OscMessageBuilder or OscBundleBuilder. Occurs automatically when enablePathLogging is True. #ENDDEF -- postOSC() # # # Unwrap bundle to find messages. # NB Getter bug: OscBundle.timestamp()->int ! # #ENDDEF -- findMessageInBundle() # #ENDDEF -- postOSCArgs() #----------------------------------------------- -o-- # Client protected methods. # -o- #=============================================== -o-- # Server public attributes. #DEFAULT # createServer() automatically defines a method to capture oscPaths # that are not named by a custom handler. # # If False, the oscPath handler default returns before taking action. # If set False before calling createServer(), the oscPath handler # default will not be created. #DEFAULT # Run a function for every oscPath captured by the default handler. # See _pathHandlerDefault() for function signature. #DEFAULT # Log the source hostname and port. In the oscPath default # handler, this is logged with oscPath. #----------------------------------------------- -o-- # Server protected attributes. # #DEFAULT # NB This value is used when the Dispatcher creates a handler. # See createServer() and addPathHandler(). # # By DEFAULT, all handlers receive the OSC path source address # information. To prevent the logging of source address, set # enableSourceAddrLogging to False. # True if server is running. # Indicate that server is schedule for destruction. # In this state, it shall not be restarted. #----------------------------------------------- -o-- # Server public methods. # # One server and one dispatcher per class instance. # Dispatcher can be updated, even after server is running. # # Server instance runs as ThreadingOSCUDPServer. 
# pythonosc also offers: # . AsyncIOOSCUDPServer # . BlockingOSCUDPServer # . ForkingOSCUDPServer # # -o- Create server without starting it. Server is always created with a dispatcher. Dispatcher is created by DEFAULT and set to default oscPath handler, which user may choose to disable. # # #ENDDEF -- createServer() # -o- Destroy server, dispatcher, all oscPath handlers and default handler function. # -o- # # # -o- # -o- Give OSC path handlers a simple signature, and use parseEventArgs() to resolve essential parameters: def handlerFunction(*eventArgs): sourceHostname, sourcePort, oscPath, oscArgs, userArgs = \\ self.parseEventArgs(eventArgs, postOSCPath=True) ... userArgs -- Arbitrary parameters or (function) pointers defined by addPathHandler() invocation. NB-- * Incoming OSC path will match all valid handlers. * Use globbing in OSC path names to match multiple incoming OSC paths. * Optionally use default handler function to capture unmatched OSC paths. Redirect stderr to squelch DEBUG messages from default handler. # # -o- # # -o- # -o- RETURNS: Tuple[str, int, str, List[Any], List[Any]] :: (sourceHostname, sourcePort, oscPath, oscArgs, userArgs) Optionally post oscPath via log.osc(). Returns components of OSC event in a tuple. expectUserArgs -- Then True (DEFAULT), expect additional arguments from custom OSC path handler. postOSCPath -- Local toggle, override global toggle, for posting OSC path. See also public attributes: enablePathLogging, enableSourceAddrLogging. NB Whether MOSOSC returns source hostname/port to every handler is determined by MOSOSC._pathHandlersReceiveSourceAddr (DEFAULT:True). # ASSUME eventArgs tuple is of the form... # # ( [sourceAddrTuple], oscPath, [userArgsTuple], oscArgsTuple ) # # ...where: # * sourceAddrTuple exists if _pathHandlersReceiveSourceAddr is True; # * userAgrs exists if called from a custom oscPath handler. # # # Global and local toggles. #----------------------------------------------- -o-- # Server protected methods. 
# -o- # ASSUME If Server is defined, then so also is all Server support, # including Dispatcher and default oscPath handler. # # -o- # NB First argument represents working instance of this class, # passed in by calling environment. # # Q Impossible to get same result by passing default handler into # class? Handlers fail to recognize postSourceAddr, and lose further # information when postSourceAddr is not enabled. # If pathHandlerDefaultFunction is defined as a function, it will be called if enablePathHandlerDefault is True. pathHandlerDefaultFunction() REQUIRES the following signature: pathHandlerDefaultFunction( mososc, sourceHostname :str, sourcePort :int, oscPath :str, oscArgs :List[Any], ) -> None mososc -- Same instance of MOSOSC as contains all other methods. sourceHostname / sourcePort -- Network origin of the oscPath sent to the server. Available when _pathHandlersReceiveSourceAddr is True. oscPath / oscArgs -- OSC pathname and associated arguments. oscArgs is List of zero (0) or more elements. See also public attributes: enablePathHandlerDefault, pathHandlerDefaultFunction. #ENDCLASS -- MOSOSC() | 1.498137 | 1 |
tests/test_plots.py | ahoetker/pinch-analysis | 1 | 6612620 | <filename>tests/test_plots.py<gh_stars>1-10
import pytest
import numpy as np
from pinch import ureg, Q_
from pinch.plots import (
cold_composite,
combined_composite,
grand_composite,
hot_composite,
)
def test_cold_composite():
    """Smoke test: the cold composite curve builds without raising (show=False)."""
    enthalpies = Q_(np.array([187479.5040, 246613.1040, 453441.3840]), "MJ")
    temperatures = Q_(np.array([30.30, 106.70, 240.00]), "celsius")
    cold_composite(enthalpies, temperatures, show=False)
def test_grand_composite():
# Smoke test: building the grand composite curve must not raise.
# temp holds boundary temperatures (degC) and enth the matching
# enthalpy values (MJ) -- presumably interval net heat flows; confirm
# against pinch.plots.grand_composite.  show=False keeps matplotlib
# from opening a window during the test run.
temp = Q_(
np.array([40.30, 45.00, 52.10, 116.70, 159.20, 206.10, 240.00, 250.00]),
"celsius",
)
enth = Q_(
np.array(
[
187479.504,
191117.304,
1.640166e5,
5.627682e4,
18443.376,
512.568,
0,
15516.0,
]
),
"MJ",
)
grand_composite(enth, temp, show=False)
def test_combined_composite():
    """Smoke test: the combined composite curve builds without raising (show=False)."""
    cold_enth = Q_(np.array([187479.5040, 246613.1040, 453441.3840]), "MJ")
    cold_temp = Q_(np.array([30.30, 106.70, 240.00]), "celsius")
    hot_enth = Q_(np.array([0, 3.259609e4, 294112.728, 384813.576, 437925.384]), "MJ")
    hot_temp = Q_(np.array([45.00, 52.10, 159.20, 206.10, 240.00]), "celsius")
    # FIX: removed the unused local `enth` (a duplicate of hot_enth) that
    # the original built and never passed anywhere.
    combined_composite(cold_enth, hot_enth, cold_temp, hot_temp, show=False)
def test_hot_composite():
    """Smoke test: the hot composite curve builds without raising (show=False)."""
    temperatures = Q_(np.array([45.00, 52.10, 159.20, 206.10, 240.00]), "celsius")
    enthalpies = Q_(np.array([0, 3.259609e4, 294112.728, 384813.576, 437925.384]), "MJ")
    hot_composite(enthalpies, temperatures, show=False)
| <filename>tests/test_plots.py<gh_stars>1-10
import pytest
import numpy as np
from pinch import ureg, Q_
from pinch.plots import (
cold_composite,
combined_composite,
grand_composite,
hot_composite,
)
def test_cold_composite():
cold_temp = Q_(np.array([30.30, 106.70, 240.00]), "celsius")
enth = Q_(np.array([187479.5040, 246613.1040, 453441.3840]), "MJ")
cold_composite(enth, cold_temp, show=False)
def test_grand_composite():
temp = Q_(
np.array([40.30, 45.00, 52.10, 116.70, 159.20, 206.10, 240.00, 250.00]),
"celsius",
)
enth = Q_(
np.array(
[
187479.504,
191117.304,
1.640166e5,
5.627682e4,
18443.376,
512.568,
0,
15516.0,
]
),
"MJ",
)
grand_composite(enth, temp, show=False)
def test_combined_composite():
cold_enth = Q_(np.array([187479.5040, 246613.1040, 453441.3840]), "MJ")
cold_temp = Q_(np.array([30.30, 106.70, 240.00]), "celsius")
hot_enth = Q_(np.array([0, 3.259609e4, 294112.728, 384813.576, 437925.384]), "MJ")
hot_temp = Q_(np.array([45.00, 52.10, 159.20, 206.10, 240.00]), "celsius")
enth = Q_(np.array([0, 3.259609e4, 294112.728, 384813.576, 437925.384]), "MJ")
combined_composite(cold_enth, hot_enth, cold_temp, hot_temp, show=False)
def test_hot_composite():
hot_temp = Q_(np.array([45.00, 52.10, 159.20, 206.10, 240.00]), "celsius")
enth = Q_(np.array([0, 3.259609e4, 294112.728, 384813.576, 437925.384]), "MJ")
hot_composite(enth, hot_temp, show=False)
| none | 1 | 1.991443 | 2 | |
main.py | shreemantolahiri/Object-Detection | 1 | 6612621 | <gh_stars>1-10
import cv2
import matplotlib.pyplot as plt  # NOTE(review): only used by a removed image-mode snippet; kept so partial views of this file stay importable

# --- Model configuration --------------------------------------------------
config_file = "ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"
frozen_model = "frozen_inference_graph.pb"

model = cv2.dnn_DetectionModel(frozen_model, config_file)

# Class labels: one label per line in names.txt.
classLabels = []
file_name = 'names.txt'
with open(file_name, 'rt') as abc:
    classLabels = abc.read().rstrip('\n').split('\n')
print(classLabels)

# Input preprocessing expected by SSD MobileNet v3.
model.setInputSize(320, 320)
model.setInputScale(1.0 / 127.5)          # mobilenet---> [-1,1]
model.setInputMean((127.5, 127.5, 127.5))
model.setInputSwapRB(True)                # automatic conversion color

# --- Video source ----------------------------------------------------------
cap = cv2.VideoCapture("test.mp4")
if not cap.isOpened():
    cap = cv2.VideoCapture(1)             # fall back to a camera device
if not cap.isOpened():
    raise IOError("Cannot open video!")

font_scale = 3
font = cv2.FONT_HERSHEY_PLAIN

while True:
    ret, frame = cap.read()
    # FIX: the original ignored `ret`, so model.detect() crashed on the
    # empty frame returned at end of stream.
    if not ret:
        break
    ClassIndex, confidence, bbox = model.detect(frame, confThreshold=0.55)
    print(ClassIndex)                     # FIX: was printed twice per frame
    if len(ClassIndex) != 0:
        for ClassInd, conf, boxes in zip(ClassIndex.flatten(), confidence.flatten(), bbox):
            cv2.rectangle(frame, boxes, (255, 0, 0), 2)
            # COCO-style label files are 1-based, hence the -1 index shift.
            cv2.putText(frame, classLabels[ClassInd - 1].upper(),
                        (boxes[0] + 10, boxes[1] + 30), font,
                        fontScale=font_scale, color=(0, 255, 0), thickness=2)
    cv2.imshow('Object Detection', frame)
    if cv2.waitKey(2) & 0xFF == ord('q'):
        break

cap.release()
# FIX: the original wrote `cv2.destroyAllWindows` without parentheses,
# which referenced the function but never called it.
cv2.destroyAllWindows()
| import cv2
import matplotlib.pyplot as plt
config_file = "ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"
frozen_model = "frozen_inference_graph.pb"
model = cv2.dnn_DetectionModel(frozen_model,config_file)
classLabels = []
file_name = 'names.txt'
with open(file_name, 'rt') as abc:
classLabels = abc.read().rstrip('\n').split('\n')
print(classLabels)
model.setInputSize(320,320)
model.setInputScale(1.0/127.5)
model.setInputMean((127.5,127.5,127.5)) #mobilenet---> [-1,1]
model.setInputSwapRB(True) #automatic conversion color
''''''
'''img= cv2.imread('test1.jpg') #forimage mode
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
font_scale = 3
font = cv2.FONT_HERSHEY_PLAIN
for ClassInd, conf, boxes in zip(ClassIndex.flatten(), confidece.flatten(), bbox):
cv2.rectangle(img, boxes, (255, 0, 0), 2)
cv2.putText(img, classLabels[ClassInd - 1], (boxes[0] + 10, boxes[1] + 40), font, fontScale=font_scale,
color=(0, 255, 0), thickness=3)
cv2.waitKey(0)'''
#video
cap=cv2.VideoCapture("test.mp4")
if not cap.isOpened():
cap= cv2.VideoCapture(1)
if not cap.isOpened():
raise IOError("Cannot open video!")
font_scale = 3
font = cv2.FONT_HERSHEY_PLAIN
'''frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
size = (frame_width, frame_height)
result = cv2.VideoWriter('output.avi',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size)'''
while True:
ret,frame= cap.read()
ClassIndex, confidece, bbox= model.detect(frame, confThreshold= 0.55)
print(ClassIndex)
print(ClassIndex)
if(len(ClassIndex)!=0):
for ClassInd, conf, boxes in zip(ClassIndex.flatten(), confidece.flatten(), bbox):
cv2.rectangle(frame, boxes, (255, 0, 0), 2)
cv2.putText(frame, classLabels[ClassInd - 1].upper(), (boxes[0] + 10, boxes[1] + 30), font, fontScale=font_scale,
color=(0, 255, 0), thickness=2)
'''cv2.putText(frame, (str(confidece*100),2),(boxes[0]+200,boxes[1]+30), cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)'''
cv2.imshow('Object Detection', frame)
if cv2.waitKey(2) & 0xFF== ord('q'):
break
cap.release()
'''result.release()'''
cv2.destroyAllWindows | en | 0.183351 | #mobilenet---> [-1,1] #automatic conversion color img= cv2.imread('test1.jpg') #forimage mode
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
font_scale = 3
font = cv2.FONT_HERSHEY_PLAIN
for ClassInd, conf, boxes in zip(ClassIndex.flatten(), confidece.flatten(), bbox):
cv2.rectangle(img, boxes, (255, 0, 0), 2)
cv2.putText(img, classLabels[ClassInd - 1], (boxes[0] + 10, boxes[1] + 40), font, fontScale=font_scale,
color=(0, 255, 0), thickness=3)
cv2.waitKey(0) #video frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
size = (frame_width, frame_height)
result = cv2.VideoWriter('output.avi',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size) cv2.putText(frame, (str(confidece*100),2),(boxes[0]+200,boxes[1]+30), cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2) result.release() | 2.580298 | 3 |
ch5_sphere.py | davraamides/raytrace | 0 | 6612622 | """
"""
import math
from PIL import Image
from tuples import Point
from matrix import Matrix
from sphere import Sphere
from ray import Ray
# Render a red silhouette of a sphere by casting one ray per pixel
# ("Putting It Together" exercise, The Ray Tracer Challenge ch. 5).
W = 200
H = 200
D = 200
im = Image.new('RGB', (W, H))
pix = im.load()
if False:
## this is done in image coordinates
# Disabled variant: eye and sphere live directly in pixel space.
eye = Point(W / 2, H / 2, D)
sphere = Sphere()
ts = Matrix.scale(50, 50, 50)
tt = Matrix.translate(W / 2, H / 2, D / 4)
sphere.transform = tt * ts
for x in range(W):
for y in range(H):
ray = Ray(eye, Point(x, y, 0) - eye)
xs = sphere.intersect(ray)
if xs:
pix[x, y] = (255, 0, 0)
print(x)
else:
## this is done in object coordinates
eye = Point(0, 0, -5)
sphere = Sphere()
tt = Matrix.translate(0, 0, -2)
sphere.transform = tt
wall = (-3, 3, -3, 3) # LRBT
# m maps a pixel coordinate onto the wall rectangle: scale pixel units
# to wall units, then translate to the wall's lower-left corner.
ms = Matrix.scale(float(wall[1] - wall[0]) / W, float(wall[3] - wall[2]) / H, 1.0)
mt = Matrix.translate(wall[0], wall[2], 0)
m = mt * ms
for x in range(W):
#xobj = float(x * (wall[1] - wall[0])) / W + wall[0]
for y in range(H):
#yobj = float(y * (wall[3] - wall[2])) / H + wall[2]
p = m * Point(x, y, 0)
# Cast a ray from the eye through the wall point; any hit paints red.
ray = Ray(eye, p - eye)
xs = sphere.intersect(ray)
if xs:
pix[x, y] = (255, 0, 0)
print(x)  # progress output, one line per column (indentation lost in this dump -- verify placement)
im.show()
| """
"""
import math
from PIL import Image
from tuples import Point
from matrix import Matrix
from sphere import Sphere
from ray import Ray
W = 200
H = 200
D = 200
im = Image.new('RGB', (W, H))
pix = im.load()
if False:
## this is done in image coordinates
eye = Point(W / 2, H / 2, D)
sphere = Sphere()
ts = Matrix.scale(50, 50, 50)
tt = Matrix.translate(W / 2, H / 2, D / 4)
sphere.transform = tt * ts
for x in range(W):
for y in range(H):
ray = Ray(eye, Point(x, y, 0) - eye)
xs = sphere.intersect(ray)
if xs:
pix[x, y] = (255, 0, 0)
print(x)
else:
## this is done in object coordinates
eye = Point(0, 0, -5)
sphere = Sphere()
tt = Matrix.translate(0, 0, -2)
sphere.transform = tt
wall = (-3, 3, -3, 3) # LRBT
ms = Matrix.scale(float(wall[1] - wall[0]) / W, float(wall[3] - wall[2]) / H, 1.0)
mt = Matrix.translate(wall[0], wall[2], 0)
m = mt * ms
for x in range(W):
#xobj = float(x * (wall[1] - wall[0])) / W + wall[0]
for y in range(H):
#yobj = float(y * (wall[3] - wall[2])) / H + wall[2]
p = m * Point(x, y, 0)
ray = Ray(eye, p - eye)
xs = sphere.intersect(ray)
if xs:
pix[x, y] = (255, 0, 0)
print(x)
im.show()
| en | 0.464977 | ## this is done in image coordinates ## this is done in object coordinates # LRBT #xobj = float(x * (wall[1] - wall[0])) / W + wall[0] #yobj = float(y * (wall[3] - wall[2])) / H + wall[2] | 3.057128 | 3 |
server/chapters/migrations/0004_page_content.py | nickdotreid/opioid-mat-decision-aid | 0 | 6612623 | <reponame>nickdotreid/opioid-mat-decision-aid
# Generated by Django 2.2.1 on 2019-05-15 18:06
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('chapters', '0003_auto_20190513_1621'),
]
operations = [
migrations.AddField(
model_name='page',
name='content',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
]
| # Generated by Django 2.2.1 on 2019-05-15 18:06
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('chapters', '0003_auto_20190513_1621'),
]
operations = [
migrations.AddField(
model_name='page',
name='content',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
] | en | 0.607766 | # Generated by Django 2.2.1 on 2019-05-15 18:06 | 1.562935 | 2 |
hyperbox_app/medmnist/datamodules/__init__.py | marsggbo/hyperbox_app | 1 | 6612624 | from .ct_data import *
from .utils import *
| from .ct_data import *
from .utils import *
| none | 1 | 1.082926 | 1 | |
lintcode/medium/intersection_of_two_linked_lists/py/intersection_of_two_linked_lists.py | lilsweetcaligula/Online-Judges | 0 | 6612625 | <filename>lintcode/medium/intersection_of_two_linked_lists/py/intersection_of_two_linked_lists.py
# coding:utf-8
'''
@Copyright:LintCode
@Author: lilsweetcaligula
@Problem: http://www.lintcode.com/problem/intersection-of-two-linked-lists
@Language: Python
@Datetime: 17-02-16 16:50
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param headA: the first list
    # @param headB: the second list
    # @return: a ListNode
    def getIntersectionNode(self, headA, headB):
        """
        Return the node at which the two singly linked lists intersect
        (identity, not value, comparison), or None if they are disjoint.

        IMPROVEMENT over the original cycle-based version: the original
        temporarily rewired list A into a cycle and restored it afterwards,
        mutating caller data mid-call.  The two-pointer technique below
        never modifies the lists, still runs in O(lenA + lenB) time and
        O(1) extra space, and returns the same results.
        """
        if headA is None or headB is None:
            return None
        pa, pb = headA, headB
        while pa is not pb:
            # When a pointer runs off the end of its list, restart it at
            # the head of the OTHER list.  Both pointers then travel
            # exactly lenA + lenB steps, so they meet at the first shared
            # node -- or both reach None simultaneously when disjoint.
            pa = headB if pa is None else pa.next
            pb = headA if pb is None else pb.next
        # pa is pb here: either the first common node or None.
        return pa
| <filename>lintcode/medium/intersection_of_two_linked_lists/py/intersection_of_two_linked_lists.py
# coding:utf-8
'''
@Copyright:LintCode
@Author: lilsweetcaligula
@Problem: http://www.lintcode.com/problem/intersection-of-two-linked-lists
@Language: Python
@Datetime: 17-02-16 16:50
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param headA: the first list
# @param headB: the second list
# @return: a ListNode
def getIntersectionNode(self, headA, headB):
if headA == None or headB == None:
return None
first = headA
last = headA
while last.next != None:
last = last.next
# Temporarily make a cycle. We will remove
# the cycle once we check the status of the
# intersection.
last.next = first
slow = headB
fast = headB
while fast != None and fast.next != None:
slow = slow.next
fast = fast.next.next
if fast == slow:
break
if fast == slow:
# There exists an intersection between
# the two lists.
slow = headB
while fast != slow:
slow = slow.next
fast = fast.next
# The intersection node is now the one
# pointed to by the "slow" pointer. We
# now restore the original structure of
# the lists.
last.next = None
return slow
# There exists no intersection between
# the two lists. Restore the original
# structure and return.
last.next = None
return None
| en | 0.774228 | # coding:utf-8 @Copyright:LintCode @Author: lilsweetcaligula @Problem: http://www.lintcode.com/problem/intersection-of-two-linked-lists @Language: Python @Datetime: 17-02-16 16:50 # Definition for singly-linked list. # class ListNode: # def __init__(self, x): # self.val = x # self.next = None # @param headA: the first list # @param headB: the second list # @return: a ListNode # Temporarily make a cycle. We will remove # the cycle once we check the status of the # intersection. # There exists an intersection between # the two lists. # The intersection node is now the one # pointed to by the "slow" pointer. We # now restore the original structure of # the lists. # There exists no intersection between # the two lists. Restore the original # structure and return. | 3.560496 | 4 |
ringlus/ringlus/doctype/issue/issue.py | momscode/ringlus | 0 | 6612626 | from __future__ import unicode_literals
import frappe
from frappe.model.mapper import get_mapped_doc
from frappe.model.document import Document
from frappe.model.document import get_doc
from frappe.model.document import Document
def _map_issue_to(doctype, source_name, target_doc=None):
    """Map an Issue document onto *doctype*, copying the Issue name into the target's 'issue' field."""
    return get_mapped_doc("Issue", source_name, {
        "Issue": {
            "doctype": doctype,
            "field_map": {
                "name": "issue"
            }
        },
    }, target_doc)


@frappe.whitelist()
def make_expense_claim(source_name, target_doc=None):
    """Create an Expense Claim mapped from the given Issue.

    source_name -- name of the source Issue document.
    target_doc  -- optional existing document to merge the mapping into.
    """
    # CONSISTENCY: body was a near-duplicate of make_material_request();
    # both now share _map_issue_to().
    return _map_issue_to("Expense Claim", source_name, target_doc)


@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
    """Create a Material Request (type 'Material Issue') mapped from the given Issue.

    source_name -- name of the source Issue document.
    target_doc  -- optional existing document to merge the mapping into.
    """
    target_doc = _map_issue_to("Material Request", source_name, target_doc)
    target_doc.material_request_type = "Material Issue"
    return target_doc
@frappe.whitelist()
def get_sales_order_details(customer):
    """Return the distinct sales orders that have a Service Level Agreement for *customer*.

    The query is parameterized; *customer* is never interpolated into the
    SQL string, so it is safe against injection.
    """
    # FIX: the original passed (customer) -- parentheses without a comma,
    # i.e. just `customer`, not a tuple.  Use an explicit one-element
    # tuple for the bind values (same result, no ambiguity).
    return frappe.db.sql(
        """ select distinct sales_order from `tabService Level Agreement` where customer= %s""",
        (customer,),
        as_dict=1,
    )
import frappe
from frappe.model.mapper import get_mapped_doc
from frappe.model.document import Document
from frappe.model.document import get_doc
from frappe.model.document import Document
@frappe.whitelist()
def make_expense_claim(source_name, target_doc=None):
target_doc = get_mapped_doc("Issue", source_name, {
"Issue": {
"doctype": "Expense Claim",
"field_map": {
"name": "issue"
}
},
}, target_doc)
return target_doc
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
target_doc = get_mapped_doc("Issue", source_name, {
"Issue": {
"doctype": "Material Request",
"field_map": {
"name": "issue"
}
},
}, target_doc)
target_doc.material_request_type = "Material Issue"
return target_doc
@frappe.whitelist()
def get_sales_order_details(customer):
project_list1 = frappe.db.sql(""" select distinct sales_order from `tabService Level Agreement` where customer= %s""",(customer),as_dict=1)
return project_list1 | en | 0.639099 | select distinct sales_order from `tabService Level Agreement` where customer= %s | 1.937023 | 2 |
lib/main.py | nkrios/kacak | 1 | 6612627 | <filename>lib/main.py<gh_stars>1-10
__VERSION__ = '2.0'
__AUTHOR__ = 'Galkan'
__DATE__ = '2014'
try:
import sys
import argparse
import os
import re
from nmap import Nmap
from common import *
except ImportError,e:
import sys
sys.stdout.write("%s\n" %e)
sys.exit(1)
class AddressAction(argparse.Action):
def is_file_exists(self, file_list):
for file in file_list[0],file_list[2]:
if not re.match("/", file):
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + "%s: Full Path Must Be Used </usr/local/data/data.txt>"% (file) + bcolors.ENDC
sys.exit(2)
for file in file_list:
if not os.path.exists(file):
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + "The file \"%s\" doesn't Exists On The System !!!"% (file) + bcolors.ENDC
sys.exit(3)
def __call__(self, parser, args, values, option = None):
args.options = values
if args.domain and not len(args.options) == 3:
parser.error("Usage --domain <users_file> <config_file> <ip_file>")
elif args.mimikatz and not len(args.options) == 1:
parser.error("Usage --mimikatz <mimikatz_result_file>")
if args.domain:
self.is_file_exists(args.options)
class Main:
"""
Main Class for Kacak
"""
def __init__(self):
description = "Enumerate Users for windows based networks"
parser = argparse.ArgumentParser(description = description)
group_parser = parser.add_mutually_exclusive_group(required=True)
group_parser.add_argument('--domain', dest = 'domain', action = 'store_const', const = 'domain', help = "Road to Domain Admin ")
group_parser.add_argument('--mimikatz', dest = 'mimikatz', action = 'store_const', const = 'mimikatz', help = "Parse Mimikatz Results")
group_parser.add_argument('--08_067', dest = 'smbvuln', action = 'store', nargs = 1, help = "Discover the 08_067")
parser.add_argument('--thread', '-t', dest = 'thread', action = 'store', help = "Thread Number")
parser.add_argument('--output', '-o', dest = 'output_file', action = 'store', help = "File to Save Results")
parser.add_argument('options', nargs='*', action = AddressAction)
parser.add_argument('--verbose', '-v', action = 'store', dest = 'verbose', type = int)
self.args = parser.parse_args()
if self.args.smbvuln and not self.args.thread:
print >> sys.stderr, bcolors.OKBLUE + "Usage Error:" + bcolors.ENDC + bcolors.FAIL + "-t expects one argument" + bcolors.ENDC
sys.exit(4)
elif self.args.smbvuln and not self.args.output_file:
print >> sys.stderr, bcolors.OKBLUE + "Usage Error:" + bcolors.ENDC + bcolors.FAIL + "-o expects one argument" + bcolors.ENDC
sys.exit(5)
if ( self.args.verbose ) and ( self.args.verbose < 0 or self.args.verbose > 3 ):
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + "Verbose value must be between 1 and 3" + bcolors.ENDC
sys.exit(6)
def run_domain(self):
"""
Run smb_enum_domain_users metasploit module
"""
from domain import DoMain
verbose = self.args.verbose
domain_users_file = self.args.options[0]
config_file = self.args.options[1]
ip_file = self.args.options[2]
domain = DoMain(domain_users_file, config_file, ip_file, verbose)
try:
domain.run()
except Exception, err:
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + str(err) + bcolors.ENDC
sys.exit(7)
def run_mimikatz(self):
"""
Parse mimikatz results
"""
from lib.mimikatz import Mimikatz
verbose = self.args.verbose
mimikatz_file = self.args.options[0]
mimikatz = Mimikatz(mimikatz_file)
try:
mimikatz.run()
except Exception, err:
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + str(err) + bcolors.ENDC
sys.exit(8)
def run_smbvuln(self):
"""
Discover 08_067
"""
verbose = self.args.verbose
try:
nmap = Nmap(self.args.output_file)
nmap.run(self.args.smbvuln[0], self.args.thread)
except Exception, err:
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + str(err) + bcolors.ENDC
sys.exit(9)
def run(self):
"""
Select which function to run
"""
if self.args.domain:
self.run_domain()
elif self.args.mimikatz:
self.run_mimikatz()
elif self.args.smbvuln:
self.run_smbvuln()
| <filename>lib/main.py<gh_stars>1-10
__VERSION__ = '2.0'
__AUTHOR__ = 'Galkan'
__DATE__ = '2014'
try:
import sys
import argparse
import os
import re
from nmap import Nmap
from common import *
except ImportError,e:
import sys
sys.stdout.write("%s\n" %e)
sys.exit(1)
class AddressAction(argparse.Action):
def is_file_exists(self, file_list):
for file in file_list[0],file_list[2]:
if not re.match("/", file):
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + "%s: Full Path Must Be Used </usr/local/data/data.txt>"% (file) + bcolors.ENDC
sys.exit(2)
for file in file_list:
if not os.path.exists(file):
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + "The file \"%s\" doesn't Exists On The System !!!"% (file) + bcolors.ENDC
sys.exit(3)
def __call__(self, parser, args, values, option = None):
args.options = values
if args.domain and not len(args.options) == 3:
parser.error("Usage --domain <users_file> <config_file> <ip_file>")
elif args.mimikatz and not len(args.options) == 1:
parser.error("Usage --mimikatz <mimikatz_result_file>")
if args.domain:
self.is_file_exists(args.options)
class Main:
"""
Main Class for Kacak
"""
def __init__(self):
description = "Enumerate Users for windows based networks"
parser = argparse.ArgumentParser(description = description)
group_parser = parser.add_mutually_exclusive_group(required=True)
group_parser.add_argument('--domain', dest = 'domain', action = 'store_const', const = 'domain', help = "Road to Domain Admin ")
group_parser.add_argument('--mimikatz', dest = 'mimikatz', action = 'store_const', const = 'mimikatz', help = "Parse Mimikatz Results")
group_parser.add_argument('--08_067', dest = 'smbvuln', action = 'store', nargs = 1, help = "Discover the 08_067")
parser.add_argument('--thread', '-t', dest = 'thread', action = 'store', help = "Thread Number")
parser.add_argument('--output', '-o', dest = 'output_file', action = 'store', help = "File to Save Results")
parser.add_argument('options', nargs='*', action = AddressAction)
parser.add_argument('--verbose', '-v', action = 'store', dest = 'verbose', type = int)
self.args = parser.parse_args()
if self.args.smbvuln and not self.args.thread:
print >> sys.stderr, bcolors.OKBLUE + "Usage Error:" + bcolors.ENDC + bcolors.FAIL + "-t expects one argument" + bcolors.ENDC
sys.exit(4)
elif self.args.smbvuln and not self.args.output_file:
print >> sys.stderr, bcolors.OKBLUE + "Usage Error:" + bcolors.ENDC + bcolors.FAIL + "-o expects one argument" + bcolors.ENDC
sys.exit(5)
if ( self.args.verbose ) and ( self.args.verbose < 0 or self.args.verbose > 3 ):
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + "Verbose value must be between 1 and 3" + bcolors.ENDC
sys.exit(6)
def run_domain(self):
"""
Run smb_enum_domain_users metasploit module
"""
from domain import DoMain
verbose = self.args.verbose
domain_users_file = self.args.options[0]
config_file = self.args.options[1]
ip_file = self.args.options[2]
domain = DoMain(domain_users_file, config_file, ip_file, verbose)
try:
domain.run()
except Exception, err:
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + str(err) + bcolors.ENDC
sys.exit(7)
def run_mimikatz(self):
"""
Parse mimikatz results
"""
from lib.mimikatz import Mimikatz
verbose = self.args.verbose
mimikatz_file = self.args.options[0]
mimikatz = Mimikatz(mimikatz_file)
try:
mimikatz.run()
except Exception, err:
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + str(err) + bcolors.ENDC
sys.exit(8)
def run_smbvuln(self):
"""
Discover 08_067
"""
verbose = self.args.verbose
try:
nmap = Nmap(self.args.output_file)
nmap.run(self.args.smbvuln[0], self.args.thread)
except Exception, err:
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + str(err) + bcolors.ENDC
sys.exit(9)
def run(self):
"""
Select which function to run
"""
if self.args.domain:
self.run_domain()
elif self.args.mimikatz:
self.run_mimikatz()
elif self.args.smbvuln:
self.run_smbvuln()
| en | 0.536672 | Main Class for Kacak Run smb_enum_domain_users metasploit module Parse mimikatz results Discover 08_067 Select which function to run | 2.649818 | 3 |
pypybox2d/joints/mouse.py | the-mba/Progra-Super-Mario | 0 | 6612628 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2011 <NAME> http://www.box2d.org
# Python port by <NAME> / http://pybox2d.googlecode.com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from __future__ import absolute_import
__all__ = ('MouseJoint', )
__version__ = "$Revision: 353 $"
__date__ = "$Date: 2011-07-15 17:13:40 -0400 (Fri, 15 Jul 2011) $"
# $Source$
from ..common import (PI, Vec2, Mat22, scalar_cross, is_valid_float, property)
from ..settings import EPSILON
from .joint import Joint
class MouseJoint(Joint):
"""
A mouse joint is used to make a point on a body track a
specified world point. This a soft constraint with a maximum
force. This allows the constraint to stretch and without
applying huge forces.
Creation requires a world target point, tuning parameters, and
the time step.
NOTE: this joint is not documented in the manual because it was
developed to be used in the testbed. If you want to learn how to
use the mouse joint, look at the testbed.
"""
# p = attached point, m = mouse point
# C = p - m
# Cdot = v
# = v + cross(w, r)
# J = [I r_skew]
# Identity used:
# w k % (rx i + ry j) = w * (-ry i + rx j)
def __init__(self, body, target=(0, 0), max_force = 0.0, frequency=5.0, damping_ratio=0.7):
if body is None:
raise ValueError('body must be set')
target = Vec2(*target)
if not target.valid:
raise ValueError('Invalid target')
if not is_valid_float(max_force) or max_force < 0.0:
raise ValueError('Invalid maximum force')
if not is_valid_float(frequency) or frequency < 0.0:
raise ValueError('Invalid frequency')
if not is_valid_float(damping_ratio) or damping_ratio < 0.0:
raise ValueError('Invalid damping ratio')
Joint.__init__(self, None, body, False)
self._target = target
self._local_anchor_b = body.get_local_point(target)
self._max_force = max_force
self._impulse = Vec2()
self._frequency = frequency
self._damping_ratio = damping_ratio
self._beta = 0.0
self._gamma = 0.0
def __copy__(self):
return MouseJoint(self._body_b, self._target,
self._max_force, self._frequency, self._damping_ratio)
def get_reaction_force(self, inv_dt):
"""Get the reaction force on body_b at the joint anchor in Newtons."""
return inv_dt * self._impulse
def get_reaction_torque(self, inv_dt):
"""Get the reaction torque on body_b in N*m."""
return 0.0 # inv_dt * 0.0
@property
def target(self):
"""
The target point. This is assumed to coincide with the body
anchor initially.
"""
return Vec2(*self._target)
@target.setter
def target(self, target):
if not self._body_b.awake:
self._body_b.awake = True
self._target = Vec2(*target)
@property
def max_force(self):
"""
The maximum constraint force that can be exerted
to move the candidate body. Usually you will express
as some multiple of the weight (multiplier * mass * gravity).
"""
return self._max_force
@max_force.setter
def max_force(self, max_force):
self._max_force = max_force
@property
def frequency(self):
"""The response speed"""
return self._frequency
@frequency.setter
def frequency(self, frequency):
self._frequency = frequency
@property
def damping_ratio(self):
"""The damping ratio: 0 = no damping, 1 = critical damping"""
return self._damping_ratio
@damping_ratio.setter
def damping_ratio(self, damping_ratio):
self._damping_ratio = damping_ratio
def _init_velocity_constraints(self, step, positions, velocities):
body = self._body_b
self._index = index_b = body._island_index
cb, ab = positions[index_b]
vb, wb = velocities[index_b]
mb = self._inv_mass_b = body._inv_mass
ib = self._inv_Ib = body._invI
self._local_center_b = body._sweep.local_center
qb = Mat22(angle=ab)
self._mass = mass = body.mass
# Frequency
omega = 2.0 * PI * self._frequency
# Damping coefficient
d = 2.0 * mass * self._damping_ratio * omega
# Spring stiffness
k = mass * (omega ** 2)
# magic formulas
# gamma has units of inverse mass.
# beta has units of inverse time.
dt = step.dt
assert(d + dt * k > EPSILON)
self._gamma = dt * (d + dt * k)
if self._gamma != 0.0:
self._gamma = 1.0 / self._gamma
self._beta = dt * k * self._gamma
# Compute the effective mass matrix.
rb = self._rb = qb * (self._local_anchor_b - self._local_center_b)
# K = [(1/ma + 1/mb) * eye(2) - skew(ra) * invIa * skew(ra) - skew(rb) * invIb * skew(rb)]
# = [1/ma+1/mb 0 ] + invIa * [ra.y*ra.y -ra.x*ra.y] + invIb * [ra.y*ra.y -ra.x*ra.y]
# [ 0 1/ma+1/mb] [-ra.x*ra.y ra.x*ra.x] [-ra.x*ra.y ra.x*ra.x]
K = Mat22()
K.col1 = Vec2(mb + ib * rb.y ** 2 + self._gamma,
-ib * rb.x * rb.y)
K.col2 = Vec2(K.col1.y,
mb + ib * rb.x ** 2 + self._gamma)
self._mass = K.inverse
self._c = self._beta * (cb + rb - self._target)
# Cheat with some damping
wb *= 0.98
if step.warm_starting:
# Warm starting.
self._impulse *= step.dt_ratio
vb += mb * self._impulse
wb += ib * rb.cross(self._impulse)
else:
self._impulse = Vec2()
velocities[index_b] = (vb, wb)
def _solve_velocity_constraints(self, step, positions, velocities):
index_b = self._index
cb, ab = positions[index_b]
vb, wb = velocities[index_b]
mb = self._inv_mass_b
ib = self._inv_Ib
rb = self._rb
# Cdot = v + cross(w, r)
Cdot = vb + scalar_cross(wb, rb)
impulse = self._mass * (-(Cdot + self._c + self._gamma * self._impulse))
old_impulse = self._impulse
self._impulse += impulse
max_impulse = step.dt * self._max_force
if self._impulse.length_squared > max_impulse ** 2:
self._impulse *= max_impulse / self._impulse.length
impulse = self._impulse - old_impulse
vb += mb * impulse
wb += ib * rb.cross(impulse)
velocities[index_b] = (vb, wb)
def _solve_position_constraints(self, step, positions, velocities):
"""This returns true if the position errors are within tolerance."""
return True
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2011 <NAME> http://www.box2d.org
# Python port by <NAME> / http://pybox2d.googlecode.com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from __future__ import absolute_import
__all__ = ('MouseJoint', )
__version__ = "$Revision: 353 $"
__date__ = "$Date: 2011-07-15 17:13:40 -0400 (Fri, 15 Jul 2011) $"
# $Source$
from ..common import (PI, Vec2, Mat22, scalar_cross, is_valid_float, property)
from ..settings import EPSILON
from .joint import Joint
class MouseJoint(Joint):
"""
A mouse joint is used to make a point on a body track a
specified world point. This a soft constraint with a maximum
force. This allows the constraint to stretch and without
applying huge forces.
Creation requires a world target point, tuning parameters, and
the time step.
NOTE: this joint is not documented in the manual because it was
developed to be used in the testbed. If you want to learn how to
use the mouse joint, look at the testbed.
"""
# p = attached point, m = mouse point
# C = p - m
# Cdot = v
# = v + cross(w, r)
# J = [I r_skew]
# Identity used:
# w k % (rx i + ry j) = w * (-ry i + rx j)
def __init__(self, body, target=(0, 0), max_force = 0.0, frequency=5.0, damping_ratio=0.7):
if body is None:
raise ValueError('body must be set')
target = Vec2(*target)
if not target.valid:
raise ValueError('Invalid target')
if not is_valid_float(max_force) or max_force < 0.0:
raise ValueError('Invalid maximum force')
if not is_valid_float(frequency) or frequency < 0.0:
raise ValueError('Invalid frequency')
if not is_valid_float(damping_ratio) or damping_ratio < 0.0:
raise ValueError('Invalid damping ratio')
Joint.__init__(self, None, body, False)
self._target = target
self._local_anchor_b = body.get_local_point(target)
self._max_force = max_force
self._impulse = Vec2()
self._frequency = frequency
self._damping_ratio = damping_ratio
self._beta = 0.0
self._gamma = 0.0
def __copy__(self):
return MouseJoint(self._body_b, self._target,
self._max_force, self._frequency, self._damping_ratio)
def get_reaction_force(self, inv_dt):
"""Get the reaction force on body_b at the joint anchor in Newtons."""
return inv_dt * self._impulse
def get_reaction_torque(self, inv_dt):
"""Get the reaction torque on body_b in N*m."""
return 0.0 # inv_dt * 0.0
@property
def target(self):
"""
The target point. This is assumed to coincide with the body
anchor initially.
"""
return Vec2(*self._target)
@target.setter
def target(self, target):
if not self._body_b.awake:
self._body_b.awake = True
self._target = Vec2(*target)
@property
def max_force(self):
"""
The maximum constraint force that can be exerted
to move the candidate body. Usually you will express
as some multiple of the weight (multiplier * mass * gravity).
"""
return self._max_force
@max_force.setter
def max_force(self, max_force):
self._max_force = max_force
@property
def frequency(self):
"""The response speed"""
return self._frequency
@frequency.setter
def frequency(self, frequency):
self._frequency = frequency
@property
def damping_ratio(self):
"""The damping ratio: 0 = no damping, 1 = critical damping"""
return self._damping_ratio
@damping_ratio.setter
def damping_ratio(self, damping_ratio):
self._damping_ratio = damping_ratio
def _init_velocity_constraints(self, step, positions, velocities):
body = self._body_b
self._index = index_b = body._island_index
cb, ab = positions[index_b]
vb, wb = velocities[index_b]
mb = self._inv_mass_b = body._inv_mass
ib = self._inv_Ib = body._invI
self._local_center_b = body._sweep.local_center
qb = Mat22(angle=ab)
self._mass = mass = body.mass
# Frequency
omega = 2.0 * PI * self._frequency
# Damping coefficient
d = 2.0 * mass * self._damping_ratio * omega
# Spring stiffness
k = mass * (omega ** 2)
# magic formulas
# gamma has units of inverse mass.
# beta has units of inverse time.
dt = step.dt
assert(d + dt * k > EPSILON)
self._gamma = dt * (d + dt * k)
if self._gamma != 0.0:
self._gamma = 1.0 / self._gamma
self._beta = dt * k * self._gamma
# Compute the effective mass matrix.
rb = self._rb = qb * (self._local_anchor_b - self._local_center_b)
# K = [(1/ma + 1/mb) * eye(2) - skew(ra) * invIa * skew(ra) - skew(rb) * invIb * skew(rb)]
# = [1/ma+1/mb 0 ] + invIa * [ra.y*ra.y -ra.x*ra.y] + invIb * [ra.y*ra.y -ra.x*ra.y]
# [ 0 1/ma+1/mb] [-ra.x*ra.y ra.x*ra.x] [-ra.x*ra.y ra.x*ra.x]
K = Mat22()
K.col1 = Vec2(mb + ib * rb.y ** 2 + self._gamma,
-ib * rb.x * rb.y)
K.col2 = Vec2(K.col1.y,
mb + ib * rb.x ** 2 + self._gamma)
self._mass = K.inverse
self._c = self._beta * (cb + rb - self._target)
# Cheat with some damping
wb *= 0.98
if step.warm_starting:
# Warm starting.
self._impulse *= step.dt_ratio
vb += mb * self._impulse
wb += ib * rb.cross(self._impulse)
else:
self._impulse = Vec2()
velocities[index_b] = (vb, wb)
def _solve_velocity_constraints(self, step, positions, velocities):
index_b = self._index
cb, ab = positions[index_b]
vb, wb = velocities[index_b]
mb = self._inv_mass_b
ib = self._inv_Ib
rb = self._rb
# Cdot = v + cross(w, r)
Cdot = vb + scalar_cross(wb, rb)
impulse = self._mass * (-(Cdot + self._c + self._gamma * self._impulse))
old_impulse = self._impulse
self._impulse += impulse
max_impulse = step.dt * self._max_force
if self._impulse.length_squared > max_impulse ** 2:
self._impulse *= max_impulse / self._impulse.length
impulse = self._impulse - old_impulse
vb += mb * impulse
wb += ib * rb.cross(impulse)
velocities[index_b] = (vb, wb)
def _solve_position_constraints(self, step, positions, velocities):
"""This returns true if the position errors are within tolerance."""
return True
| en | 0.848979 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # C++ version Copyright (c) 2006-2011 <NAME> http://www.box2d.org # Python port by <NAME> / http://pybox2d.googlecode.com # # This software is provided 'as-is', without any express or implied # warranty. In no event will the authors be held liable for any damages # arising from the use of this software. # Permission is granted to anyone to use this software for any purpose, # including commercial applications, and to alter it and redistribute it # freely, subject to the following restrictions: # 1. The origin of this software must not be misrepresented; you must not # claim that you wrote the original software. If you use this software # in a product, an acknowledgment in the product documentation would be # appreciated but is not required. # 2. Altered source versions must be plainly marked as such, and must not be # misrepresented as being the original software. # 3. This notice may not be removed or altered from any source distribution. # $Source$ A mouse joint is used to make a point on a body track a
specified world point. This a soft constraint with a maximum
force. This allows the constraint to stretch and without
applying huge forces.
Creation requires a world target point, tuning parameters, and
the time step.
NOTE: this joint is not documented in the manual because it was
developed to be used in the testbed. If you want to learn how to
use the mouse joint, look at the testbed. # p = attached point, m = mouse point # C = p - m # Cdot = v # = v + cross(w, r) # J = [I r_skew] # Identity used: # w k % (rx i + ry j) = w * (-ry i + rx j) Get the reaction force on body_b at the joint anchor in Newtons. Get the reaction torque on body_b in N*m. # inv_dt * 0.0 The target point. This is assumed to coincide with the body
anchor initially. The maximum constraint force that can be exerted
to move the candidate body. Usually you will express
as some multiple of the weight (multiplier * mass * gravity). The response speed The damping ratio: 0 = no damping, 1 = critical damping # Frequency # Damping coefficient # Spring stiffness # magic formulas # gamma has units of inverse mass. # beta has units of inverse time. # Compute the effective mass matrix. # K = [(1/ma + 1/mb) * eye(2) - skew(ra) * invIa * skew(ra) - skew(rb) * invIb * skew(rb)] # = [1/ma+1/mb 0 ] + invIa * [ra.y*ra.y -ra.x*ra.y] + invIb * [ra.y*ra.y -ra.x*ra.y] # [ 0 1/ma+1/mb] [-ra.x*ra.y ra.x*ra.x] [-ra.x*ra.y ra.x*ra.x] # Cheat with some damping # Warm starting. # Cdot = v + cross(w, r) This returns true if the position errors are within tolerance. | 2.061143 | 2 |
gin/i_o/test/test_from_smiles_rdkit.py | choderalab/gin | 24 | 6612629 | <gh_stars>10-100
import gin
import rdkit
from rdkit import Chem
import pandas as pd
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
BONDS = {
Chem.BondType.SINGLE:1.0,
Chem.BondType.DOUBLE:2.0,
Chem.BondType.TRIPLE:3.0,
Chem.BondType.AROMATIC:1.5,
Chem.BondType.UNSPECIFIED:0.0
}
def get_adjacency_matrix_rdkit(smiles):
mol = Chem.MolFromSmiles(smiles)
n_atoms = mol.GetNumAtoms()
# initialize an adjacency_map
adjacency_map = np.zeros((n_atoms, n_atoms))
# get a list of bonds
bonds = mol.GetBonds()
# loop through these bonds
for bond in bonds:
# order = BONDS[bond.GetBondType()]
atom0_idx = bond.GetBeginAtomIdx()
atom1_idx = bond.GetEndAtomIdx()
adjacency_map[atom0_idx, atom1_idx] = 1.
adjacency_map[atom1_idx, atom0_idx] = 1.
# adjacency_map = np.triu(adjacency_map)
return adjacency_map
def get_num_bonds(smiles):
mol = Chem.MolFromSmiles(smiles)
mol = Chem.rdmolops.RemoveHs(mol)
bonds = mol.GetBonds()
return len(bonds)
def get_eigenvalues_from_adjacency_map(adjacency_map):
eigen_values, _ = np.linalg.eigh(adjacency_map)
return eigen_values
df = pd.read_csv('data/SAMPL.csv')
df = df[~df['smiles'].str.contains('B')]
df = df[~df['smiles'].str.contains('\+')]
df = df[~df['smiles'].str.contains('\-')]
smiles_array = df[['smiles']].values.flatten()
'''
@pytest.mark.parametrize('smiles', smiles_array)
def test_num_bonds(smiles):
npt.assert_almost_equal(
get_num_bonds(smiles),
np.count_nonzero(
gin.i_o.from_smiles.smiles_to_mol(
smiles)[1]))
'''
@pytest.mark.parametrize('smiles', smiles_array)
def test_adjacency_map(smiles):
adjacency_map_rdkit = get_adjacency_matrix_rdkit(smiles)
adjacency_map_gin = gin.i_o.from_smiles.to_mol(
smiles)[1]
adjacency_map_gin = tf.where(
tf.greater(
adjacency_map_gin,
tf.constant(0, dtype=tf.float32)),
tf.ones_like(adjacency_map_gin),
tf.zeros_like(adjacency_map_gin))
adjacency_map_gin = adjacency_map_gin + tf.transpose(adjacency_map_gin)
eighs_rdkit = get_eigenvalues_from_adjacency_map(
adjacency_map_rdkit)
eighs_gin = get_eigenvalues_from_adjacency_map(
adjacency_map_gin)
err_msg = str(adjacency_map_rdkit) + str(adjacency_map_gin)
npt.assert_almost_equal(
eighs_rdkit,
eighs_gin,
err_msg = err_msg)
| import gin
import rdkit
from rdkit import Chem
import pandas as pd
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
BONDS = {
Chem.BondType.SINGLE:1.0,
Chem.BondType.DOUBLE:2.0,
Chem.BondType.TRIPLE:3.0,
Chem.BondType.AROMATIC:1.5,
Chem.BondType.UNSPECIFIED:0.0
}
def get_adjacency_matrix_rdkit(smiles):
mol = Chem.MolFromSmiles(smiles)
n_atoms = mol.GetNumAtoms()
# initialize an adjacency_map
adjacency_map = np.zeros((n_atoms, n_atoms))
# get a list of bonds
bonds = mol.GetBonds()
# loop through these bonds
for bond in bonds:
# order = BONDS[bond.GetBondType()]
atom0_idx = bond.GetBeginAtomIdx()
atom1_idx = bond.GetEndAtomIdx()
adjacency_map[atom0_idx, atom1_idx] = 1.
adjacency_map[atom1_idx, atom0_idx] = 1.
# adjacency_map = np.triu(adjacency_map)
return adjacency_map
def get_num_bonds(smiles):
mol = Chem.MolFromSmiles(smiles)
mol = Chem.rdmolops.RemoveHs(mol)
bonds = mol.GetBonds()
return len(bonds)
def get_eigenvalues_from_adjacency_map(adjacency_map):
eigen_values, _ = np.linalg.eigh(adjacency_map)
return eigen_values
df = pd.read_csv('data/SAMPL.csv')
df = df[~df['smiles'].str.contains('B')]
df = df[~df['smiles'].str.contains('\+')]
df = df[~df['smiles'].str.contains('\-')]
smiles_array = df[['smiles']].values.flatten()
'''
@pytest.mark.parametrize('smiles', smiles_array)
def test_num_bonds(smiles):
npt.assert_almost_equal(
get_num_bonds(smiles),
np.count_nonzero(
gin.i_o.from_smiles.smiles_to_mol(
smiles)[1]))
'''
@pytest.mark.parametrize('smiles', smiles_array)
def test_adjacency_map(smiles):
adjacency_map_rdkit = get_adjacency_matrix_rdkit(smiles)
adjacency_map_gin = gin.i_o.from_smiles.to_mol(
smiles)[1]
adjacency_map_gin = tf.where(
tf.greater(
adjacency_map_gin,
tf.constant(0, dtype=tf.float32)),
tf.ones_like(adjacency_map_gin),
tf.zeros_like(adjacency_map_gin))
adjacency_map_gin = adjacency_map_gin + tf.transpose(adjacency_map_gin)
eighs_rdkit = get_eigenvalues_from_adjacency_map(
adjacency_map_rdkit)
eighs_gin = get_eigenvalues_from_adjacency_map(
adjacency_map_gin)
err_msg = str(adjacency_map_rdkit) + str(adjacency_map_gin)
npt.assert_almost_equal(
eighs_rdkit,
eighs_gin,
err_msg = err_msg) | en | 0.287805 | # initialize an adjacency_map # get a list of bonds # loop through these bonds # order = BONDS[bond.GetBondType()] # adjacency_map = np.triu(adjacency_map) @pytest.mark.parametrize('smiles', smiles_array) def test_num_bonds(smiles): npt.assert_almost_equal( get_num_bonds(smiles), np.count_nonzero( gin.i_o.from_smiles.smiles_to_mol( smiles)[1])) | 2.489811 | 2 |
backend/tester.py | alexp25/smart-home | 0 | 6612630 | from flask import Flask
from flask import render_template, send_file, session, Response, request, make_response, send_from_directory
from flask import jsonify
import json
import datetime
import os
import subprocess
import copy
import gevent
import gevent.monkey
from gevent.pywsgi import WSGIServer
# gevent.monkey.patch_time()
gevent.monkey.patch_all(socket=True, dns=True, time=True, select=True, thread=False, os=False, ssl=True, httplib=False, subprocess=False, sys=False, aggressive=True, Event=False, builtins=True, signal=False)
from flask_sockets import Sockets
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from AppModules.DebugPrintThread import DebugPrintThread
import appVariables
# only the main modules calls init
# the other modules using the global variables just import "appVariables"
appVariables.init()
from bson import json_util
from Modules.mongo_db import MongoManager
mongomanager = MongoManager()
mongomanager.connect()
app = Flask(__name__)
sockets = Sockets(app)
@app.route('/find')
def find():
result = mongomanager.find("test","test2",None)
return result
@app.route('/insert',methods=['POST'])
def insert():
print(request.json)
# document = json.dumps(request.json)
# print(document)
document=request.json
result = mongomanager.insert("test","test2",document)
# result = json.dumps({"result":1})
return result
@app.route('/pipeline')
def averageq():
pipeline = [{"$match": {"s_id": 132, "ts": {"$gt": "2017-02-18 17:04:38.146000"}}},
{"$group": {"_id": "$s_id", "avg": {"$avg": "$value"}}}
]
result = mongomanager.aggregate_pipeline("mydb","sensor_data", pipeline)
print(result)
return json.dumps(result, default=json_util.default)
if __name__ == '__main__':
print('tester started')
thread5 = DebugPrintThread()
thread5.start()
server = pywsgi.WSGIServer(('0.0.0.0', 8100), app, handler_class=WebSocketHandler)
server.serve_forever() | from flask import Flask
from flask import render_template, send_file, session, Response, request, make_response, send_from_directory
from flask import jsonify
import json
import datetime
import os
import subprocess
import copy
import gevent
import gevent.monkey
from gevent.pywsgi import WSGIServer
# gevent.monkey.patch_time()
gevent.monkey.patch_all(socket=True, dns=True, time=True, select=True, thread=False, os=False, ssl=True, httplib=False, subprocess=False, sys=False, aggressive=True, Event=False, builtins=True, signal=False)
from flask_sockets import Sockets
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from AppModules.DebugPrintThread import DebugPrintThread
import appVariables
# only the main modules calls init
# the other modules using the global variables just import "appVariables"
appVariables.init()
from bson import json_util
from Modules.mongo_db import MongoManager
mongomanager = MongoManager()
mongomanager.connect()
app = Flask(__name__)
sockets = Sockets(app)
@app.route('/find')
def find():
result = mongomanager.find("test","test2",None)
return result
@app.route('/insert',methods=['POST'])
def insert():
print(request.json)
# document = json.dumps(request.json)
# print(document)
document=request.json
result = mongomanager.insert("test","test2",document)
# result = json.dumps({"result":1})
return result
@app.route('/pipeline')
def averageq():
pipeline = [{"$match": {"s_id": 132, "ts": {"$gt": "2017-02-18 17:04:38.146000"}}},
{"$group": {"_id": "$s_id", "avg": {"$avg": "$value"}}}
]
result = mongomanager.aggregate_pipeline("mydb","sensor_data", pipeline)
print(result)
return json.dumps(result, default=json_util.default)
if __name__ == '__main__':
print('tester started')
thread5 = DebugPrintThread()
thread5.start()
server = pywsgi.WSGIServer(('0.0.0.0', 8100), app, handler_class=WebSocketHandler)
server.serve_forever() | en | 0.358978 | # gevent.monkey.patch_time() # only the main modules calls init # the other modules using the global variables just import "appVariables" # document = json.dumps(request.json) # print(document) # result = json.dumps({"result":1}) | 2.305122 | 2 |
IODR_growth_rate.py | danolson1/IODR_python | 0 | 6612631 | ###############################################################################
# IODR_growth_rate
#
# <NAME> 5-19-2020
# Library for measuring growth rate from optical density data
#
# Notes on use:
# copied from IODR - LL1592 ethnol adaptation.ipynb notebook
# C:\Users\Dan\Documents\Lynd Lab research\Ctherm CBP project\high ethanol adaptation for C therm 9-30-2019\IODR - LL1592 ethanol adaptation v5.ipynb
###############################################################################
# perform required imports
import pandas as pd
import numpy as np
from scipy.signal import find_peaks
from scipy.optimize import curve_fit
from matplotlib import pyplot as plt
from scipy import stats # for sliding window slope measurements
def linear_curve(t, a, b):
    """Linear model ``a*t + b``, used as a curve-fit target function."""
    return b + a * t
def gompertz_curve(t, A, umax, lag, offset):
    """Modified Gompertz growth model on log-transformed OD data.

    Form from Zwietering et al. 1990, "Modeling of the Bacterial Growth Curve".

    Parameters:
        t: time (hours); scalar or numpy array
        A: log ratio of initial to final population (curve height)
        umax: maximum specific growth rate (hr^-1)
        lag: lag time (hours)
        offset: vertical shift of the curve
    """
    inner = (umax * np.exp(1) / A) * (lag - t) + 1
    return A * np.exp(-np.exp(inner)) + offset
def growth_analysis(data, init_OD = 0.01, reliable_OD_range = (0.03, 1), peak_distance = 10, smoothing_window = 10, peak_prominence = 0.005, show_graphs = True, epsilon = 0.1):
    """
    Measure growth rate from a time series of optical density (OD) readings.

    NOTE: ``data`` is modified in place (``etime`` converted to hours, OD
    re-zeroed, helper columns added) -- pass ``df.copy()`` if the caller
    needs the original values.

    Parameters:
        data: a Pandas dataframe with the following columns:
            OD: absorbance data at 600 nm
            etime: elapsed time in days
        init_OD: initial OD.  For a 1:100 dilution of an OD=1 culture,
            the init_OD value would be 0.01
        reliable_OD_range: (min, max) OD values that are considered reliable
        peak_distance: minimum spacing (in points) between detected peaks/troughs
        smoothing_window: number of points to use for smoothing data
        peak_prominence: minimum prominence for peak/trough detection
        show_graphs: boolean flag to show graphs of curve fits
        epsilon: half-width of the bounds used to pin fixed parameters in the
            Gompertz fit

    Returns a Pandas series with the following information (or None when no
    midpoint crossing is found):
        maxOD
        umax_gompertz: maximum growth rate as determined by Gompertz curve fit
        umax_gompertz_err: umax standard error from Gompertz fit
        umax_slope: maximum growth rate as determined by slope of log-transformed data
        umax_slope_err: umax standard error from slope fit
    """
    # set elapsed time to hours
    data['etime'] = data['etime']*24   # convert days to hours

    # smooth data to eliminate outliers
    data['smooth'] = data.OD.rolling(smoothing_window, center = True).mean()

    # determine min, max and midpoint of data
    minOD = data.smooth.min()
    maxOD = data.smooth.max()
    midOD = (maxOD - minOD)/2 + minOD

    # adjust OD so that minOD = init_OD
    data.OD = data.OD - minOD + init_OD
    data.smooth = data.smooth - minOD + init_OD

    # recalculate min and max OD after the shift
    minOD = data.smooth.min()
    maxOD = data.smooth.max()

    # determine midpoint crossings (points whose successor is above the midpoint)
    data['nextOD'] = data['smooth'].shift(-1)   # OD value of the subsequent timepoint
    data['cross'] = ((data.smooth <= midOD) & (data.nextOD > midOD))
    if data['cross'].sum() == 0:
        print('WARNING: no midpoint crossings')
        return  # we can't do any more calculations, so return
    if data['cross'].sum() >= 2:
        print('WARNING: more than 1 midpoint crossing')
    # use the first (earliest) crossing if there are more than one
    cross_idx = data.loc[data.cross, :].sort_values('etime', ascending = True).index[0]

    # find the peak OD
    # (the logistic function we're going to use can't account for decreasing OD)
    peaks = find_peaks(data.smooth,
                       height = midOD,            # peak height must be above the midpoint OD
                       distance = peak_distance,  # merge peaks that are close together
                       prominence = peak_prominence,
                      )[0]
    # if there are no peaks, use all of the data
    if len(data.iloc[peaks]) == 0:
        peak_idx = data.index[-1]   # set the peak index to the last point of the dataframe
    else:
        peak_idx = data.iloc[peaks].index[0]

    # find troughs (peaks of the inverted signal, below the midpoint)
    troughs = find_peaks(data.smooth*-1,
                         height = midOD*-1,
                         distance = peak_distance,
                         prominence = peak_prominence,
                        )[0]
    # select the last trough before the midpoint crossing
    troughDf = data.iloc[troughs, :]              # dataframe with just the trough points
    before_crossing = troughDf.index < cross_idx  # boolean filter for points before crossing
    # if there is no trough before the crossing, start from the first data point
    if len(troughDf.loc[before_crossing, 'etime']) < 1:
        trough_idx = data.index[0]
    else:
        trough_idx = troughDf.loc[before_crossing, 'etime'].index[-1]

    # select data for fitting: from the last trough before the midpoint
    # crossing to the first peak after it
    data['selected'] = False
    data.loc[trough_idx:peak_idx, 'selected'] = True
    data2 = data.loc[data['selected'], ['OD', 'etime']].copy()
    # use only the data in the reliable OD range
    data2 = data2.loc[data2.OD.between(*reliable_OD_range)]

    # log transform and drop non-plottable values
    data2['lnOD'] = (data2['OD'].apply(np.log))
    data2 = data2.replace([np.inf, -np.inf], np.nan)
    data2 = data2.dropna()

    # non-linear (Gompertz) curve fit on ln(OD)
    A_init = (np.log(maxOD) - np.log(minOD))   # the "height" of the data, min to max
    umax_init = 0.25
    lag_init = data2.iloc[0].loc['etime']
    offset_init = np.log(minOD)
    p0 = [A_init, umax_init, lag_init, offset_init]   # initial guess for A, umax, lag, offset
    # A and offset are effectively fixed: bounded to +/- epsilon of their estimates
    popt, pcov = curve_fit(gompertz_curve,
                           data2['etime'],   # elapsed time (hours)
                           data2['lnOD'],    # log-transformed OD data
                           p0,               # initial guess
                           method = 'trf',
                           bounds = ((A_init-epsilon, 0, 0, offset_init-epsilon),
                                     (A_init+epsilon, 1, np.inf, offset_init+epsilon)),
                           )
    gomp_x = np.linspace(data['etime'].min(), data['etime'].max(), 50)
    gomp_y = gompertz_curve(gomp_x, *popt)
    # BUG FIX: was np.diag(pc) -- 'pc' was an undefined name (NameError).
    # 1-sigma parameter errors are the sqrt of the covariance diagonal.
    perr = np.sqrt(np.diag(pcov))

    # linear curve fit of ln(OD) on a sliding window
    fit_window = int(smoothing_window/2)   # half the smoothing window, as an integer
    # BUG FIX: the loop wrote to 'u'/'u_err' but the columns initialized here
    # were 'umax_slope'/'umax_slope_err' (never read). Initialize the columns
    # that are actually used; boundary rows that are never fit stay NaN so
    # they are ignored by max().
    data2['u'] = np.nan
    data2['u_err'] = np.nan
    data2['icept'] = np.nan
    for index, row in data2.iloc[fit_window:-fit_window].iterrows():
        # BUG FIX: was data2.loc[index-window:...] -- 'window' was undefined.
        data3 = data2.loc[index-fit_window:index+fit_window]
        slope, intercept, r_value, p_value, std_err = stats.linregress(data3.etime, data3.lnOD)
        data2.loc[index, 'u'] = slope
        data2.loc[index, 'u_err'] = std_err
        data2.loc[index, 'icept'] = intercept
    umax_index = data2.loc[data2.u == data2.u.max(), :].index[0]

    # make a dataframe with the points used for the best linear fit, for plotting
    data3 = data2.loc[umax_index-fit_window:umax_index+fit_window]
    lin_x = np.linspace(data3.etime.min(), data3.etime.max(), 10)
    lin_y = linear_curve(lin_x, data2.loc[umax_index, 'u'], data2.loc[umax_index, 'icept'])

    # prepare series for return values
    result_dict = {'maxOD': maxOD,
                   'umax_gompertz': popt[1],
                   'umax_gompertz_err': perr[1],
                   'umax_slope': data2.loc[umax_index, 'u'],
                   'umax_slope_err': data2.loc[umax_index, 'u_err']}
    result_ser = pd.Series(result_dict)

    # plot the result
    if show_graphs:
        # three panels: raw data, smoothed data, log-transformed data + fits
        fig, (ax1, ax3, ax2) = plt.subplots(1, 3, sharex =False, figsize = (20,8))

        # First panel
        ax1.set_title('initial data')
        ax1.axhline(minOD, linestyle = "--", color = 'red', alpha = 0.5, label = 'min')
        ax1.axhline(midOD, linestyle = "--", color = 'red', alpha = 0.5, label = 'mid')
        ax1.axhline(maxOD, linestyle = "--", color = 'red', alpha = 0.5, label = 'max')
        ax1.plot(data['etime'], data['OD'], label = 'OD', marker = '.')
        ax1.scatter(data.etime.iloc[peaks], data.OD.iloc[peaks], label = 'peaks', marker = 'o', color = 'green', s = 100)
        ax1.scatter(data.etime.iloc[troughs], data.OD.iloc[troughs], label = 'troughs', marker = 'o', color = 'red', s = 100)
        ax1.scatter(data.etime.loc[cross_idx], data.OD.loc[cross_idx], label = 'midpoint rising cross', marker = 'x', color = 'green', s = 100)
        ax1.plot(data2.etime, data2.OD, color = 'orange', label = 'good points', linewidth = 12, alpha = 0.2)
        ax1.legend()

        # Middle panel
        ax3.set_title('smoothed data')
        ax3.plot(data['etime'], data['smooth'], label = 'smooth', color = 'brown')

        # Third panel
        ax2.set_title('log-transformed data')
        ax2.axhline(np.log(minOD), linestyle = "--", color = 'red', alpha = 0.5, label = 'min')
        ax2.axhline(np.log(midOD), linestyle = "--", color = 'red', alpha = 0.5, label = 'mid')
        ax2.axhline(np.log(maxOD), linestyle = "--", color = 'red', alpha = 0.5, label = 'max')
        ax2.plot(data2['etime'], data2['lnOD'], label = 'log-OD', marker = '.')
        ax2.plot(gomp_x, gomp_y, label = 'gompertz fit', color = 'red', alpha = 0.5, linewidth = 3)
        ax2.plot(lin_x, lin_y, label = 'linear fit', color = 'green', alpha = 0.5, linewidth = 6)
        ax2.legend()
        plt.show()

    return result_ser
| ###############################################################################
# IODR_growth_rate
#
# <NAME> 5-19-2020
# Library for measuring growth rate from optical density data
#
# Notes on use:
# copied from IODR - LL1592 ethnol adaptation.ipynb notebook
# C:\Users\Dan\Documents\Lynd Lab research\Ctherm CBP project\high ethanol adaptation for C therm 9-30-2019\IODR - LL1592 ethanol adaptation v5.ipynb
###############################################################################
# perform required imports
import pandas as pd
import numpy as np
from scipy.signal import find_peaks
from scipy.optimize import curve_fit
from matplotlib import pyplot as plt
from scipy import stats # for sliding window slope measurements
def linear_curve(t, a, b):
"""
fit data to linear model
"""
return a*t + b
def gompertz_curve(t, A, umax, lag, offset):
"""
fit data to 3-parameter logistic Gompertz equation
Modified form from Zwietering et al. 1990, "Modeling of the Bacterial Growth Curve"
Parameters:
t: time (hours)
umax: maximum specific growth rate (hr^-1)
lag: lag time
A: log ratio of initial to final population
offset: parameter for shifting the curve up and down
"""
y = A * np.exp(-np.exp(((umax * np.exp(1))/(A))*(lag - t) + 1)) + offset
return(y)
def growth_analysis(data, init_OD = 0.01, reliable_OD_range = (0.03, 1), peak_distance = 10, smoothing_window = 10, peak_prominence = 0.005, show_graphs = True, epsilon = 0.1):
"""
data: a Pandas dataframe with the following columns:
OD: absorbance data at 600 nm
etime: elapsed time in days
init_OD: initial OD. For a 1:100 dilution of a OD=1 culture, the init_OD value would be 0.01
reliable_OD_range: tuple (min, max) giving the minimum and maximum OD values that are considered reliable
smoothing_window: number of points to use for smoothing data
show_graphs: boolean flag to show graphs of curve fits
epsilon: error term for bounds when fitting fixed parameters to Gompertz curve
Return a Pandas series with the following information:
maxOD
umax_gompertz: maximum growth rate as determined by Gompertz curve fit
umax_gompertz_err: umax standard error from Gompertz fit
umax_slope: maximum growth rate as determined by slope of log-transformed data
umax_slope_err: emax standard error from slope fit
"""
# set elapsed time to hours
data['etime'] = data['etime']*24 # convert days to hours
# smooth data to eliminate outliers
data['smooth'] = data.OD.rolling(smoothing_window, center = True).mean()
# determine min, max and midpoint of data
minOD = data.smooth.min()
maxOD = data.smooth.max()
midOD = (maxOD - minOD)/2 + minOD
# adjust OD so that minOD = init_OD
data.OD = data.OD - minOD + init_OD
data.smooth = data.smooth - minOD + init_OD
# recalculate min and max OD
minOD = data.smooth.min()
maxOD = data.smooth.max()
# determine midpoint crossings
data['nextOD'] = data['smooth'].shift(-1) # column with the OD value of the subsequent timepoint
data['cross'] = ((data.smooth <= midOD) & (data.nextOD > midOD))
if data['cross'].sum() == 0:
print('WARNING: no midpoint crossings')
return # we can't do any more calculations, so return
else:
if data['cross'].sum() >= 2:
print('WARNING: more than 1 midpoint crossing')
# find the index of the first crossing, if there are more than one
cross_idx = data.loc[data.cross, :].sort_values('etime', ascending = True).index[0]
# find the peak OD
# the logistic function we're going to use can't account for decreasing OD
peaks = find_peaks(data.smooth,
height = midOD, # peak height must be above the midpoint OD
distance = peak_distance, # if there are several peaks close together, just take the largest one
prominence = peak_prominence,
)[0]
# if there are no peaks, use all of the data
if len(data.iloc[peaks]) == 0:
peak_idx = data.index[-1] # set the peak index to the last point of the dataframe
else:
peak_idx = data.iloc[peaks].index[0]
# find troughs
troughs = find_peaks(data.smooth*-1,
height = midOD*-1, # peak height must be above the midpoint OD
distance = peak_distance, # if there are several peaks close together, just take the largest one
prominence = peak_prominence,
)[0]
# select the last trough before the midpoint crossing
troughDf = data.iloc[troughs, :] # dataframe with just the trough points
before_crossing = troughDf.index < cross_idx # boolean filter for points before crossing
# if there are no troughs before the midpoint crossing, use all data points before the crossing
if len(troughDf.loc[before_crossing, 'etime']) < 1:
trough_idx = data.index[0]
else:
trough_idx = troughDf.loc[before_crossing, 'etime'].index[-1] # get the last index in the dataframe
#print('trough_idx=', trough_idx)
#print('cross_idx=', cross_idx)
#print('peak_idx=', peak_idx)
# select data for fitting curve
# use the data from the first trough before the midpoint crossing to the first peak after the midpoint crossing
data['selected'] = False
data.loc[trough_idx:peak_idx, 'selected'] = True
data2 = data.loc[data['selected'], ['OD', 'etime']].copy()
# use only the data in the reliable OD range
data2 = data2.loc[data2.OD.between(*reliable_OD_range)]
# log transform and drop non-plottable values
data2['lnOD'] = (data2['OD'].apply(np.log))
data2 = data2.replace([np.inf, -np.inf], np.nan)
data2 = data2.dropna()
# perform non-linear curve fit
A_init = (np.log(maxOD) - np.log(minOD)) # the "height" of the original data, from min to max
umax_init = 0.25
lag_init = data2.iloc[0].loc['etime']
offset_init = np.log(minOD)
p0 = [A_init, umax_init, lag_init, offset_init] # initial guess for A, umax, lag, offset
#print('min=', data2.iloc[0].loc['etime'])
#print('max=', data2.iloc[-1].loc['etime'])
#print('p0= ', p0)
try:
popt, pcov = curve_fit(gompertz_curve,
data2['etime'], # elapsed time (hours)
data2['lnOD'], # log-transformed OD data
p0, # initial guess
method = 'trf',
bounds = ((A_init-epsilon, 0, 0, offset_init-epsilon),
(A_init+epsilon, 1, np.inf, offset_init+epsilon)),
)
gomp_x = np.linspace(data['etime'].min(), data['etime'].max(), 50)
gomp_y = gompertz_curve(gomp_x, *popt)
perr = np.sqrt(np.diag(pc))
except:
#print('exception')
#return
raise
# perform linear curve fit on sliding window
fit_window = int(smoothing_window/2) # fit_window needs to be an integer that is half the size of the smoothing window
data2['umax_slope'] = 0
data2['umax_slope_err'] = 0
data2['icept'] = 0
for index, row in data2.iloc[fit_window:-fit_window].iterrows():
data3 = data2.loc[index-window:index+window]
slope, intercept, r_value, p_value, std_err = stats.linregress(data3.etime, data3.lnOD)
#print(slope, ' ', std_err)
data2.loc[index, 'u'] = slope
data2.loc[index, 'u_err'] = std_err
data2.loc[index, 'icept'] = intercept
umax_index = data2.loc[data2.u == data2.u.max(), :].index[0]
# make a dataframe with the points used for the linear fit, for plotting
data3 = data2.loc[umax_index-window:umax_index+window]
lin_x = np.linspace(data3.etime.min(), data3.etime.max(), 10)
lin_y = linear_curve(lin_x, data2.loc[umax_index, 'u'], data2.loc[umax_index, 'icept'])
# prepare series for return values
result_dict = {'maxOD': maxOD,
'umax_gompertz': popt[1],
'umax_gompertz_err': perr[1],
'umax_slope': data2.loc[umax_index, 'u'],
'umax_slope_err': data2.loc[umax_index, 'u_err']}
result_ser = pd.Series(result_dict)
# plot the result
if(show_graphs):
# set up figure
fig, (ax1, ax3, ax2) = plt.subplots(1, 3, sharex =False, figsize = (20,8))
# First panel
ax1.set_title('initial data')
ax1.axhline(minOD, linestyle = "--", color = 'red', alpha = 0.5, label = 'min')
ax1.axhline(midOD, linestyle = "--", color = 'red', alpha = 0.5, label = 'mid')
ax1.axhline(maxOD, linestyle = "--", color = 'red', alpha = 0.5, label = 'max')
ax1.plot(data['etime'], data['OD'], label = 'OD', marker = '.')
ax1.scatter(data.etime.iloc[peaks], data.OD.iloc[peaks], label = 'peaks', marker = 'o', color = 'green', s = 100)
ax1.scatter(data.etime.iloc[troughs], data.OD.iloc[troughs], label = 'troughs', marker = 'o', color = 'red', s = 100)
ax1.scatter(data.etime.loc[cross_idx], data.OD.loc[cross_idx], label = 'midpoint rising cross', marker = 'x', color = 'green', s = 100)
ax1.plot(data2.etime, data2.OD, color = 'orange', label = 'good points', linewidth = 12, alpha = 0.2)
ax1.legend()
# Middle panel
ax3.set_title('smoothed data')
ax3.plot(data['etime'], data['smooth'], label = 'smooth', color = 'brown')
# Third panel
ax2.set_title('log-transformed data')
ax2.axhline(np.log(minOD), linestyle = "--", color = 'red', alpha = 0.5, label = 'min')
ax2.axhline(np.log(midOD), linestyle = "--", color = 'red', alpha = 0.5, label = 'mid')
ax2.axhline(np.log(maxOD), linestyle = "--", color = 'red', alpha = 0.5, label = 'max')
ax2.plot(data2['etime'], data2['lnOD'], label = 'log-OD', marker = '.')
ax2.plot(gomp_x, gomp_y, label = 'gompertz fit', color = 'red', alpha = 0.5, linewidth = 3)
ax2.plot(lin_x, lin_y, label = 'linear fit', color = 'green', alpha = 0.5, linewidth = 6)
ax2.legend()
#print('A, umax, lag, offset')
#print(popt)
#print('minOD, midOD, maxOD')
#print(",".join("{:.2f}".format(x) for x in [minOD, midOD, maxOD]))
plt.show()
return result_ser
| en | 0.728002 | ############################################################################### # IODR_growth_rate # # <NAME> 5-19-2020 # Library for measuring growth rate from optical density data # # Notes on use: # copied from IODR - LL1592 ethnol adaptation.ipynb notebook # C:\Users\Dan\Documents\Lynd Lab research\Ctherm CBP project\high ethanol adaptation for C therm 9-30-2019\IODR - LL1592 ethanol adaptation v5.ipynb ############################################################################### # perform required imports # for sliding window slope measurements fit data to linear model fit data to 3-parameter logistic Gompertz equation Modified form from Zwietering et al. 1990, "Modeling of the Bacterial Growth Curve" Parameters: t: time (hours) umax: maximum specific growth rate (hr^-1) lag: lag time A: log ratio of initial to final population offset: parameter for shifting the curve up and down data: a Pandas dataframe with the following columns: OD: absorbance data at 600 nm etime: elapsed time in days init_OD: initial OD. 
For a 1:100 dilution of a OD=1 culture, the init_OD value would be 0.01 reliable_OD_range: tuple (min, max) giving the minimum and maximum OD values that are considered reliable smoothing_window: number of points to use for smoothing data show_graphs: boolean flag to show graphs of curve fits epsilon: error term for bounds when fitting fixed parameters to Gompertz curve Return a Pandas series with the following information: maxOD umax_gompertz: maximum growth rate as determined by Gompertz curve fit umax_gompertz_err: umax standard error from Gompertz fit umax_slope: maximum growth rate as determined by slope of log-transformed data umax_slope_err: emax standard error from slope fit # set elapsed time to hours # convert days to hours # smooth data to eliminate outliers # determine min, max and midpoint of data # adjust OD so that minOD = init_OD # recalculate min and max OD # determine midpoint crossings # column with the OD value of the subsequent timepoint # we can't do any more calculations, so return # find the index of the first crossing, if there are more than one # find the peak OD # the logistic function we're going to use can't account for decreasing OD # peak height must be above the midpoint OD # if there are several peaks close together, just take the largest one # if there are no peaks, use all of the data # set the peak index to the last point of the dataframe # find troughs # peak height must be above the midpoint OD # if there are several peaks close together, just take the largest one # select the last trough before the midpoint crossing # dataframe with just the trough points # boolean filter for points before crossing # if there are no troughs before the midpoint crossing, use all data points before the crossing # get the last index in the dataframe #print('trough_idx=', trough_idx) #print('cross_idx=', cross_idx) #print('peak_idx=', peak_idx) # select data for fitting curve # use the data from the first trough before the midpoint crossing to the 
first peak after the midpoint crossing # use only the data in the reliable OD range # log transform and drop non-plottable values # perform non-linear curve fit # the "height" of the original data, from min to max # initial guess for A, umax, lag, offset #print('min=', data2.iloc[0].loc['etime']) #print('max=', data2.iloc[-1].loc['etime']) #print('p0= ', p0) # elapsed time (hours) # log-transformed OD data # initial guess #print('exception') #return # perform linear curve fit on sliding window # fit_window needs to be an integer that is half the size of the smoothing window #print(slope, ' ', std_err) # make a dataframe with the points used for the linear fit, for plotting # prepare series for return values # plot the result # set up figure # First panel # Middle panel # Third panel #print('A, umax, lag, offset') #print(popt) #print('minOD, midOD, maxOD') #print(",".join("{:.2f}".format(x) for x in [minOD, midOD, maxOD])) | 2.587093 | 3 |
constants.py | paprikachan/biotool | 0 | 6612632 | # -*- coding: utf-8 -*-
"""
utils.constants
~~~~~~~~~~~~~~~
Useful bio constants specification.
@Copyright: (c) 2017 by <NAME> (<EMAIL>).
@License: LICENSE_NAME, see LICENSE for more details.
"""
# Canonical hg19 chromosome names: autosomes 1-22, sex chromosomes, mitochondrial.
chrs = ['chr%d' % i for i in range(1, 23)] + ['chrX', 'chrY', 'chrM']
# Chromosome lengths in base pairs (hg19 reference, as in the .fai index).
hg19_fai_bp = {
    'chr1': 249250621,
    'chr2': 243199373,
    'chr3': 198022430,
    'chr4': 191154276,
    'chr5': 180915260,
    'chr6': 171115067,
    'chr7': 159138663,
    'chr8': 146364022,
    'chr9': 141213431,
    'chr10': 135534747,
    'chr11': 135006516,
    'chr12': 133851895,
    'chr13': 115169878,
    'chr14': 107349540,
    'chr15': 102531392,
    'chr16': 90354753,
    'chr17': 81195210,
    'chr18': 78077248,
    'chr19': 59128983,
    'chr20': 63025520,
    'chr21': 48129895,
    'chr22': 51304566,
    'chrX': 155270560,
    'chrY': 59373566,
    'chrM': 16571,
}
# Centromere positions (bp) separating the p and q arms of each hg19 chromosome.
# chrM has no arms and is intentionally absent.
hg19_arm = {
    'chr1': 124535434,
    'chr2': 95326171,
    'chr3': 93504854,
    'chr4': 52660117,
    'chr5': 49405641,
    'chr6': 61830166,
    'chr7': 61054331,
    'chr8': 46838887,
    'chr9': 50367679,
    'chr10': 42254935,
    'chr11': 54644205,
    'chr12': 37856694,
    'chr13': 19000000,
    'chr14': 19000000,
    'chr15': 20000000,
    'chr16': 38335801,
    'chr17': 25263006,
    'chr18': 18460898,
    'chr19': 27681782,
    'chr20': 29369569,
    'chr21': 14288129,
    'chr22': 16000000,
    'chrX': 61632012,
    'chrY': 13104553,
}


def get_arm(chrom, start, end=None):
    """Return the chromosome arm(s) covered by a position or interval.

    chrom: chromosome name (e.g. 'chr1'); unknown names return ''.
    start: start position in bp.
    end: optional end position in bp.

    Returns 'p' or 'q' when the position/interval lies on a single arm,
    and a two-letter string (e.g. 'pq') when the interval spans the
    centromere.
    """
    if chrom not in hg19_arm:
        return ''
    middle = hg19_arm[chrom]
    start_arm = 'p' if start <= middle else 'q'
    if end:
        end_arm = 'p' if end <= middle else 'q'
    else:
        end_arm = ''
    arm = start_arm + end_arm
    # BUG FIX: was `if arm in 'pp' or 'qq':` -- the `or 'qq'` is always
    # truthy, so every interval (including centromere-spanning 'pq') was
    # collapsed to its first arm. Only collapse same-arm intervals.
    if arm in ('pp', 'qq'):
        arm = arm[0]
    return arm
| # -*- coding: utf-8 -*-
"""
utils.constants
~~~~~~~~~~~~~~~
Useful bio constants specification.
@Copyright: (c) 2017 by <NAME> (<EMAIL>).
@License: LICENSE_NAME, see LICENSE for more details.
"""
chrs = ['chr%d' % i for i in range(1, 23)] + ['chrX', 'chrY', 'chrM']
hg19_fai_bp = {
'chr1': 249250621,
'chr2': 243199373,
'chr3': 198022430,
'chr4': 191154276,
'chr5': 180915260,
'chr6': 171115067,
'chr7': 159138663,
'chr8': 146364022,
'chr9': 141213431,
'chr10': 135534747,
'chr11': 135006516,
'chr12': 133851895,
'chr13': 115169878,
'chr14': 107349540,
'chr15': 102531392,
'chr16': 90354753,
'chr17': 81195210,
'chr18': 78077248,
'chr19': 59128983,
'chr20': 63025520,
'chr21': 48129895,
'chr22': 51304566,
'chrX': 155270560,
'chrY': 59373566,
'chrM': 16571,
}
hg19_arm = {
'chr1': 124535434,
'chr2': 95326171,
'chr3': 93504854,
'chr4': 52660117,
'chr5': 49405641,
'chr6': 61830166,
'chr7': 61054331,
'chr8': 46838887,
'chr9': 50367679,
'chr10': 42254935,
'chr11': 54644205,
'chr12': 37856694,
'chr13': 19000000,
'chr14': 19000000,
'chr15': 20000000,
'chr16': 38335801,
'chr17': 25263006,
'chr18': 18460898,
'chr19': 27681782,
'chr20': 29369569,
'chr21': 14288129,
'chr22': 16000000,
'chrX': 61632012,
'chrY': 13104553,
}
def get_arm(chrom, start, end=None):
if chrom not in hg19_arm:
return ''
middle = hg19_arm[chrom]
start_arm = 'p' if start <= middle else 'q'
if end:
end_arm = 'p' if end <= middle else 'q'
else:
end_arm = ''
arm = start_arm + end_arm
if arm in 'pp' or 'qq':
arm = arm[0]
return arm
| en | 0.689751 | # -*- coding: utf-8 -*- utils.constants ~~~~~~~~~~~~~~~ Useful bio constants specification. @Copyright: (c) 2017 by <NAME> (<EMAIL>). @License: LICENSE_NAME, see LICENSE for more details. | 1.362154 | 1 |
fabric_cf/actor/security/fabric_token.py | fabric-testbed/ActorBase | 0 | 6612633 | <reponame>fabric-testbed/ActorBase
import json
import logging
import traceback
from typing import Dict, List, Any, Tuple
from fss_utils.jwt_manager import ValidateCode
from fss_utils.jwt_validate import JWTValidator
from fabric_cf.actor.core.common.constants import Constants
class TokenException(Exception):
    """
    Raised when a Fabric token is missing or fails JWT validation.
    """
class FabricToken:
    """
    Wrapper around a Fabric identity token issued by the Credential Manager.

    Holds the raw encoded JWT and lazily decodes/validates it on first use.
    """
    def __init__(self, *, token: str, jwt_validator: JWTValidator, oauth_config: dict, logger: logging.Logger):
        if token is None:
            raise TokenException('Token: {} is None'.format(token))
        self.logger = logger
        self.jwt_validator = jwt_validator
        self.oauth_config = oauth_config
        self.encoded_token = token
        self.decoded_token = None  # populated by validate()

    def get_encoded_token(self) -> str:
        """Return the raw (encoded) JWT string."""
        return self.encoded_token

    def get_decoded_token(self) -> dict:
        """Return the decoded token claims, validating the JWT on first call."""
        if not self.is_decoded():
            self.validate()
        return self.decoded_token

    def validate(self) -> dict:
        """Validate the encoded JWT and cache the decoded claims.

        @return the decoded token claims
        @raise TokenException when no validator is configured or validation fails
        """
        try:
            # honor the configured expiry-check flag (defaults to True)
            verify_exp = self.oauth_config.get(Constants.PROPERTY_CONF_O_AUTH_VERIFY_EXP, True)
            if self.jwt_validator is None:
                raise TokenException("JWT Token validator not initialized, skipping validation")
            self.logger.info("Validating CI Logon token")
            code, token_or_exception = self.jwt_validator.validate_jwt(token=self.encoded_token,
                                                                       verify_exp=verify_exp)
            if code is not ValidateCode.VALID:
                self.logger.error(f"Unable to validate provided token: {code}/{token_or_exception}")
                raise TokenException(f"Unable to validate provided token: {code}/{token_or_exception}")
            self.decoded_token = token_or_exception
            self.logger.debug(json.dumps(self.decoded_token))
            return self.decoded_token
        except Exception as e:
            self.logger.error(traceback.format_exc())
            self.logger.error("Exception occurred while validating the token e: {}".format(e))
            raise e

    def is_decoded(self) -> bool:
        """Return True once the token has been decoded."""
        return self.decoded_token is not None

    def get_decoded_token_value(self, key: str) -> Any:
        """Return a single claim from the decoded token, validating first if needed.

        @param key: claim name to look up
        @return the claim value, or None when the claim is absent
        """
        if not self.is_decoded():
            self.validate()
        return self.decoded_token.get(key)

    def get_subject(self) -> str:
        """Return the token's subject claim."""
        return self.get_decoded_token_value(Constants.CLAIMS_SUB)

    def get_email(self) -> str:
        """Return the token's email claim."""
        return self.get_decoded_token_value(Constants.CLAIMS_EMAIL)

    def get_project_and_tags(self) -> Tuple[str or None, List[str] or None]:
        """Return the single (project, tags) pair from the projects claim.

        @return (project_id, tag_list), or (None, None) when the claim is
                missing or does not contain exactly one project
        """
        projects = self.get_decoded_token_value(Constants.CLAIMS_PROJECTS)
        if projects is None or len(projects) != 1:
            return None, None
        # exactly one entry: the key is the project id, the value its tag list
        project, tags = next(iter(projects.items()))
        return project, list(tags)

    def __str__(self):
        return "Decoded Token: {}".format(self.decoded_token)
| import json
import logging
import traceback
from typing import Dict, List, Any, Tuple
from fss_utils.jwt_manager import ValidateCode
from fss_utils.jwt_validate import JWTValidator
from fabric_cf.actor.core.common.constants import Constants
class TokenException(Exception):
"""
Token exception
"""
class FabricToken:
"""
Represents the Fabric Token issues by Credential Manager
"""
def __init__(self, *, token: str, jwt_validator: JWTValidator, oauth_config: dict, logger: logging.Logger):
if token is None:
raise TokenException('Token: {} is None'.format(token))
self.logger = logger
self.jwt_validator = jwt_validator
self.oauth_config = oauth_config
self.encoded_token = token
self.decoded_token = None
def get_encoded_token(self) -> str:
"""
Get Encoded token string
@return encoded token
"""
return self.encoded_token
def get_decoded_token(self) -> dict:
"""
Get Decoded token
@return Decoded token
"""
if self.decoded_token is None:
self.validate()
return self.decoded_token
def validate(self) -> dict:
"""
Validate the token
@raise Exception in case of error
"""
try:
# validate the token
verify_exp = self.oauth_config.get(Constants.PROPERTY_CONF_O_AUTH_VERIFY_EXP, True)
if self.jwt_validator is not None:
self.logger.info("Validating CI Logon token")
code, token_or_exception = self.jwt_validator.validate_jwt(token=self.encoded_token,
verify_exp=verify_exp)
if code is not ValidateCode.VALID:
self.logger.error(f"Unable to validate provided token: {code}/{token_or_exception}")
raise TokenException(f"Unable to validate provided token: {code}/{token_or_exception}")
else:
raise TokenException("JWT Token validator not initialized, skipping validation")
self.decoded_token = token_or_exception
self.logger.debug(json.dumps(self.decoded_token))
return self.decoded_token
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error("Exception occurred while validating the token e: {}".format(e))
raise e
def is_decoded(self) -> bool:
"""
Check if the token is decoded
@return True if decoded, False otherwise
"""
return self.decoded_token is not None
def get_decoded_token_value(self, key: str) -> Any:
"""
Get decoded token value
@param key: key to get value
@return value
"""
if self.decoded_token is None:
self.validate()
return self.decoded_token.get(key)
def get_subject(self) -> str:
"""
Get subject
@return subject
"""
return self.get_decoded_token_value(Constants.CLAIMS_SUB)
def get_email(self) -> str:
"""
Get email
@return email
"""
return self.get_decoded_token_value(Constants.CLAIMS_EMAIL)
def get_project_and_tags(self) -> Tuple[str or None, List[str] or None]:
"""
Get projects
@return projects
"""
projects = self.get_decoded_token_value(Constants.CLAIMS_PROJECTS)
if projects is None or len(projects) != 1:
return None, None
project = ""
tag_list = []
for key, value in projects.items():
project = key
for tag in value:
tag_list.append(tag)
break
return project, tag_list
def __str__(self):
return f"Decoded Token: {self.decoded_token}" | en | 0.448672 | Token exception Represents the Fabric Token issues by Credential Manager Get Encoded token string @return encoded token Get Decoded token @return Decoded token Validate the token @raise Exception in case of error # validate the token Check if the token is decoded @return True if decoded, False otherwise Get decoded token value @param key: key to get value @return value Get subject @return subject Get email @return email Get projects @return projects | 2.381805 | 2 |
run.py | carverdo/scrap | 0 | 6612634 | __author__ = 'donal'
__project__ = 'ribcage'
from app import create_app
# app = create_app('development')
app = create_app('production')
if __name__ == '__main__':
app.run() | __author__ = 'donal'
__project__ = 'ribcage'
from app import create_app
# app = create_app('development')
app = create_app('production')
if __name__ == '__main__':
app.run() | en | 0.438869 | # app = create_app('development') | 1.291341 | 1 |
autorelease/github_release.py | dwhswenson/autorelease | 3 | 6612635 | <filename>autorelease/github_release.py<gh_stars>1-10
import re
import json
import requests
import git
from collections import namedtuple
ProjectOptions = namedtuple('ProjectOptions', ['repo_owner',
'repo_name',
'project_name'])
class GitHubUser(namedtuple('GitHubUser', ['username', 'token'])):
@property
def auth(self):
return (self.username, self.token)
class GitHubRepoBase(object):
"""
Parameters
----------
project: :class:`.ProjectOptions`
github_user: :class:`.GitHubUser`
"""
def __init__(self, project, github_user):
github_api_url = "https://api.github.com/"
self.project = project
self.repo_api_url = (github_api_url + "repos/" + project.repo_owner
+ "/" + project.repo_name + "/")
self.github_user = github_user
def api_get(self, url_ending, params=None):
return requests.get(url=self.repo_api_url + url_ending,
params=params,
auth=self.github_user.auth)
def api_get_json_all(self, url_ending, params=None):
# only for issues, which limit to 30 per return
my_params = {}
my_params.update(params)
my_params.update({'sort': 'updated', 'direction': 'asc'})
results = {} # we use a dict to easily look up by number
# actual return is list of values
should_continue = True
while should_continue:
local_results_req = self.api_get(url_ending, my_params)
local_results = local_results_req.json()
if local_results:
since = local_results[-1]['updated_at']
# print(local_results[-1]['updated_at'],
# local_results[0]['updated_at'])
my_params['since'] = since
local_result_dict = {result['number']: result
for result in local_results
if result['number'] not in results}
results.update(local_result_dict)
should_continue = local_result_dict
# print(results.keys())
return list(results.values())
class GitHubReleaser(GitHubRepoBase):
"""
Parameters
----------
project : :class:`.ProjectOptions`
version : str or :class:`packaging.versions.Version`
repo : :class:`git.Repo`
github_user : :class:`.GitHubUser`
Attributes
----------
release_target_commitish : str
"""
def __init__(self, project, version, repo, github_user):
super(GitHubReleaser, self).__init__(project, github_user)
self.version = version
# pr_re set in pr_pattern
self._pr_pattern = None
self.pr_re = None
self.repo = repo
self.pr_pattern = "Merge pull request #([0-9]+)"
self.release_target_commitish = "stable"
# THINGS YOU MIGHT WANT TO OVERRIDE
@property
def release_name(self):
return self.project.project_name + " " + str(self.version)
@property
def tag_name(self):
return "v" + str(self.version)
def extract_release_notes(self, text):
# TODO: make this more complicated
return text
# THINGS YOU'RE LESS LIKELY TO OVERRIDE
@property
def pr_pattern(self):
return self._pr_pattern
@pr_pattern.setter
def pr_pattern(self, value):
self._pr_pattern = value
self.pr_re = re.compile(self._pr_pattern)
def find_relevant_pr(self):
# this uses the git log to find the most recent merge from PR
# (assuming certain text in the commit log for PR merges)
found = False
commits = self.repo.iter_commits(self.release_target_commitish)
commit = next(commits)
while commit and not found:
match = self.pr_re.match(commit.message)
if match is not None:
found = True
pr_number = match.group(1) # don't like hardcoded 1
else:
commit = next(commits)
return int(pr_number)
def get_pr_data(self, pr_number):
pr_url = self.repo_api_url + "issues/" + str(pr_number)
pr_data = requests.get(pr_url, auth=self.github_user.auth).json()
return pr_data
def generate_post_data(self, draft=False, prerelease=False):
pr_number = self.find_relevant_pr()
pr_data = self.get_pr_data(pr_number)
pr_body = pr_data['body']
release_notes = self.extract_release_notes(pr_body)
post_data = {
'tag_name': self.tag_name,
'target_commitish': self.release_target_commitish,
'name': self.release_name,
'body': release_notes,
'draft': draft,
'prerelease': prerelease
}
return post_data
def create_release(self, draft=False, prerelease=False):
post_data = json.dumps(self.generate_post_data())
post_status = requests.post(self.repo_api_url + "releases",
data=post_data,
auth=self.github_user.auth)
| <filename>autorelease/github_release.py<gh_stars>1-10
import re
import json
import requests
import git
from collections import namedtuple
ProjectOptions = namedtuple('ProjectOptions', ['repo_owner',
'repo_name',
'project_name'])
class GitHubUser(namedtuple('GitHubUser', ['username', 'token'])):
@property
def auth(self):
return (self.username, self.token)
class GitHubRepoBase(object):
"""
Parameters
----------
project: :class:`.ProjectOptions`
github_user: :class:`.GitHubUser`
"""
def __init__(self, project, github_user):
github_api_url = "https://api.github.com/"
self.project = project
self.repo_api_url = (github_api_url + "repos/" + project.repo_owner
+ "/" + project.repo_name + "/")
self.github_user = github_user
def api_get(self, url_ending, params=None):
return requests.get(url=self.repo_api_url + url_ending,
params=params,
auth=self.github_user.auth)
def api_get_json_all(self, url_ending, params=None):
# only for issues, which limit to 30 per return
my_params = {}
my_params.update(params)
my_params.update({'sort': 'updated', 'direction': 'asc'})
results = {} # we use a dict to easily look up by number
# actual return is list of values
should_continue = True
while should_continue:
local_results_req = self.api_get(url_ending, my_params)
local_results = local_results_req.json()
if local_results:
since = local_results[-1]['updated_at']
# print(local_results[-1]['updated_at'],
# local_results[0]['updated_at'])
my_params['since'] = since
local_result_dict = {result['number']: result
for result in local_results
if result['number'] not in results}
results.update(local_result_dict)
should_continue = local_result_dict
# print(results.keys())
return list(results.values())
class GitHubReleaser(GitHubRepoBase):
"""
Parameters
----------
project : :class:`.ProjectOptions`
version : str or :class:`packaging.versions.Version`
repo : :class:`git.Repo`
github_user : :class:`.GitHubUser`
Attributes
----------
release_target_commitish : str
"""
def __init__(self, project, version, repo, github_user):
super(GitHubReleaser, self).__init__(project, github_user)
self.version = version
# pr_re set in pr_pattern
self._pr_pattern = None
self.pr_re = None
self.repo = repo
self.pr_pattern = "Merge pull request #([0-9]+)"
self.release_target_commitish = "stable"
# THINGS YOU MIGHT WANT TO OVERRIDE
@property
def release_name(self):
return self.project.project_name + " " + str(self.version)
@property
def tag_name(self):
return "v" + str(self.version)
def extract_release_notes(self, text):
# TODO: make this more complicated
return text
# THINGS YOU'RE LESS LIKELY TO OVERRIDE
@property
def pr_pattern(self):
return self._pr_pattern
@pr_pattern.setter
def pr_pattern(self, value):
self._pr_pattern = value
self.pr_re = re.compile(self._pr_pattern)
def find_relevant_pr(self):
# this uses the git log to find the most recent merge from PR
# (assuming certain text in the commit log for PR merges)
found = False
commits = self.repo.iter_commits(self.release_target_commitish)
commit = next(commits)
while commit and not found:
match = self.pr_re.match(commit.message)
if match is not None:
found = True
pr_number = match.group(1) # don't like hardcoded 1
else:
commit = next(commits)
return int(pr_number)
def get_pr_data(self, pr_number):
pr_url = self.repo_api_url + "issues/" + str(pr_number)
pr_data = requests.get(pr_url, auth=self.github_user.auth).json()
return pr_data
def generate_post_data(self, draft=False, prerelease=False):
pr_number = self.find_relevant_pr()
pr_data = self.get_pr_data(pr_number)
pr_body = pr_data['body']
release_notes = self.extract_release_notes(pr_body)
post_data = {
'tag_name': self.tag_name,
'target_commitish': self.release_target_commitish,
'name': self.release_name,
'body': release_notes,
'draft': draft,
'prerelease': prerelease
}
return post_data
def create_release(self, draft=False, prerelease=False):
post_data = json.dumps(self.generate_post_data())
post_status = requests.post(self.repo_api_url + "releases",
data=post_data,
auth=self.github_user.auth)
| en | 0.516164 | Parameters ---------- project: :class:`.ProjectOptions` github_user: :class:`.GitHubUser` # only for issues, which limit to 30 per return # we use a dict to easily look up by number # actual return is list of values # print(local_results[-1]['updated_at'], # local_results[0]['updated_at']) # print(results.keys()) Parameters ---------- project : :class:`.ProjectOptions` version : str or :class:`packaging.versions.Version` repo : :class:`git.Repo` github_user : :class:`.GitHubUser` Attributes ---------- release_target_commitish : str # pr_re set in pr_pattern #([0-9]+)" # THINGS YOU MIGHT WANT TO OVERRIDE # TODO: make this more complicated # THINGS YOU'RE LESS LIKELY TO OVERRIDE # this uses the git log to find the most recent merge from PR # (assuming certain text in the commit log for PR merges) # don't like hardcoded 1 | 2.796727 | 3 |
examples/scratch.py | awa1k3r/plume-generation-and-analysis | 0 | 6612636 | import pyplume
import numpy as np
# Mechanism management
cti = 'test.cti'
pyplume.mech.mechFileAdd(cti) #Add mechanism file
pyplume.mech.mechFileDelete(cti) #Delete mechanism file
pyplume.mech.mechFileRestore() #Restore mechanism files
pyplume.mech.mechFileList() #list mechanism files
pyplume.tests.testMechs.runTests() #Run tests for mech management
# Model Use
pm = pyplume.model.PlumeModel.gridModel()
print(pm.connects)
# pm.buildNetwork()
# for t in np.arange(0.1,1.1,0.1):
# pm(t)
# pm.steadyState()
#
# pyplume.tests.testModel.runTests()
| import pyplume
import numpy as np
# Mechanism management
cti = 'test.cti'
pyplume.mech.mechFileAdd(cti) #Add mechanism file
pyplume.mech.mechFileDelete(cti) #Delete mechanism file
pyplume.mech.mechFileRestore() #Restore mechanism files
pyplume.mech.mechFileList() #list mechanism files
pyplume.tests.testMechs.runTests() #Run tests for mech management
# Model Use
pm = pyplume.model.PlumeModel.gridModel()
print(pm.connects)
# pm.buildNetwork()
# for t in np.arange(0.1,1.1,0.1):
# pm(t)
# pm.steadyState()
#
# pyplume.tests.testModel.runTests()
| en | 0.71055 | # Mechanism management #Add mechanism file #Delete mechanism file #Restore mechanism files #list mechanism files #Run tests for mech management # Model Use # pm.buildNetwork() # for t in np.arange(0.1,1.1,0.1): # pm(t) # pm.steadyState() # # pyplume.tests.testModel.runTests() | 2.185874 | 2 |
tests/organisation_tests.py | ironwill1023/BSS-admin | 1 | 6612637 | <filename>tests/organisation_tests.py
from smartcloudadmin.models.organization import Organization
from smartcloudadmin.models.subscription import Subscription
from smartcloudadmin.models.subscriber import Subscriber
from smartcloudadmin.utils.generators import given_name,family_name,email_address
from smartcloudadmin.exceptions import BssResourceNotFound, BSSBadData
import unittest
from smartcloudadmin.enums import State
from time import sleep
from random import randint
import os
from smartcloudadmin.config import BssConfig
##### This block ensures tests run in order.
def cmp(a, b):
return (a > b) - (a < b)
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: cmp(x, y)
class TestOrganisation(unittest.TestCase):
my_sub = None
test_org = None
test_subscriber = None
config = BssConfig()
config.log_level = "dgdsgsdgsd"
config.add_datacenter("TEST", os.environ.get("url"), (os.environ.get("username"), os.environ.get("password")))
@classmethod
def setUpClass(cls):
number = "%05d" % randint(0, 99999)
first_name = given_name()
familiy_name = family_name()
admin_email = email_address(given_name=first_name, family_name=familiy_name, org_name=f"bss-api-bvt-{number}")
created_org = Organization.create(environment="TEST", organisation_name=f"bss-api-bvt-{number}", given_name=first_name,
family_name=familiy_name, admin_email=admin_email, address_line_1=" ",
city="Cork", address_type="billing", country="Ireland")
cls.test_organisation_id = created_org.id
cls.test_org = created_org
cls.my_sub = created_org.add_subscription(part_number="D0NPULL", duration_length=8, duration_units="MONTHS", part_quantity=20 )
print("my_test_user" + number + ".isc4sb.com")
cls.test_subscriber = cls.test_org.add_subscriber(given_name="tod", family_name="todd", email_address="my_test_user" + number + "@<EMAIL>")
def test_01_get_organisation(self):
tested_org = Organization.get("TEST", self.test_org.id) #502212451 # self.test_organisation_id
assert(tested_org.id == self.test_org.id)
def test_02_suspend_organisation(self):
self.test_org.suspend()
assert(self.test_org.state == State.SUSPENDED.value)
# activate admin
def test_03_unsuspend_organisation(self):
self.test_org.unsuspend()
assert(self.test_org.state == State.ACTIVE.value)
def test_04_add_subscription_via_organisation(self):
test_org_subscription = self.test_org.add_subscription(part_number="D0NPULL", part_quantity=16,
duration_length=10, duration_units="MONTHS") # todo: Maybe use this in the next tests
assert(test_org_subscription.state == State.ACTIVE.value) # subscription is activated
assert(self.test_org.subscriptions.get(test_org_subscription.id) == test_org_subscription) # sub added to dict
def test_05_cancel_subscription_via_organisation(self):
test_org_subscription = self.test_org.add_subscription(part_number="D0NPULL", part_quantity=16,
duration_length=10, duration_units="MONTHS")
self.test_org.remove_subscription(test_org_subscription)
# test_org_subscription.delete() todo: make this a new test.
# assert(test_org_subscription.state == State.UNSET.value)
assert(self.test_org.subscriptions.get(test_org_subscription.id, "") == "") # sub should not be in the sub list
def test_06_transfer_seat(self):
number = "%05d" % randint(0, 99999)
print("-")
new_subscription = self.test_org.add_subscription(part_number="D0NRILL", part_quantity=16,
duration_length=10, duration_units="MONTHS")
new_subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
new_subscriber.entitle(new_subscription.id)
new_subscription._get_details()
print("-")
source_pre_transfer_available_seats = new_subscription.available_numbers_of_seats
sleep(3) # seems to be a delay with update sometimes.
seat = new_subscriber.seat_set[new_subscription.id]
new_subscription.transfer_seat(seat.id, self.my_sub.id)
new_subscription._get_details()
source_post_transfer_available_seats = new_subscription.available_numbers_of_seats
print(f"{source_pre_transfer_available_seats} < {source_post_transfer_available_seats}")
assert(source_pre_transfer_available_seats < source_post_transfer_available_seats)
#
# todo: add a range of roles
# def test_07_assign_role_to_new_user_via_organisation(self): #
# subscriber = self.test_org.add_subscriber()
# subscriber.activate()
# subscriber.assign_role("CustomerAdministrator")
# print(subscriber.get_role_list())
# assert("CustomerAdministrator" in subscriber.get_role_list())
def test_07_assign_role_to_new_user_via_organisation(self):
number = "%05d" % randint(0, 99999)
new_sub_id = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com").id
self.test_org.subscribers.get(new_sub_id).activate()
self.test_org.subscribers.get(new_sub_id).assign_role("CustomerAdministrator")
assert("CustomerAdministrator" in self.test_org.subscribers.get(new_sub_id).get_role_list())
def test_08_assign_already_assigned_role_via_organisation(self): # todo: should there be a warning for this?
number = "%05d" % randint(0, 99999)
new_sub_id = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com").id
self.test_org.subscribers.get(new_sub_id).activate()
self.test_org.subscribers.get(new_sub_id).assign_role("CustomerAdministrator")
self.test_org.subscribers.get(new_sub_id).assign_role("CustomerAdministrator")
assert("CustomerAdministrator" in self.test_org.subscribers.get(new_sub_id).get_role_list())
def test_09_unassign_role_via_organisation(self):
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
subscriber.unassign_role("User")
assert("User" not in subscriber.get_role_list())
def test_10_unassign_already_unassigned_role(self):
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
subscriber.unassign_role("User")
assert("User" not in subscriber.get_role_list())
def test_11_suspend_subscription(self):
self.my_sub.suspend()
assert(self.my_sub.state == State.SUSPENDED.value)
def test_12_unsuspend_subscription(self):
self.my_sub.unsuspend()
assert(self.my_sub.state == State.ACTIVE.value)
def test_13_add_subscriber(self):
number = "%05d" % randint(0, 99999)
self.test_subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
assert(self.test_subscriber.state == State.PENDING.value)
assert(self.test_org.subscribers.get(self.test_subscriber.id, None))
def test_14_activate_org_user(self):
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
assert(subscriber.state == State.ACTIVE.value)
def test_15_password_set_one_time_and_check_24_wait(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
subscriber.set_one_time_password("<PASSWORD>")
subscriber.change_password("<PASSWORD>", "<PASSWORD>!")
assert(subscriber.state == State.ACTIVE.value)
# Trying again within 24 hour wait period
with self.assertRaises(BSSBadData):
subscriber.change_password("<PASSWORD>!", "<PASSWORD>ReallySecureWith0dd_ch4r4ct3rs_")
def test_16_entitle_user(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.entitle(self.my_sub.id)
assert(self.my_sub.id in subscriber.entitlements) # todo: better assertion needed
def test_17_suspend_user(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
subscriber.suspend()
assert(subscriber.state in State.SUSPENDED.value)
def test_18_unsuspend_user(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
subscriber.suspend()
subscriber.unsuspend()
assert(subscriber.state in State.PENDING.value)
def test_19_revoke_subscriber(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber_id = subscriber.id
subscriber.entitle(self.my_sub.id)
subscriber.revoke(self.my_sub.id)
sleep(5)
try:
new_subscriber = Subscriber.get("TEST", subscriber_id=subscriber_id)
print(new_subscriber.state)
except BssResourceNotFound:
print("excepto")
state = ""
assert(self.my_sub.id not in subscriber.entitlements) # todo: better assertion needed
def test_20_soft_delete_subscriber(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.entitle(self.my_sub.id)
subscriber.delete()
assert(subscriber.state == State.SOFT_DELETED.value or subscriber.state == State.REMOVE_PENDING.value)
def test_21_restore_soft_deleted_subscriber(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.entitle(self.my_sub.id)
subscriber.delete()
if subscriber.state == State.SOFT_DELETED.value: # we can't do much about it.
subscriber.restore()
assert(subscriber.state == State.ACTIVE.value)
# In deregister pending - ignore. It's a BSSCore issue.
# move it to another test. should be org, remove use
def test_22_hard_delete_subscriber(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.entitle(self.my_sub.id)
subscriber.delete(soft_delete="false")
assert(subscriber.state == State.UNSET.value) # todo: remove pending or an exception is thrown.
assert(self.test_org.subscribers.get(self.test_subscriber.id, None))
def test_23_delete_subscription(self):
temp_subscription = self.test_org.add_subscription(part_number="D0NPULL", duration_length=8, duration_units="MONTHS", part_quantity=20 )
temp_subscription.delete()
assert(temp_subscription.state == State.UNSET.value)
# At this stage we should have 3 pending users
# 1 pending admin and 2 pending users with no subscription.
# entitle users and then activate admin using list
# def test_24_compare_org_initiization_from_id_and_from_name(self):
# org_from_id = Organization.get("TEST", self.test_org.id)
# org_from_json = my_client.get_orgs("TEST", self.test_org.name)[0]
#
# assert(org_from_id == org_from_json)
# verify objects are equal regardless of how populated.
def test_25_compare_org_initiization_from_new_org_and_org_id(self):
number="%05d" % randint(0, 99999)
first_name = given_name()
familiy_name = family_name()
admin_email = email_address(given_name=first_name, family_name=familiy_name, org_name=f"bss-api-bvt-{number}")
created_org = Organization.create(environment="TEST", organisation_name=f"bss-api-bvt-{number}", given_name=first_name,
family_name=familiy_name, admin_email=admin_email, address_line_1=" ",
city="Cork", address_type="billing", country="Ireland")
org_from_id = Organization.get("TEST", created_org.id)
assert(org_from_id == created_org)
def test_26_compare_subscriptions_initialisation_methods(self): # case to be made to make split into 2 tests. check sub adds to list after add.
new_sub = self.test_org.add_subscription(part_number="D0NPULL", duration_length=8, duration_units="MONTHS", part_quantity=20 )
sub_from_list = self.test_org.subscriptions.get(new_sub.id)
sub_from_id = Subscription.get("TEST", new_sub.id)
assert(new_sub == sub_from_id == sub_from_list)
def test_27_compare_subscribers_initialisation_methods(self): # case to be made to make split into 2 tests. check sub adds to list after add.
number="%05d" % randint(0, 99999)
new_subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc<EMAIL>.<EMAIL>")
subscriber_from_id = Subscriber.get("TEST", subscriber_id=new_subscriber.id)
# Creates Org and gets details.
org_from_id = Organization.get("TEST", new_subscriber.customer_id)
subscriber_from_customer_list = org_from_id.subscribers.get(new_subscriber.id)
assert(new_subscriber == subscriber_from_id == subscriber_from_customer_list)
def test_28_update_org(self):
tested_org = Organization.get("TEST", self.test_organisation_id)
tested_org.check_for_updates()
tested_org.add_subscription(part_number="D0NPULL", duration_length=8, duration_units="MONTHS", part_quantity=20 )
assert(tested_org.id == self.test_org.id)
def test_29_org_deletion(self):
self.test_org.delete()
assert(self.test_org.state is State.UNSET.value)
# Exception handling tests
def test_50_check_exception_organisation_not_found(self):
with self.assertRaises(BssResourceNotFound):
Organization.get("TEST", "045454")
def test_51_check_exception_subscription_not_found(self):
with self.assertRaises(BssResourceNotFound):
Subscription.get("TEST", "045454")
def test_52_check_exception_subscriber_not_found(self):
with self.assertRaises(BssResourceNotFound):
Subscriber.get("TEST", subscriber_id="045454")
def test_53_check_exception_org_bad_data(self):
with self.assertRaises(BSSBadData):
Subscription.get("TEST", "safasfswa")
def test_54_check_exception_subscription_bad_data(self):
with self.assertRaises(BSSBadData):
Subscriber.get("TEST", subscriber_id="safasfswa")
def test_55_check_exception_subscriber_bad_data(self):
with self.assertRaises(BSSBadData):
Organization.get("TEST", "safasfswa")
# Subscriber updates - NEEDS TO BE CHECKED.
#
#
# test orgs that are deregister pending
# test on orgs not found
#
#
# update transactions need help
#
# #get admin
#
# activate admin. | <filename>tests/organisation_tests.py
from smartcloudadmin.models.organization import Organization
from smartcloudadmin.models.subscription import Subscription
from smartcloudadmin.models.subscriber import Subscriber
from smartcloudadmin.utils.generators import given_name,family_name,email_address
from smartcloudadmin.exceptions import BssResourceNotFound, BSSBadData
import unittest
from smartcloudadmin.enums import State
from time import sleep
from random import randint
import os
from smartcloudadmin.config import BssConfig
##### This block ensures tests run in order.
def cmp(a, b):
    """Three-way compare (-1, 0, 1); replacement for Python 2's built-in ``cmp``."""
    if a < b:
        return -1
    if a > b:
        return 1
    return 0
unittest.TestLoader.sortTestMethodsUsing = lambda _self, x, y: cmp(x, y)
class TestOrganisation(unittest.TestCase):
    """Ordered integration tests for organisation, subscription and subscriber APIs.

    Test methods run in alphabetical order (the module-level
    ``sortTestMethodsUsing`` override enforces this) and share the fixtures
    created once in ``setUpClass``.
    """
    # Shared fixtures, populated in setUpClass.
    my_sub = None           # Subscription created on the shared test organisation
    test_org = None         # Organization every test operates on
    test_subscriber = None  # Subscriber created in the shared organisation
    # Datacenter configuration is read from the environment at class-definition time.
    config = BssConfig()
    # NOTE(review): "dgdsgsdgsd" is not a recognisable log level -- looks like
    # leftover debug input; confirm the intended value.
    config.log_level = "dgdsgsdgsd"
    config.add_datacenter("TEST", os.environ.get("url"), (os.environ.get("username"), os.environ.get("password")))
@classmethod
def setUpClass(cls):
number = "%05d" % randint(0, 99999)
first_name = given_name()
familiy_name = family_name()
admin_email = email_address(given_name=first_name, family_name=familiy_name, org_name=f"bss-api-bvt-{number}")
created_org = Organization.create(environment="TEST", organisation_name=f"bss-api-bvt-{number}", given_name=first_name,
family_name=familiy_name, admin_email=admin_email, address_line_1=" ",
city="Cork", address_type="billing", country="Ireland")
cls.test_organisation_id = created_org.id
cls.test_org = created_org
cls.my_sub = created_org.add_subscription(part_number="D0NPULL", duration_length=8, duration_units="MONTHS", part_quantity=20 )
print("my_test_user" + number + ".isc4sb.com")
cls.test_subscriber = cls.test_org.add_subscriber(given_name="tod", family_name="todd", email_address="my_test_user" + number + "@<EMAIL>")
def test_01_get_organisation(self):
tested_org = Organization.get("TEST", self.test_org.id) #502212451 # self.test_organisation_id
assert(tested_org.id == self.test_org.id)
def test_02_suspend_organisation(self):
self.test_org.suspend()
assert(self.test_org.state == State.SUSPENDED.value)
# activate admin
def test_03_unsuspend_organisation(self):
self.test_org.unsuspend()
assert(self.test_org.state == State.ACTIVE.value)
def test_04_add_subscription_via_organisation(self):
test_org_subscription = self.test_org.add_subscription(part_number="D0NPULL", part_quantity=16,
duration_length=10, duration_units="MONTHS") # todo: Maybe use this in the next tests
assert(test_org_subscription.state == State.ACTIVE.value) # subscription is activated
assert(self.test_org.subscriptions.get(test_org_subscription.id) == test_org_subscription) # sub added to dict
def test_05_cancel_subscription_via_organisation(self):
test_org_subscription = self.test_org.add_subscription(part_number="D0NPULL", part_quantity=16,
duration_length=10, duration_units="MONTHS")
self.test_org.remove_subscription(test_org_subscription)
# test_org_subscription.delete() todo: make this a new test.
# assert(test_org_subscription.state == State.UNSET.value)
assert(self.test_org.subscriptions.get(test_org_subscription.id, "") == "") # sub should not be in the sub list
    def test_06_transfer_seat(self):
        """Transfer a seat between subscriptions; the source must free a seat.

        Flow: create a fresh subscription and subscriber, entitle the
        subscriber (which consumes a seat on the new subscription), then
        transfer that seat to the class-level subscription and verify the
        source subscription's available seat count increased.
        """
        number = "%05d" % randint(0, 99999)
        print("-")
        # Fresh subscription so the seat-count delta is isolated from other tests.
        new_subscription = self.test_org.add_subscription(part_number="D0NRILL", part_quantity=16,
                                                          duration_length=10, duration_units="MONTHS")
        new_subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
                                                      email_address="my_test_user" + number + "@isc4sb.com")
        # Entitling consumes a seat on new_subscription.
        new_subscriber.entitle(new_subscription.id)
        # Refresh cached subscription details before sampling the seat count.
        new_subscription._get_details()
        print("-")
        source_pre_transfer_available_seats = new_subscription.available_numbers_of_seats
        sleep(3)  # seems to be a delay with update sometimes.
        seat = new_subscriber.seat_set[new_subscription.id]
        # Move the seat to the shared class-level subscription.
        new_subscription.transfer_seat(seat.id, self.my_sub.id)
        new_subscription._get_details()
        source_post_transfer_available_seats = new_subscription.available_numbers_of_seats
        print(f"{source_pre_transfer_available_seats} < {source_post_transfer_available_seats}")
        # The source subscription must have gained an available seat.
        assert(source_pre_transfer_available_seats < source_post_transfer_available_seats)
#
# todo: add a range of roles
# def test_07_assign_role_to_new_user_via_organisation(self): #
# subscriber = self.test_org.add_subscriber()
# subscriber.activate()
# subscriber.assign_role("CustomerAdministrator")
# print(subscriber.get_role_list())
# assert("CustomerAdministrator" in subscriber.get_role_list())
def test_07_assign_role_to_new_user_via_organisation(self):
number = "%05d" % randint(0, 99999)
new_sub_id = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com").id
self.test_org.subscribers.get(new_sub_id).activate()
self.test_org.subscribers.get(new_sub_id).assign_role("CustomerAdministrator")
assert("CustomerAdministrator" in self.test_org.subscribers.get(new_sub_id).get_role_list())
def test_08_assign_already_assigned_role_via_organisation(self): # todo: should there be a warning for this?
number = "%05d" % randint(0, 99999)
new_sub_id = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com").id
self.test_org.subscribers.get(new_sub_id).activate()
self.test_org.subscribers.get(new_sub_id).assign_role("CustomerAdministrator")
self.test_org.subscribers.get(new_sub_id).assign_role("CustomerAdministrator")
assert("CustomerAdministrator" in self.test_org.subscribers.get(new_sub_id).get_role_list())
def test_09_unassign_role_via_organisation(self):
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
subscriber.unassign_role("User")
assert("User" not in subscriber.get_role_list())
def test_10_unassign_already_unassigned_role(self):
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
subscriber.unassign_role("User")
assert("User" not in subscriber.get_role_list())
def test_11_suspend_subscription(self):
self.my_sub.suspend()
assert(self.my_sub.state == State.SUSPENDED.value)
def test_12_unsuspend_subscription(self):
self.my_sub.unsuspend()
assert(self.my_sub.state == State.ACTIVE.value)
def test_13_add_subscriber(self):
number = "%05d" % randint(0, 99999)
self.test_subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
assert(self.test_subscriber.state == State.PENDING.value)
assert(self.test_org.subscribers.get(self.test_subscriber.id, None))
def test_14_activate_org_user(self):
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
assert(subscriber.state == State.ACTIVE.value)
def test_15_password_set_one_time_and_check_24_wait(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
subscriber.set_one_time_password("<PASSWORD>")
subscriber.change_password("<PASSWORD>", "<PASSWORD>!")
assert(subscriber.state == State.ACTIVE.value)
# Trying again within 24 hour wait period
with self.assertRaises(BSSBadData):
subscriber.change_password("<PASSWORD>!", "<PASSWORD>ReallySecureWith0dd_ch4r4ct3rs_")
def test_16_entitle_user(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.entitle(self.my_sub.id)
assert(self.my_sub.id in subscriber.entitlements) # todo: better assertion needed
def test_17_suspend_user(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
subscriber.suspend()
assert(subscriber.state in State.SUSPENDED.value)
def test_18_unsuspend_user(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.activate()
subscriber.suspend()
subscriber.unsuspend()
assert(subscriber.state in State.PENDING.value)
def test_19_revoke_subscriber(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber_id = subscriber.id
subscriber.entitle(self.my_sub.id)
subscriber.revoke(self.my_sub.id)
sleep(5)
try:
new_subscriber = Subscriber.get("TEST", subscriber_id=subscriber_id)
print(new_subscriber.state)
except BssResourceNotFound:
print("excepto")
state = ""
assert(self.my_sub.id not in subscriber.entitlements) # todo: better assertion needed
def test_20_soft_delete_subscriber(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.entitle(self.my_sub.id)
subscriber.delete()
assert(subscriber.state == State.SOFT_DELETED.value or subscriber.state == State.REMOVE_PENDING.value)
def test_21_restore_soft_deleted_subscriber(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.entitle(self.my_sub.id)
subscriber.delete()
if subscriber.state == State.SOFT_DELETED.value: # we can't do much about it.
subscriber.restore()
assert(subscriber.state == State.ACTIVE.value)
# In deregister pending - ignore. It's a BSSCore issue.
# move it to another test. should be org, remove use
def test_22_hard_delete_subscriber(self): # todo: check for exceptions.
number = "%05d" % randint(0, 99999)
subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc4sb.com")
subscriber.entitle(self.my_sub.id)
subscriber.delete(soft_delete="false")
assert(subscriber.state == State.UNSET.value) # todo: remove pending or an exception is thrown.
assert(self.test_org.subscribers.get(self.test_subscriber.id, None))
def test_23_delete_subscription(self):
temp_subscription = self.test_org.add_subscription(part_number="D0NPULL", duration_length=8, duration_units="MONTHS", part_quantity=20 )
temp_subscription.delete()
assert(temp_subscription.state == State.UNSET.value)
# At this stage we should have 3 pending users
# 1 pending admin and 2 pending users with no subscription.
# entitle users and then activate admin using list
# def test_24_compare_org_initiization_from_id_and_from_name(self):
# org_from_id = Organization.get("TEST", self.test_org.id)
# org_from_json = my_client.get_orgs("TEST", self.test_org.name)[0]
#
# assert(org_from_id == org_from_json)
# verify objects are equal regardless of how populated.
def test_25_compare_org_initiization_from_new_org_and_org_id(self):
number="%05d" % randint(0, 99999)
first_name = given_name()
familiy_name = family_name()
admin_email = email_address(given_name=first_name, family_name=familiy_name, org_name=f"bss-api-bvt-{number}")
created_org = Organization.create(environment="TEST", organisation_name=f"bss-api-bvt-{number}", given_name=first_name,
family_name=familiy_name, admin_email=admin_email, address_line_1=" ",
city="Cork", address_type="billing", country="Ireland")
org_from_id = Organization.get("TEST", created_org.id)
assert(org_from_id == created_org)
def test_26_compare_subscriptions_initialisation_methods(self): # case to be made to make split into 2 tests. check sub adds to list after add.
new_sub = self.test_org.add_subscription(part_number="D0NPULL", duration_length=8, duration_units="MONTHS", part_quantity=20 )
sub_from_list = self.test_org.subscriptions.get(new_sub.id)
sub_from_id = Subscription.get("TEST", new_sub.id)
assert(new_sub == sub_from_id == sub_from_list)
def test_27_compare_subscribers_initialisation_methods(self): # case to be made to make split into 2 tests. check sub adds to list after add.
number="%05d" % randint(0, 99999)
new_subscriber = self.test_org.add_subscriber(given_name="James", family_name="Johnson",
email_address="my_test_user" + number + "@isc<EMAIL>.<EMAIL>")
subscriber_from_id = Subscriber.get("TEST", subscriber_id=new_subscriber.id)
# Creates Org and gets details.
org_from_id = Organization.get("TEST", new_subscriber.customer_id)
subscriber_from_customer_list = org_from_id.subscribers.get(new_subscriber.id)
assert(new_subscriber == subscriber_from_id == subscriber_from_customer_list)
def test_28_update_org(self):
tested_org = Organization.get("TEST", self.test_organisation_id)
tested_org.check_for_updates()
tested_org.add_subscription(part_number="D0NPULL", duration_length=8, duration_units="MONTHS", part_quantity=20 )
assert(tested_org.id == self.test_org.id)
def test_29_org_deletion(self):
self.test_org.delete()
assert(self.test_org.state is State.UNSET.value)
# Exception handling tests
def test_50_check_exception_organisation_not_found(self):
with self.assertRaises(BssResourceNotFound):
Organization.get("TEST", "045454")
def test_51_check_exception_subscription_not_found(self):
with self.assertRaises(BssResourceNotFound):
Subscription.get("TEST", "045454")
def test_52_check_exception_subscriber_not_found(self):
with self.assertRaises(BssResourceNotFound):
Subscriber.get("TEST", subscriber_id="045454")
def test_53_check_exception_org_bad_data(self):
with self.assertRaises(BSSBadData):
Subscription.get("TEST", "safasfswa")
def test_54_check_exception_subscription_bad_data(self):
with self.assertRaises(BSSBadData):
Subscriber.get("TEST", subscriber_id="safasfswa")
def test_55_check_exception_subscriber_bad_data(self):
with self.assertRaises(BSSBadData):
Organization.get("TEST", "safasfswa")
# Subscriber updates - NEEDS TO BE CHECKED.
#
#
# test orgs that are deregister pending
# test on orgs not found
#
#
# update transactions need help
#
# #get admin
#
# activate admin. | en | 0.670252 | ##### This block ensures tests run in order. #502212451 # self.test_organisation_id # activate admin # todo: Maybe use this in the next tests # subscription is activated # sub added to dict # test_org_subscription.delete() todo: make this a new test. # assert(test_org_subscription.state == State.UNSET.value) # sub should not be in the sub list # seems to be a delay with update sometimes. # # todo: add a range of roles # def test_07_assign_role_to_new_user_via_organisation(self): # # subscriber = self.test_org.add_subscriber() # subscriber.activate() # subscriber.assign_role("CustomerAdministrator") # print(subscriber.get_role_list()) # assert("CustomerAdministrator" in subscriber.get_role_list()) # todo: should there be a warning for this? # todo: check for exceptions. # Trying again within 24 hour wait period # todo: check for exceptions. # todo: better assertion needed # todo: check for exceptions. # todo: check for exceptions. # todo: check for exceptions. # todo: better assertion needed # todo: check for exceptions. # todo: check for exceptions. # we can't do much about it. # In deregister pending - ignore. It's a BSSCore issue. # move it to another test. should be org, remove use # todo: check for exceptions. # todo: remove pending or an exception is thrown. # At this stage we should have 3 pending users # 1 pending admin and 2 pending users with no subscription. # entitle users and then activate admin using list # def test_24_compare_org_initiization_from_id_and_from_name(self): # org_from_id = Organization.get("TEST", self.test_org.id) # org_from_json = my_client.get_orgs("TEST", self.test_org.name)[0] # # assert(org_from_id == org_from_json) # verify objects are equal regardless of how populated. # case to be made to make split into 2 tests. check sub adds to list after add. # case to be made to make split into 2 tests. check sub adds to list after add. # Creates Org and gets details. 
# Exception handling tests # Subscriber updates - NEEDS TO BE CHECKED. # # # test orgs that are deregister pending # test on orgs not found # # # update transactions need help # # #get admin # # activate admin. | 2.042191 | 2 |
pygimli/physics/ert/importData.py | baender/gimli | 1 | 6612638 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygimli as pg
def load(fileName, verbose=False, **kwargs):
"""Shortcut to load ERT data.
Import Data and try to assume the file format.
Use pybert importer if installed.
Parameters
----------
fileName: str
Returns
-------
data: pg.DataContainer
"""
data = pg.load(fileName)
if isinstance(data, pg.DataContainerERT):
return data
# pb = pg.optImport('pybert')
# data = pb.loadData(fileName)
# print(data)
# pg.critical("Can't import ERT data file.", fileName)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygimli as pg
def load(fileName, verbose=False, **kwargs):
"""Shortcut to load ERT data.
Import Data and try to assume the file format.
Use pybert importer if installed.
Parameters
----------
fileName: str
Returns
-------
data: pg.DataContainer
"""
data = pg.load(fileName)
if isinstance(data, pg.DataContainerERT):
return data
# pb = pg.optImport('pybert')
# data = pb.loadData(fileName)
# print(data)
# pg.critical("Can't import ERT data file.", fileName)
| en | 0.370026 | #!/usr/bin/env python # -*- coding: utf-8 -*- Shortcut to load ERT data. Import Data and try to assume the file format. Use pybert importer if installed. Parameters ---------- fileName: str Returns ------- data: pg.DataContainer # pb = pg.optImport('pybert') # data = pb.loadData(fileName) # print(data) # pg.critical("Can't import ERT data file.", fileName) | 2.594989 | 3 |
236-lowest common ancestor of a binary tree/main.py | ytong82/leetcode | 0 | 6612639 | <gh_stars>0
class Solution:
def lowestCommonAncestor(self, root, p, q):
def traverseToFindPath(root, p, q, path, ppath, qpath):
if root is None:
return
else:
path.append(root)
if root == p:
for pa in path:
ppath.append(pa)
elif root == q:
for pa in path:
qpath.append(pa)
if len(ppath) == 0 or len(qpath) == 0:
traverseToFindPath(root.left, p, q, path, ppath, qpath)
traverseToFindPath(root.right, p, q, path, ppath, qpath)
path.pop()
path = []
ppath = []
qpath = []
traverseToFindPath(root, p, q, path, ppath, qpath)
plen = len(ppath)
qlen = len(qpath)
if plen == 0 or qlen == 0:
return None
length = min(plen, qlen)
lcs = ppath[0]
for i in range(length):
if ppath[i] == qpath[i]:
lcs = ppath[i]
else:
break
return lcs | class Solution:
def lowestCommonAncestor(self, root, p, q):
def traverseToFindPath(root, p, q, path, ppath, qpath):
if root is None:
return
else:
path.append(root)
if root == p:
for pa in path:
ppath.append(pa)
elif root == q:
for pa in path:
qpath.append(pa)
if len(ppath) == 0 or len(qpath) == 0:
traverseToFindPath(root.left, p, q, path, ppath, qpath)
traverseToFindPath(root.right, p, q, path, ppath, qpath)
path.pop()
path = []
ppath = []
qpath = []
traverseToFindPath(root, p, q, path, ppath, qpath)
plen = len(ppath)
qlen = len(qpath)
if plen == 0 or qlen == 0:
return None
length = min(plen, qlen)
lcs = ppath[0]
for i in range(length):
if ppath[i] == qpath[i]:
lcs = ppath[i]
else:
break
return lcs | none | 1 | 3.184854 | 3 | |
frontend/src/cherry.py | findvid/main | 0 | 6612640 | <gh_stars>0
import cherrypy
import pymongo
import shutil
import os
import shutil
import argparse
import re
import time
import datetime
import threading
from bson.objectid import ObjectId
from sys import stdout, stderr
from time import time
import indexing as idx
import kmeanstree as tree
import processhandler as ph
# instanciate and configure an argument parser
PARSER = argparse.ArgumentParser(description='Starts a CherryPy Webserver, for the find.vid project.')
PARSER.add_argument('port', metavar='PORT',
help='The port on which the webserver will run')
PARSER.add_argument('database', metavar='DB',
help='The name of the MongoDB Database on localhost')
PARSER.add_argument('collection', metavar='COLLECTION',
help='The name of the Collection in the Database')
PARSER.add_argument('filename', metavar='FILENAME',
help='The filename where the searchtree will be saved')
PARSER.add_argument("--quiet", action="store_true",
help="No output will be created.")
PARSER.add_argument("--forcerebuild", action="store_true",
help="Rebuild the searchtree and delete existing tree files if necessary.")
# parse input arguments
ARGS = PARSER.parse_args()
PORT = ARGS.port
DBNAME = ARGS.database
COLNAME = ARGS.collection
FEATUREWEIGHT = 0.5
KSPLIT = 32
KMAX = 8
FILENAME = ARGS.filename
# Directory of this file
ROOTDIR = os.path.abspath('.')
# Directory of HTML-Templates
HTMLDIR = os.path.join(ROOTDIR, 'html')
# Establish MongoDb Connection and get db and video collection
MONGOCLIENT = pymongo.MongoClient(port=8099)
DB = MONGOCLIENT[DBNAME]
VIDEOS = DB[COLNAME]
INDEXES = DB["indexes"]
HISTORY = DB["history"][COLNAME]
# Get config from MongoDb
CONFIG = VIDEOS.find_one({'_id': 'config'})
if CONFIG == None:
VIDEOS.insert({"_id" : "config", "abspath" : "/video2/videosearch/findvid/", "videopath" : "videos", "thumbnailpath" : "thumbnails"})
CONFIG = VIDEOS.find_one({'_id': 'config'})
# Directories for Videos and Thumbnails (configured in CONFIG)
VIDEODIR = os.path.abspath(os.path.join(CONFIG['abspath'], CONFIG['videopath']))
THUMBNAILDIR = os.path.abspath(os.path.join(CONFIG['abspath'], CONFIG['thumbnailpath']))
# Directory for uploads
UPLOADDIR = os.path.abspath(os.path.join(VIDEODIR, 'uploads'))
# Multithreading
HANDLER = ph.ProcessHandler(maxProcesses=7, maxPrioritys=4)
STORETREE = os.path.join(CONFIG["abspath"], FILENAME)
SHADOWLOCK = threading.Lock()
def logInfo(message):
stdout.write("INFO: %s\n" % str(message))
def logError(message):
stderr.write("ERROR: %s\n" % str(message))
# Root of the whole CherryPy Server
class Root(object):
filterChecked = True
# Searchtree Object
TREE = None
def __init__(self):
# Build tree; CURRENTLY DONE IN MAIN
#self.TREE = tree.SearchHandler(videos=VIDEOS, name=STORETREE, featureWeight=FEATUREWEIGHT, processHandler=HANDLER)
#self.TREE.loadOrBuildTree(k=KSPLIT, imax=KMAX, forceRebuild=(ARGS.forcerebuild))
# Restart index processes in journal
cursor = INDEXES.find()
for proc in cursor:
if proc["type"] == "Transkodieren":
HANDLER.runTask(priority=1, onComplete=self.indexAndTranscodeComplete, target=self.transcodeAndIndexUpload, args=(proc["src"], proc["dst"], proc["searchable"], proc["filename"], proc["_id"]), kwargs={'restarted' : True},name=proc["_id"], onCompleteArgs=(proc["src"], proc["dst"], proc["_id"]))
else: # "Indizieren"
HANDLER.runTask(priority=0, onComplete=self.indexComplete, target=self.indexUpload, args=(proc["searchable"], proc["filename"], proc["_id"]), kwargs={'restarted' : True},name=proc["_id"], onCompleteArgs=tuple([proc["_id"]]))
logInfo("Restarting process " + proc["_id"] + " from journal")
# Returns the startpage, where the history is shown
@cherrypy.expose
def index(self):
historyVideos = HISTORY.find({}, {'_id': 1, 'vidid': 1, 'sceneid': 1}).limit(50)
content = "<h1>History</h1><br />"
if historyVideos.count() == 0:
content += "No Videos in the history."
for video in historyVideos:
if not video['vidid']:
continue
dbEntry = VIDEOS.find_one({'_id': video['vidid']}, {'scenes': 0})
vidConfig = self.configScene(dbEntry, int(video['sceneid']))
vidConfig.update({'historylink': video['_id']})
content+=self.renderTemplate('history.html', vidConfig)
config = {
'title': 'Main',
'searchterm': '',
'content': content
}
return self.renderMainTemplate(config)
@cherrypy.expose
def history(self, historyid):
historyEntry = HISTORY.find_one({'_id': ObjectId(historyid)})
if not historyEntry:
raise cherrypy.HTTPRedirect('/')
similarScenes = historyEntry['similarScenes']
content = ""
if not similarScenes:
content = 'No Scenes found for your search query.'
else:
scenes = []
for similarScene in similarScenes:
if similarScene == None:
continue
distance = similarScene[0]
similarVidid = similarScene[1][0]
similarSceneid = similarScene[1][1]
similarVideo = VIDEOS.find_one({'_id': similarVidid}, {"scenes" : 0})
if similarVideo == None:
continue
simPercent = int(self.TREE.distQuality(distance) * 100)
sceneConfig = self.configScene(similarVideo, similarSceneid)
sceneConfig.update ({
'hue': str(self.calcHue(simPercent)),
'value': str(simPercent)
})
content += self.renderTemplate('similarscene.html', sceneConfig)
config = {
'title': 'Main',
'searchterm': '',
'content': content
}
return self.renderMainTemplate(config)
# Renders a template.
# filename - The filename of the template in HTMLDIR
# config - A dictionary of all placeholders with their values
def renderTemplate(self, filename, config):
tplfile = open(os.path.join(HTMLDIR, filename)).read()
# Replace each placeholder with the information in config
for key, value in config.items():
tplfile = re.sub(re.escape('<!--###'+key.upper()+'###-->'), str(value), tplfile)
return tplfile
# Calculates HSL value for similarity label color
def calcHue(self, distance):
value = int(distance)
hsl = 120
# Calculate HUE Value between 0 and 120
hsl = value * 1.2
return hsl
# Renders the main template (template.html)
# It sets the config for the uploadwindow
# config - A dictionary of all placeholders with their values
def renderMainTemplate(self, config):
# Get the uploads
uploads = self.getUploads()
filterText = ""
if self.filterChecked:
filterText = "checked"
# Expand config with uploads
config.update({
'filter': filterText,
'videocount': uploads['videocount'],
'scenecount': uploads['scenecount'],
'uploads': uploads['uploads']
})
# Render the main template
return self.renderTemplate('template.html', config)
# Formats a time in hh:mm:ss
# frame - The framenumber for which the time should be calculated
# fps - The frames per seconds which will be used for calculation
def formatTime(self, frame, fps):
lengthInSec = int(frame/fps)
seconds = lengthInSec % 60
minutes = int(lengthInSec / 60) % 60
hours = int(lengthInSec / 60 / 60) % 60
return '%1.2d' % hours + ':' + '%1.2d' % minutes + ':' + '%1.2d' % seconds
# Returns the configuration for a given video
def configVideo(self, video):
filename = str(video['filename'])
videopath = os.path.join('/videos/', filename)
fps = int(video['fps'])
vidid = str(video['_id'])
return {
'url': videopath,
'extension': os.path.splitext(filename)[1][1:],
# TODO use the relative thumbnails path and confirm that this is the right way to do this
'thumbnail': os.path.join('/thumbnails/', os.path.splitext(os.path.basename(vidid))[0], 'scene0.jpeg'),
'videoid': vidid,
'deletelink': '/removeVideo?vidid='+vidid,
'filename': os.path.basename(filename),
'time': '0',
'length': self.formatTime(int(video['cuts'][-1]), fps)
}
# Returns configuration for an indexing process
def configIndexProc(self, indproc):
# Basically just remaps _id to videohash...
return {
'FILENAME': indproc["filename"],
'TIMESTAMP': datetime.datetime.fromtimestamp(indproc["timestamp"]).strftime('%d.%m.%Y %H:%M:%S'),
'VIDEOHASH': indproc["_id"],
'PROCESSTYPE' : indproc["type"]
}
# Returns the configuration for a given scene
def configScene(self, video, sceneid):
filename = video['filename']
vidid = video['_id']
fps = video['fps']
cuts = video['cuts']
videopath = os.path.join('/videos/', filename)
filename = os.path.basename(filename)
return {
'url': videopath,
'extension': os.path.splitext(filename)[1][1:],
'time': str(cuts[sceneid] / fps),
# TODO use the relative thumbnails path and confirm that this is the right way to do this
'thumbnail': os.path.join('/thumbnails/', os.path.splitext(os.path.basename(vidid))[0], 'scene'+str(sceneid)+'.jpeg'),
'videoid': video['_id'],
'scenecount': str(sceneid),
'starttime': self.formatTime(int(cuts[sceneid]), fps),
'filename': filename,
'endtime': self.formatTime(int(cuts[sceneid+1]), fps)
}
# Fetches all uploads from the database (upload = True)
# Returns a dictionary with {scenecount, videocount, uploads}
def getUploads(self):
# Fetch all entries in video-collection where upload = True, except config
# Sorted by Timestamp, only the 8 newest Videos
uploadsFromDb = VIDEOS.find({'upload': True, 'removed':{'$not':{'$eq': True}}},{'scenes':0}).sort([('uploadtime', -1)]).limit(8)
uploads = ""
videocount = 0
scenecount = 0
for upload in uploadsFromDb:
videocount += 1
fps = int(upload['fps'])
filename = os.path.basename(str(upload['filename']))
scenes = len(upload['cuts']) - 1 # There are n scenes and n+1 cuts!
scenecount += scenes
vidid = str(upload['_id'])
uploadconfig = {
# TODO use the relative thumbnails path and confirm that this is the right way to do this
'thumbnail': os.path.join('/thumbnails/', os.path.basename(vidid), 'scene0.jpeg'),
'videoid': vidid,
'deletelink': '/removeVideo?vidid='+vidid,
'scenecount': scenes,
'filename': filename,
'length': self.formatTime(int(upload['cuts'][-1]), fps) # Last entry in cuts is also the framecount
}
uploads += self.renderTemplate('upload.html', uploadconfig)
return {'scenecount': scenecount, 'videocount': videocount, 'uploads': uploads}
# Returns a list of all currently running indexing processes
@cherrypy.expose
def indexes(self, vidId = None):
content = ""
cursorIndexingProcesses = INDEXES.find()
# if a video ID has been passed, abort the process
if vidId:
print "Abort indexing process for video " , vidId
INDEXES.remove({"_id": vidId})
# INDEXPROCS[vidId].stop() or whatever
# Cleanup is done by callbacks if they receive an error-marker as result
HANDLER.stopProcess(name=vidId)
raise cherrypy.HTTPRedirect('/indexes')
if cursorIndexingProcesses.count() == 0:
content = "There are no videos indexing at the moment."
for indexProcess in cursorIndexingProcesses:
content += self.renderTemplate('indexes.html', self.configIndexProc(indexProcess))
config = {
'title': 'Currently Indexing',
'searchterm': '',
'content': content
}
return self.renderMainTemplate(config)
# Returns a list of videos, found by given name (GET parameter)
# name - string after which is searched
@cherrypy.expose
def search(self, name = None):
# If name is unspecified, redirect to startpage
if not name:
raise cherrypy.HTTPRedirect('/')
# Get all videos with substring: <name>
videosFromDb = VIDEOS.find({"filename": { '$regex': name}, 'removed':{'$not':{'$eq': True}}}, {"scenes" : 0})
# If no videos where found, tell the user
if videosFromDb.count() == 0:
content = 'No Videos found, for your search query: "'+name+'".'
else:
videos = []
content = ""
limit = 100
counter = 1
for video in videosFromDb:
content += self.renderTemplate('video.html', self.configVideo(video))
if counter == limit:
break
counter+=1
config = {
'title': 'Search',
'searchterm': name,
'content': content
}
return self.renderMainTemplate(config)
# Returns a list of scenes, found by similarscene search
# vidid - ID of the source video
# second - Second of the source scene in the source video
@cherrypy.expose
def searchScene(self, vidid = None, second = None):
# If one of the parameters are unspecified, redirect to startpage
if not vidid or not second:
raise cherrypy.HTTPRedirect('/')
# Get the scene where the frame is from TODO: Think of a more efficient way to do this
video = VIDEOS.find_one({'_id': str(vidid), 'removed':{'$not':{'$eq': True}}}, {'scenes' : 0})
if video == None:
content = "The source video dosen't exist (anymore)."
else:
fps = int(video['fps'])
second = float(second)
frame = int(fps*second)
sceneid = 0
for i,endframe in enumerate(video['cuts']):
if frame < endframe:
sceneid = i-1
break
similarScenes = self.TREE.search(vidHash=vidid, sceneId=sceneid, wantedNNs=100, maxTouches=10000, filterChecked=self.filterChecked)
HISTORY.insert({'timestamp': time(), 'vidid': vidid, 'sceneid': sceneid, 'similarScenes': similarScenes})
content = ""
if not similarScenes:
content = 'No Scenes found for your search query.'
else:
scenes = []
for similarScene in similarScenes:
if similarScene == None:
continue
distance = similarScene[0]
similarVidid = similarScene[1][0]
similarSceneid = similarScene[1][1]
similarVideo = VIDEOS.find_one({'_id': similarVidid}, {"scenes" : 0})
if similarVideo == None:
continue
simPercent = int(self.TREE.distQuality(distance) * 100)
sceneConfig = self.configScene(similarVideo, similarSceneid)
sceneConfig.update ({
'hue': str(self.calcHue(simPercent)),
'value': str(simPercent)
})
content += self.renderTemplate('similarscene.html', sceneConfig)
config = {
'title': 'Found Scenes',
'searchterm': '',
'content': content
}
return self.renderMainTemplate(config)
# Returns a text-version of scenes, found by similarscene search
# This function is for benchmark purposes
# vidid - ID of the source video
# frame - Framenumber of the source scene in the source video
@cherrypy.expose
def searchSceneList(self, vidid=None, frame=None, limit=100, nnlimit=1000):
# If one of the parameters are unspecified, redirect to startpage
if not vidid:
return 'ERROR! - No vidid.'
if not frame:
return 'ERROR! - No framenumber.'
# Get the scene where the frame is from TODO: Think of a more efficient way to do this
video = VIDEOS.find_one({'_id': str(vidid), 'removed':{'$not':{'$eq': True}}}, {'scenes' : 0})
sceneid = 0
for i,endframe in enumerate(video['cuts']):
if frame < endframe:
sceneid = i-1
break
similarScenes = self.TREE.search(vidHash=vidid, sceneId=sceneid, wantedNNs=int(limit), maxTouches=int(nnlimit), filterChecked=True)
result = ""
if not similarScenes:
return 'No Scenes found for your search query.'
else:
scenes = []
for similarScene in similarScenes:
if similarScene == None:
continue
similarVidid = similarScene[1][0]
similarSceneid = similarScene[1][1]
similarVideo = VIDEOS.find_one({'_id': similarVidid}, {"scenes" : 0})
result += " " + similarVideo['filename'] + " " + str( int(similarVideo['cuts'][similarSceneid]) ) + " " + str( int(similarVideo['cuts'][similarSceneid+1])-1 ) + "\n"
return result
# Returns all scenes for the given video, plus the originvideo
# vidid - ID of the originvideo
@cherrypy.expose
def video(self, vidid = None):
# If video is unspecified, redirect to startpage
if not vidid:
raise cherrypy.HTTPRedirect('/')
videoFromDb = VIDEOS.find_one({'_id': str(vidid), 'removed':{'$not':{'$eq': True}}}, {"scenes" : 0})
# If there is no video with the given vidid, redirect to startpage
if not videoFromDb:
raise cherrypy.HTTPRedirect('/')
scenes = []
# There is one scene less than cuts
for sceneid in range(len(videoFromDb['cuts'])-1):
scenes.append(self.renderTemplate('scene.html', self.configScene(videoFromDb, sceneid)))
# Wrap the videos in "scene-wrap" div
content = '<div class="scene-wrap">'
for scene in scenes:
content += scene
content += "</div>"
content += self.renderTemplate('originvideo.html', self.configVideo(videoFromDb))
config = {
'title': 'Scenes',
'searchterm': '',
'content': content
}
return self.renderMainTemplate(config)
@cherrypy.expose
def removeVideo(self, vidid):
# If video is unspecified, redirect to startpage
if not vidid:
raise cherrypy.HTTPRedirect('/')
self.TREE.deleteVideo(vidid)
VIDEOS.update({'_id': vidid}, {'$set': {'removed': True}})
raise cherrypy.HTTPRedirect('/')
@cherrypy.expose
def shadowTree(self):
print "Try to Shadow Tree"
SHADOWLOCK.acquire()
try:
if self.TREE.shadowCopy == None:
self.TREE.shadowCopy = tree.SearchHandler(videos=VIDEOS, name=STORETREE + "_" + str(int(time())), featureWeight=FEATUREWEIGHT, processHandler=HANDLER)
else:
return
finally:
SHADOWLOCK.release()
self.TREE.shadowCopy.loadOrBuildTree(k=KSPLIT, imax=KMAX, forceRebuild=True)
self.TREE = self.TREE.shadowCopy
logInfo("Tree was built and swapped!")
# Receives an uploaded video (raw request body), stores it under UPLOADDIR
# and schedules indexing - preceded by a transcoding step when the file is
# not already an mp4.  This function is intended to be called by javascript only.
# searchable - form value; decides the task priority and whether the video
#              takes part in similarity search
@cherrypy.expose
def upload(self, searchable):
    # Uploads can be huge; effectively disable the response timeout.
    cherrypy.response.timeout = 1000000
    allowedExtensions = [".avi", ".mp4", ".mpg", ".mkv", ".flv", ".webm", ".mov"]
    # NOTE(review): bool(searchable) is True for ANY non-empty string
    # (including "false"/"0") - presumably the client sends an empty string
    # for "not searchable"; verify against the JS caller.
    if bool(searchable):
        priority = 0
    else:
        priority = 2
    filename = os.path.basename(cherrypy.request.headers['x-filename'])
    basename = os.path.splitext(filename)[0]
    extension = os.path.splitext(filename)[1]
    # Fix: compare case-insensitively so e.g. ".MP4" uploads are accepted too,
    # and use the idiomatic "not in".
    if extension.lower() not in allowedExtensions:
        logError("Filetype '%s' is not within allowed extensions!" % extension)
        return "ERROR: Wrong file extension."
    destination = os.path.join(UPLOADDIR, filename)
    # Find a free filename ("<base>_02<ext>", ...); also avoid clashing with
    # an mp4 that a previous transcode of the same name produced.
    i = 2
    while os.path.exists(destination) or os.path.exists(os.path.splitext(destination)[0] + '.mp4'):
        destination = os.path.join(UPLOADDIR, basename + "_" + "%1.2d" % i + extension)
        logInfo('File already exists, renaming to %s!' % destination)
        i += 1
    basename = os.path.splitext(os.path.basename(destination))[0]
    # Stream the request body straight to disk.
    with open(destination, 'wb') as f:
        shutil.copyfileobj(cherrypy.request.body, f)
    # The content hash doubles as video id and journal key.
    vidHash = idx.hashFile(destination, 65536)
    if extension.lower() != '.mp4':
        # Transcode to mp4 first, then index; the callback cleans up the source.
        newdestination = os.path.join(UPLOADDIR, basename + ".mp4")
        filename = os.path.basename(newdestination)
        HANDLER.runTask(priority=priority, onComplete=self.indexAndTranscodeComplete, target=self.transcodeAndIndexUpload, args=(destination, newdestination, searchable, filename, vidHash), name=vidHash, onCompleteArgs=(destination, newdestination, vidHash))
    else:
        HANDLER.runTask(priority=priority, onComplete=self.indexComplete, target=self.indexUpload, args=(searchable, filename, vidHash), name=vidHash, onCompleteArgs=tuple([vidHash]))
def transcodeAndIndexUpload(self, source, destination, searchable, filename, vidHash, restarted = False):
    # Worker-side task: transcode `source` to mp4 at `destination`, journal
    # the step in INDEXES (so it can be restarted after a crash), then chain
    # straight into indexing.  `restarted` skips re-creating the journal entry.
    logInfo("Transcoding Video to mp4 - '%s'" % filename)
    # NOTE(review): `priority` is computed here but never used in this
    # function (the HANDLER.runTaskWait call that used it is commented out).
    if bool(searchable):
        priority = 0
    else:
        priority = 2
    #Create an entry in "indexes" collection
    t = time()
    if not restarted:
        #Create an entry in "indexes" collection
        index = {}
        index["_id"] = vidHash
        index["timestamp"] = t
        index["filename"] = filename
        index["src"] = source
        index["dst"] = destination
        index["searchable"] = searchable
        index["type"] = "Transkodieren"
        INDEXES.insert(index)
    r = idx.transcode_video(source, destination, quiet=True)
    if r != 0:
        # NOTE(review): a failed transcode is only logged - execution falls
        # through and still attempts to index the (missing/broken) mp4.
        # Confirm whether an early abort is intended here.
        logError("Transcoding of video '%s' has failed" % filename)
    #Remove the entry to mark this indexing process as done
    INDEXES.remove({"_id" : vidHash, "timestamp" : t, "filename" : filename, "type" : "Transkodieren"})
    logInfo("Transcoding finished - '%s'" % filename)
    #if source != destination:
    #	os.remove(destination)
    # Chain directly into indexing and its completion callback.
    result2 = self.indexUpload(searchable, filename, vidHash, restarted=restarted)
    return self.indexComplete(result2, vidHash)
    #result = HANDLER.runTaskWait(priority=priority, target=self.indexUpload, args=(searchable, filename, vidHash), kwargs={'restarted' : restarted}, name=vidHash)
    #	self.indexComplete(result, vidHash)
def indexUpload(self, searchable, filename, vidHash, restarted = False):
    # Worker-side task: journal the indexing step, run the (slow) scene
    # detection / feature extraction, then clear the journal entry.
    # Returns whatever idx.index_video returns (the new video id).
    logInfo("Indexing Video - '%s'" % filename)
    t = time()
    if not restarted:
        #Create an entry in "indexes" collection
        index = {}
        index["_id"] = vidHash
        index["timestamp"] = t
        index["filename"] = filename
        index["searchable"] = searchable
        index["type"] = "Indizieren"
        INDEXES.insert(index)
    # NOTE(review): here searchable is coerced via bool(int(...)), while
    # upload() uses plain bool(...) - confirm which contract is right.
    vidid = idx.index_video(DBNAME, COLNAME, vidHash, os.path.join('uploads/', filename), searchable=bool(int(searchable)), uploaded=True, thumbpath=THUMBNAILDIR)
    #Remove the entry to mark this indexing process as done
    INDEXES.remove({"_id" : vidHash})
    logInfo("Indexing finished - '%s', removed process '%s' from journal" % (filename, vidHash))
    return vidid
def indexAndTranscodeComplete(self, res, sourcefile, targetfile, vidHash):
    # Completion callback for transcode+index tasks.
    #vidid might be an error-object generated by the processhandler
    #in this case, we have to:
    #	delete the source video, in case transcoding was in process
    #	delete database entry with _id = vidid
    #	recursively delete thumbnails/<vidid>
    # For processes that directly indexed, indexComplete is registered as callback
    # delete source video
    if os.path.exists(sourcefile): #Merely a defensive mechanism, should be always true
        os.remove(sourcefile)
    # process was killed by user, remove the targetfile aswell
    if res == False and os.path.exists(targetfile) and targetfile != sourcefile:
        os.remove(targetfile)
    # Hack to remove transcodings from the journal for sure
    INDEXES.remove({"_id" : vidHash})
    # Delegate the remaining cleanup / success handling to indexComplete.
    return self.indexComplete(res, vidHash)
def indexComplete(self, res, vidHash):
    # Completion callback for indexing tasks.
    # res == False  -> task was aborted/killed: delete the thumbnails.
    # res == None   -> duplicate file: nothing was indexed.
    # otherwise     -> success: publish the video in the search tree.
    # process died, delete thumbnails folder if it exists and
    if res == False:
        if os.path.exists(os.path.join(THUMBNAILDIR, vidHash)):
            shutil.rmtree(os.path.join(THUMBNAILDIR, vidHash))
        logInfo("Video indexing aborted. VideoID: %s" % vidHash)
        # NOTE(review): this branch falls through and implicitly returns
        # None (not False) - confirm callers do not distinguish the two.
    elif res == None:
        # TODO: error messages
        logError("File already exists.")
        return False
    else:
        self.TREE.addVideo(vidHash=vidHash)
        logInfo("Video successfully completed. VideoID: %s" % vidHash)
        return True
# Flips the "filter" checkbox state that similarity searches consult,
# then returns the user to the start page.
@cherrypy.expose
def toggleFilter(self):
    # XOR with True inverts the boolean flag.
    self.filterChecked ^= True
    raise cherrypy.HTTPRedirect('/')
def killProcesses():
    # SIGTERM/SIGINT handler: kill every worker process, then stop CherryPy.
    HANDLER.nukeEverything()
    cherrypy.engine.exit()
if __name__ == '__main__':
    # Bind on all interfaces on the configured port.
    cherrypy.config.update({
        'server.socket_host': '0.0.0.0',
        'server.socket_port': int(PORT)
    })
    if ARGS.quiet:
        cherrypy.config.update({'environment': 'embedded'})
    # Mount the directories which are configured
    conf = {
        '/js': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': os.path.join(ROOTDIR, 'js')
        },
        '/css': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': os.path.join(ROOTDIR, 'css')
        },
        '/images': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': os.path.join(ROOTDIR, 'images')
        },
        '/thumbnails': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': THUMBNAILDIR
        },
        '/videos': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': VIDEODIR
        }
    }
    root = Root()
    cherrypy.tree.mount(root, '/', conf)
    # Reuse the newest previously stored tree file if one exists, otherwise
    # start a fresh tree named after the current timestamp.
    files = os.listdir(CONFIG['abspath'])
    files = sorted(files)
    treefiles = []
    for name in files:
        if name.startswith(FILENAME):
            treefiles.append(name)
    if len(treefiles) == 0:
        treename = os.path.join(CONFIG['abspath'], FILENAME + "_" + str(int(time())))
    else:
        # NOTE(review): split('_')[-2] assumes tree files carry a trailing
        # "_<suffix>" after the timestamp; if they are named just
        # "<FILENAME>_<ts>" this picks the wrong part - verify the naming
        # scheme produced by tree.SearchHandler.
        treename = os.path.join(CONFIG['abspath'], FILENAME + "_" + treefiles[-1].split('_')[-2])
    # Build Searchtree
    root.TREE = tree.SearchHandler(videos=VIDEOS, name=treename, featureWeight=FEATUREWEIGHT, processHandler=HANDLER)
    root.TREE.loadOrBuildTree(k=KSPLIT, imax=KMAX, forceRebuild=(ARGS.forcerebuild))
    # Set body size to 0 (unlimited), cause the uploaded files could be really big
    cherrypy.server.max_request_body_size = 0
    cherrypy.server.socket_timeout = 3600
    if hasattr(cherrypy.engine, 'block'):
        # 3.1 syntax
        # Install custom signal handlers so worker processes die with the server.
        if hasattr(cherrypy.engine, 'signal_handler'):
            cherrypy.engine.signal_handler.unsubscribe()
            cherrypy.engine.signal_handler.set_handler('SIGTERM', killProcesses)
            cherrypy.engine.signal_handler.set_handler('SIGINT', killProcesses)
            cherrypy.engine.signal_handler.subscribe()
        cherrypy.engine.start()
        cherrypy.engine.block()
    else:
        # 3.0 syntax
        cherrypy.server.quickstart()
        cherrypy.engine.start()
| import cherrypy
import pymongo
import shutil
import os
import shutil
import argparse
import re
import time
import datetime
import threading
from bson.objectid import ObjectId
from sys import stdout, stderr
from time import time
import indexing as idx
import kmeanstree as tree
import processhandler as ph
# instantiate and configure an argument parser
PARSER = argparse.ArgumentParser(description='Starts a CherryPy Webserver, for the find.vid project.')
PARSER.add_argument('port', metavar='PORT',
    help='The port on which the webserver will run')
PARSER.add_argument('database', metavar='DB',
    help='The name of the MongoDB Database on localhost')
PARSER.add_argument('collection', metavar='COLLECTION',
    help='The name of the Collection in the Database')
PARSER.add_argument('filename', metavar='FILENAME',
    help='The filename where the searchtree will be saved')
PARSER.add_argument("--quiet", action="store_true",
    help="No output will be created.")
PARSER.add_argument("--forcerebuild", action="store_true",
    help="Rebuild the searchtree and delete existing tree files if necessary.")
# parse input arguments
ARGS = PARSER.parse_args()
PORT = ARGS.port
DBNAME = ARGS.database
COLNAME = ARGS.collection
# Search-tree tuning constants (weighting and k-means parameters);
# passed through to tree.SearchHandler / loadOrBuildTree.
FEATUREWEIGHT = 0.5
KSPLIT = 32
KMAX = 8
FILENAME = ARGS.filename
# Directory of this file
ROOTDIR = os.path.abspath('.')
# Directory of HTML-Templates
HTMLDIR = os.path.join(ROOTDIR, 'html')
# Establish MongoDb Connection and get db and video collection
# NOTE(review): MongoDB port 8099 is hard-coded here - confirm.
MONGOCLIENT = pymongo.MongoClient(port=8099)
DB = MONGOCLIENT[DBNAME]
VIDEOS = DB[COLNAME]
INDEXES = DB["indexes"]
HISTORY = DB["history"][COLNAME]
# Get config from MongoDb; bootstrap a default config document on first run.
CONFIG = VIDEOS.find_one({'_id': 'config'})
if CONFIG == None:
    VIDEOS.insert({"_id" : "config", "abspath" : "/video2/videosearch/findvid/", "videopath" : "videos", "thumbnailpath" : "thumbnails"})
    CONFIG = VIDEOS.find_one({'_id': 'config'})
# Directories for Videos and Thumbnails (configured in CONFIG)
VIDEODIR = os.path.abspath(os.path.join(CONFIG['abspath'], CONFIG['videopath']))
THUMBNAILDIR = os.path.abspath(os.path.join(CONFIG['abspath'], CONFIG['thumbnailpath']))
# Directory for uploads
UPLOADDIR = os.path.abspath(os.path.join(VIDEODIR, 'uploads'))
# Multithreading
HANDLER = ph.ProcessHandler(maxProcesses=7, maxPrioritys=4)
STORETREE = os.path.join(CONFIG["abspath"], FILENAME)
# Guards creation of the shadow search tree (see Root.shadowTree).
SHADOWLOCK = threading.Lock()
def logInfo(message):
    # Lightweight logger: prefix with severity, one line to stdout.
    line = "INFO: " + str(message) + "\n"
    stdout.write(line)
def logError(message):
    # Lightweight logger: prefix with severity, one line to stderr.
    line = "ERROR: " + str(message) + "\n"
    stderr.write(line)
# Root of the whole CherryPy Server
class Root(object):
    # Whether the "filter" checkbox is active for similarity searches.
    filterChecked = True
    # Searchtree Object
    TREE = None
    def __init__(self):
        # Build tree; CURRENTLY DONE IN MAIN
        #self.TREE = tree.SearchHandler(videos=VIDEOS, name=STORETREE, featureWeight=FEATUREWEIGHT, processHandler=HANDLER)
        #self.TREE.loadOrBuildTree(k=KSPLIT, imax=KMAX, forceRebuild=(ARGS.forcerebuild))
        # Restart index processes in journal: any entry still present in the
        # INDEXES collection belongs to a task that did not finish (crash),
        # so re-schedule it with the worker handler.
        cursor = INDEXES.find()
        for proc in cursor:
            if proc["type"] == "Transkodieren":
                HANDLER.runTask(priority=1, onComplete=self.indexAndTranscodeComplete, target=self.transcodeAndIndexUpload, args=(proc["src"], proc["dst"], proc["searchable"], proc["filename"], proc["_id"]), kwargs={'restarted' : True},name=proc["_id"], onCompleteArgs=(proc["src"], proc["dst"], proc["_id"]))
            else: # "Indizieren"
                HANDLER.runTask(priority=0, onComplete=self.indexComplete, target=self.indexUpload, args=(proc["searchable"], proc["filename"], proc["_id"]), kwargs={'restarted' : True},name=proc["_id"], onCompleteArgs=tuple([proc["_id"]]))
            logInfo("Restarting process " + proc["_id"] + " from journal")
# Returns the startpage, where the history is shown
@cherrypy.expose
def index(self):
    # Only project the fields the history snippet needs; cap at 50 entries.
    historyVideos = HISTORY.find({}, {'_id': 1, 'vidid': 1, 'sceneid': 1}).limit(50)
    content = "<h1>History</h1><br />"
    if historyVideos.count() == 0:
        content += "No Videos in the history."
    for video in historyVideos:
        if not video['vidid']:
            continue
        # NOTE(review): dbEntry may be None if the video was deleted since
        # the search was made; configScene would then fail - verify.
        dbEntry = VIDEOS.find_one({'_id': video['vidid']}, {'scenes': 0})
        vidConfig = self.configScene(dbEntry, int(video['sceneid']))
        # The history entry id links back to the stored result page.
        vidConfig.update({'historylink': video['_id']})
        content+=self.renderTemplate('history.html', vidConfig)
    config = {
        'title': 'Main',
        'searchterm': '',
        'content': content
    }
    return self.renderMainTemplate(config)
@cherrypy.expose
def history(self, historyid):
    # Re-renders a previously stored search result page.
    # historyid - MongoDB ObjectId (as string) of the history entry
    historyEntry = HISTORY.find_one({'_id': ObjectId(historyid)})
    if not historyEntry:
        raise cherrypy.HTTPRedirect('/')
    # similarScenes is a list of (distance, (vidid, sceneid)) tuples as
    # stored by searchScene.
    similarScenes = historyEntry['similarScenes']
    content = ""
    if not similarScenes:
        content = 'No Scenes found for your search query.'
    else:
        scenes = []
        for similarScene in similarScenes:
            if similarScene == None:
                continue
            distance = similarScene[0]
            similarVidid = similarScene[1][0]
            similarSceneid = similarScene[1][1]
            similarVideo = VIDEOS.find_one({'_id': similarVidid}, {"scenes" : 0})
            # Skip videos that were deleted after the search was stored.
            if similarVideo == None:
                continue
            # Convert the raw distance into a 0-100 similarity percentage.
            simPercent = int(self.TREE.distQuality(distance) * 100)
            sceneConfig = self.configScene(similarVideo, similarSceneid)
            sceneConfig.update ({
                'hue': str(self.calcHue(simPercent)),
                'value': str(simPercent)
            })
            content += self.renderTemplate('similarscene.html', sceneConfig)
    config = {
        'title': 'Main',
        'searchterm': '',
        'content': content
    }
    return self.renderMainTemplate(config)
# Renders a template.
# filename - The filename of the template in HTMLDIR
# config - A dictionary of all placeholders with their values; the key
#          "foo" replaces the literal token "<!--###FOO###-->"
def renderTemplate(self, filename, config):
    # Fix: use a context manager so the file handle is closed again
    # (the old open(...).read() leaked it until garbage collection).
    with open(os.path.join(HTMLDIR, filename)) as tpl:
        tplfile = tpl.read()
    # Replace each placeholder with the information in config.
    # Fix: plain str.replace instead of re.sub - the placeholder is a
    # literal token anyway, and re.sub would misinterpret backslashes in
    # str(value) as escape sequences.
    for key, value in config.items():
        tplfile = tplfile.replace('<!--###' + key.upper() + '###-->', str(value))
    return tplfile
# Calculates the HSL hue for the similarity label colour.
# distance - similarity percentage in 0..100 (callers pass simPercent)
# Returns a hue between 0 and 120 (red -> green).
def calcHue(self, distance):
    # Fix: dropped two dead assignments (hsl was set to 120 and
    # immediately overwritten); scale 0..100 -> 0..120 degrees.
    return int(distance) * 1.2
# Renders the main template (template.html)
# It sets the config for the uploadwindow
# config - A dictionary of all placeholders with their values
def renderMainTemplate(self, config):
    # Get the uploads
    uploads = self.getUploads()
    # Mirror the current filter flag into the checkbox markup attribute.
    filterText = ""
    if self.filterChecked:
        filterText = "checked"
    # Expand config with uploads
    config.update({
        'filter': filterText,
        'videocount': uploads['videocount'],
        'scenecount': uploads['scenecount'],
        'uploads': uploads['uploads']
    })
    # Render the main template
    return self.renderTemplate('template.html', config)
# Formats a time in hh:mm:ss
# frame - The framenumber for which the time should be calculated
# fps - The frames per seconds which will be used for calculation
def formatTime(self, frame, fps):
    lengthInSec = int(frame/fps)
    seconds = lengthInSec % 60
    minutes = int(lengthInSec / 60) % 60
    # Fix: hours were previously taken "% 60" as well, so runtimes of 60h
    # and above wrapped around; hours must not wrap.
    hours = int(lengthInSec / 60 / 60)
    return '%1.2d' % hours + ':' + '%1.2d' % minutes + ':' + '%1.2d' % seconds
# Returns the configuration for a given video
# video - MongoDB video document (with at least _id, filename, fps, cuts)
def configVideo(self, video):
    filename = str(video['filename'])
    videopath = os.path.join('/videos/', filename)
    fps = int(video['fps'])
    vidid = str(video['_id'])
    return {
        'url': videopath,
        'extension': os.path.splitext(filename)[1][1:],
        # TODO use the relative thumbnails path and confirm that this is the right way to do this
        'thumbnail': os.path.join('/thumbnails/', os.path.splitext(os.path.basename(vidid))[0], 'scene0.jpeg'),
        'videoid': vidid,
        'deletelink': '/removeVideo?vidid='+vidid,
        'filename': os.path.basename(filename),
        'time': '0',
        # The last cut position equals the total frame count.
        'length': self.formatTime(int(video['cuts'][-1]), fps)
    }
# Builds the template placeholder dict for one running indexing process.
def configIndexProc(self, indproc):
    # Human readable start time, e.g. "24.12.2015 13:37:00".
    started = datetime.datetime.fromtimestamp(indproc["timestamp"])
    conf = {}
    conf['FILENAME'] = indproc["filename"]
    conf['TIMESTAMP'] = started.strftime('%d.%m.%Y %H:%M:%S')
    conf['VIDEOHASH'] = indproc["_id"]  # content hash doubles as the id
    conf['PROCESSTYPE'] = indproc["type"]
    return conf
# Returns the configuration for a given scene
# video - MongoDB video document
# sceneid - index of the scene (scene i spans cuts[i]..cuts[i+1])
def configScene(self, video, sceneid):
    filename = video['filename']
    vidid = video['_id']
    fps = video['fps']
    cuts = video['cuts']
    videopath = os.path.join('/videos/', filename)
    filename = os.path.basename(filename)
    return {
        'url': videopath,
        'extension': os.path.splitext(filename)[1][1:],
        # Playback start offset in seconds.
        'time': str(cuts[sceneid] / fps),
        # TODO use the relative thumbnails path and confirm that this is the right way to do this
        'thumbnail': os.path.join('/thumbnails/', os.path.splitext(os.path.basename(vidid))[0], 'scene'+str(sceneid)+'.jpeg'),
        'videoid': video['_id'],
        'scenecount': str(sceneid),
        'starttime': self.formatTime(int(cuts[sceneid]), fps),
        'filename': filename,
        'endtime': self.formatTime(int(cuts[sceneid+1]), fps)
    }
# Fetches all uploads from the database (upload = True)
# Returns a dictionary with {scenecount, videocount, uploads}
def getUploads(self):
    # Fetch all entries in video-collection where upload = True, except config
    # Sorted by Timestamp, only the 8 newest Videos
    uploadsFromDb = VIDEOS.find({'upload': True, 'removed':{'$not':{'$eq': True}}},{'scenes':0}).sort([('uploadtime', -1)]).limit(8)
    uploads = ""
    videocount = 0
    scenecount = 0
    for upload in uploadsFromDb:
        videocount += 1
        fps = int(upload['fps'])
        filename = os.path.basename(str(upload['filename']))
        scenes = len(upload['cuts']) - 1 # There are n scenes and n+1 cuts!
        scenecount += scenes
        vidid = str(upload['_id'])
        uploadconfig = {
            # TODO use the relative thumbnails path and confirm that this is the right way to do this
            'thumbnail': os.path.join('/thumbnails/', os.path.basename(vidid), 'scene0.jpeg'),
            'videoid': vidid,
            'deletelink': '/removeVideo?vidid='+vidid,
            'scenecount': scenes,
            'filename': filename,
            'length': self.formatTime(int(upload['cuts'][-1]), fps) # Last entry in cuts is also the framecount
        }
        # Concatenate one rendered snippet per upload.
        uploads += self.renderTemplate('upload.html', uploadconfig)
    return {'scenecount': scenecount, 'videocount': videocount, 'uploads': uploads}
# Returns a list of all currently running indexing processes
# vidId - optional; if given, the matching process is aborted instead
@cherrypy.expose
def indexes(self, vidId = None):
    content = ""
    cursorIndexingProcesses = INDEXES.find()
    # if a video ID has been passed, abort the process
    if vidId:
        print "Abort indexing process for video " , vidId
        # Drop the journal entry first, then kill the worker process.
        INDEXES.remove({"_id": vidId})
        # INDEXPROCS[vidId].stop() or whatever
        # Cleanup is done by callbacks if they receive an error-marker as result
        HANDLER.stopProcess(name=vidId)
        raise cherrypy.HTTPRedirect('/indexes')
    if cursorIndexingProcesses.count() == 0:
        content = "There are no videos indexing at the moment."
    for indexProcess in cursorIndexingProcesses:
        content += self.renderTemplate('indexes.html', self.configIndexProc(indexProcess))
    config = {
        'title': 'Currently Indexing',
        'searchterm': '',
        'content': content
    }
    return self.renderMainTemplate(config)
# Returns a list of videos, found by given name (GET parameter)
# name - string after which is searched (used as a MongoDB $regex)
@cherrypy.expose
def search(self, name = None):
    # If name is unspecified, redirect to startpage
    if not name:
        raise cherrypy.HTTPRedirect('/')
    # Get all videos with substring: <name>
    # NOTE(review): 'name' is passed to $regex unescaped, so users can (and
    # possibly are meant to) submit regular expressions - verify intent.
    videosFromDb = VIDEOS.find({"filename": { '$regex': name}, 'removed':{'$not':{'$eq': True}}}, {"scenes" : 0})
    # If no videos where found, tell the user
    if videosFromDb.count() == 0:
        content = 'No Videos found, for your search query: "'+name+'".'
    else:
        # Render at most 100 result entries.
        # (Removed the unused 'videos' local and replaced the manual
        # counter with enumerate.)
        content = ""
        for counter, video in enumerate(videosFromDb, start=1):
            content += self.renderTemplate('video.html', self.configVideo(video))
            if counter == 100:
                break
    config = {
        'title': 'Search',
        'searchterm': name,
        'content': content
    }
    return self.renderMainTemplate(config)
# Returns a list of scenes, found by similarscene search
# vidid - ID of the source video
# second - Second of the source scene in the source video
@cherrypy.expose
def searchScene(self, vidid = None, second = None):
    # If one of the parameters are unspecified, redirect to startpage
    if not vidid or not second:
        raise cherrypy.HTTPRedirect('/')
    # Get the scene where the frame is from TODO: Think of a more efficient way to do this
    video = VIDEOS.find_one({'_id': str(vidid), 'removed':{'$not':{'$eq': True}}}, {'scenes' : 0})
    if video == None:
        content = "The source video dosen't exist (anymore)."
    else:
        # Convert the playback second into a frame, then map the frame onto
        # its scene via the cut positions.
        fps = int(video['fps'])
        second = float(second)
        frame = int(fps*second)
        sceneid = 0
        for i,endframe in enumerate(video['cuts']):
            if frame < endframe:
                sceneid = i-1
                break
        similarScenes = self.TREE.search(vidHash=vidid, sceneId=sceneid, wantedNNs=100, maxTouches=10000, filterChecked=self.filterChecked)
        # Persist the result so it can be re-rendered from the history page.
        HISTORY.insert({'timestamp': time(), 'vidid': vidid, 'sceneid': sceneid, 'similarScenes': similarScenes})
        content = ""
        if not similarScenes:
            content = 'No Scenes found for your search query.'
        else:
            scenes = []
            for similarScene in similarScenes:
                if similarScene == None:
                    continue
                # Each hit is (distance, (vidid, sceneid)).
                distance = similarScene[0]
                similarVidid = similarScene[1][0]
                similarSceneid = similarScene[1][1]
                similarVideo = VIDEOS.find_one({'_id': similarVidid}, {"scenes" : 0})
                # Skip videos that were deleted meanwhile.
                if similarVideo == None:
                    continue
                # Convert the raw distance into a 0-100 similarity percentage.
                simPercent = int(self.TREE.distQuality(distance) * 100)
                sceneConfig = self.configScene(similarVideo, similarSceneid)
                sceneConfig.update ({
                    'hue': str(self.calcHue(simPercent)),
                    'value': str(simPercent)
                })
                content += self.renderTemplate('similarscene.html', sceneConfig)
    config = {
        'title': 'Found Scenes',
        'searchterm': '',
        'content': content
    }
    return self.renderMainTemplate(config)
# Returns a text-version of scenes, found by similarscene search.
# This function is for benchmark purposes: one line per similar scene,
# formatted as " <filename> <startframe> <endframe>".
# vidid - ID of the source video
# frame - Framenumber of the source scene in the source video
# limit - maximum number of nearest neighbours to return
# nnlimit - maximum number of tree nodes to touch during the search
@cherrypy.expose
def searchSceneList(self, vidid=None, frame=None, limit=100, nnlimit=1000):
    # If one of the parameters are unspecified, report an error.
    if not vidid:
        return 'ERROR! - No vidid.'
    if not frame:
        return 'ERROR! - No framenumber.'
    # Get the scene where the frame is from TODO: Think of a more efficient way to do this
    video = VIDEOS.find_one({'_id': str(vidid), 'removed':{'$not':{'$eq': True}}}, {'scenes' : 0})
    # Fix: HTTP parameters arrive as strings; without this cast the
    # comparison against the (int) cut positions never matched and
    # sceneid was always 0.
    frame = int(frame)
    sceneid = 0
    for i, endframe in enumerate(video['cuts']):
        if frame < endframe:
            sceneid = i - 1
            break
    similarScenes = self.TREE.search(vidHash=vidid, sceneId=sceneid, wantedNNs=int(limit), maxTouches=int(nnlimit), filterChecked=True)
    if not similarScenes:
        return 'No Scenes found for your search query.'
    result = ""
    for similarScene in similarScenes:
        if similarScene == None:
            continue
        similarVidid = similarScene[1][0]
        similarSceneid = similarScene[1][1]
        similarVideo = VIDEOS.find_one({'_id': similarVidid}, {"scenes" : 0})
        # Robustness fix: the matched video may have been deleted meanwhile
        # (the sibling searchScene method already guards against this).
        if similarVideo == None:
            continue
        result += " " + similarVideo['filename'] + " " + str( int(similarVideo['cuts'][similarSceneid]) ) + " " + str( int(similarVideo['cuts'][similarSceneid+1])-1 ) + "\n"
    return result
# Returns all scenes for the given video, plus the originvideo
# vidid - ID of the originvideo
@cherrypy.expose
def video(self, vidid = None):
# If video is unspecified, redirect to startpage
if not vidid:
raise cherrypy.HTTPRedirect('/')
videoFromDb = VIDEOS.find_one({'_id': str(vidid), 'removed':{'$not':{'$eq': True}}}, {"scenes" : 0})
# If there is no video with the given vidid, redirect to startpage
if not videoFromDb:
raise cherrypy.HTTPRedirect('/')
scenes = []
# There is one scene less than cuts
for sceneid in range(len(videoFromDb['cuts'])-1):
scenes.append(self.renderTemplate('scene.html', self.configScene(videoFromDb, sceneid)))
# Wrap the videos in "scene-wrap" div
content = '<div class="scene-wrap">'
for scene in scenes:
content += scene
content += "</div>"
content += self.renderTemplate('originvideo.html', self.configVideo(videoFromDb))
config = {
'title': 'Scenes',
'searchterm': '',
'content': content
}
return self.renderMainTemplate(config)
@cherrypy.expose
def removeVideo(self, vidid):
# If video is unspecified, redirect to startpage
if not vidid:
raise cherrypy.HTTPRedirect('/')
self.TREE.deleteVideo(vidid)
VIDEOS.update({'_id': vidid}, {'$set': {'removed': True}})
raise cherrypy.HTTPRedirect('/')
@cherrypy.expose
def shadowTree(self):
print "Try to Shadow Tree"
SHADOWLOCK.acquire()
try:
if self.TREE.shadowCopy == None:
self.TREE.shadowCopy = tree.SearchHandler(videos=VIDEOS, name=STORETREE + "_" + str(int(time())), featureWeight=FEATUREWEIGHT, processHandler=HANDLER)
else:
return
finally:
SHADOWLOCK.release()
self.TREE.shadowCopy.loadOrBuildTree(k=KSPLIT, imax=KMAX, forceRebuild=True)
self.TREE = self.TREE.shadowCopy
logInfo("Tree was built and swapped!")
# Uploads a video to the server, writes it to database and start processing
# This function is intended to be called by javascript only.
@cherrypy.expose
def upload(self, searchable):
cherrypy.response.timeout = 1000000
allowedExtensions = [".avi", ".mp4", ".mpg", ".mkv", ".flv", ".webm", ".mov"]
if bool(searchable):
priority = 0
else:
priority = 2
filename = os.path.basename(cherrypy.request.headers['x-filename'])
basename = os.path.splitext(filename)[0]
extension = os.path.splitext(filename)[1]
if not extension in allowedExtensions:
logError("Filetype '%s' is not within allowed extensions!" % extension)
return "ERROR: Wrong file extension."
destination = os.path.join(UPLOADDIR, filename)
i = 2
while os.path.exists(destination) or os.path.exists(os.path.splitext(destination)[0] + '.mp4'):
destination = os.path.join(UPLOADDIR, basename + "_" + "%1.2d" % i + extension)
logInfo('File already exists, renaming to %s!' % destination)
i+=1
basename = os.path.splitext(os.path.basename(destination))[0]
with open(destination, 'wb') as f:
shutil.copyfileobj(cherrypy.request.body, f)
vidHash = idx.hashFile(destination, 65536)
if extension != '.mp4':
newdestination = os.path.join(UPLOADDIR, basename + ".mp4")
filename = os.path.basename(newdestination)
HANDLER.runTask(priority=priority, onComplete=self.indexAndTranscodeComplete, target=self.transcodeAndIndexUpload, args=(destination, newdestination, searchable, filename, vidHash),name=vidHash, onCompleteArgs=(destination, newdestination, vidHash))
else:
HANDLER.runTask(priority=priority, onComplete=self.indexComplete, target=self.indexUpload, args=(searchable, filename, vidHash),name=vidHash, onCompleteArgs=tuple([vidHash]))
def transcodeAndIndexUpload(self, source, destination, searchable, filename, vidHash, restarted = False):
logInfo("Transcoding Video to mp4 - '%s'" % filename)
if bool(searchable):
priority = 0
else:
priority = 2
#Create an entry in "indexes" collection
t = time()
if not restarted:
#Create an entry in "indexes" collection
index = {}
index["_id"] = vidHash
index["timestamp"] = t
index["filename"] = filename
index["src"] = source
index["dst"] = destination
index["searchable"] = searchable
index["type"] = "Transkodieren"
INDEXES.insert(index)
r = idx.transcode_video(source, destination, quiet=True)
if r != 0:
logError("Transcoding of video '%s' has failed" % filename)
#Remove the entry to mark this indexing process as done
INDEXES.remove({"_id" : vidHash, "timestamp" : t, "filename" : filename, "type" : "Transkodieren"})
logInfo("Transcoding finished - '%s'" % filename)
#if source != destination:
# os.remove(destination)
result2 = self.indexUpload(searchable, filename, vidHash, restarted=restarted)
return self.indexComplete(result2, vidHash)
#result = HANDLER.runTaskWait(priority=priority, target=self.indexUpload, args=(searchable, filename, vidHash), kwargs={'restarted' : restarted}, name=vidHash)
# self.indexComplete(result, vidHash)
def indexUpload(self, searchable, filename, vidHash, restarted = False):
logInfo("Indexing Video - '%s'" % filename)
t = time()
if not restarted:
#Create an entry in "indexes" collection
index = {}
index["_id"] = vidHash
index["timestamp"] = t
index["filename"] = filename
index["searchable"] = searchable
index["type"] = "Indizieren"
INDEXES.insert(index)
vidid = idx.index_video(DBNAME, COLNAME, vidHash, os.path.join('uploads/', filename), searchable=bool(int(searchable)), uploaded=True, thumbpath=THUMBNAILDIR)
#Remove the entry to mark this indexing process as done
INDEXES.remove({"_id" : vidHash})
logInfo("Indexing finished - '%s', removed process '%s' from journal" % (filename, vidHash))
return vidid
def indexAndTranscodeComplete(self, res, sourcefile, targetfile, vidHash):
#vidid might be an error-object generated by the processhandler
#in this case, we have to:
# delete the source video, in case transcoding was in process
# delete database entry with _id = vidid
# recursively delete thumbnails/<vidid>
# For processes that directly indexed, indexComplete is registered as callback
# delete source video
if os.path.exists(sourcefile): #Merely a defensive mechanism, should be always true
os.remove(sourcefile)
# process was killed by user, remove the targetfile aswell
if res == False and os.path.exists(targetfile) and targetfile != sourcefile:
os.remove(targetfile)
# Hack to remove transcodings from the journal for sure
INDEXES.remove({"_id" : vidHash})
return self.indexComplete(res, vidHash)
def indexComplete(self, res, vidHash):
# process died, delete thumbnails folder if it exists and
if res == False:
if os.path.exists(os.path.join(THUMBNAILDIR, vidHash)):
shutil.rmtree(os.path.join(THUMBNAILDIR, vidHash))
logInfo("Video indexing aborted. VideoID: %s" % vidHash)
elif res == None:
# TODO: error messages
logError("File already exists.")
return False
else:
self.TREE.addVideo(vidHash=vidHash)
logInfo("Video successfully completed. VideoID: %s" % vidHash)
return True
@cherrypy.expose
def toggleFilter(self):
self.filterChecked = not self.filterChecked
raise cherrypy.HTTPRedirect('/')
def killProcesses():
HANDLER.nukeEverything()
cherrypy.engine.exit()
if __name__ == '__main__':
cherrypy.config.update({
'server.socket_host': '0.0.0.0',
'server.socket_port': int(PORT)
})
if ARGS.quiet:
cherrypy.config.update({'environment': 'embedded'})
# Mount the directories which are configured
conf = {
'/js': {
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(ROOTDIR, 'js')
},
'/css': {
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(ROOTDIR, 'css')
},
'/images': {
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(ROOTDIR, 'images')
},
'/thumbnails': {
'tools.staticdir.on': True,
'tools.staticdir.dir': THUMBNAILDIR
},
'/videos': {
'tools.staticdir.on': True,
'tools.staticdir.dir': VIDEODIR
}
}
root = Root()
cherrypy.tree.mount(root, '/', conf)
files = os.listdir(CONFIG['abspath'])
files = sorted(files)
treefiles = []
for name in files:
if name.startswith(FILENAME):
treefiles.append(name)
if len(treefiles) == 0:
treename = os.path.join(CONFIG['abspath'], FILENAME + "_" + str(int(time())))
else:
treename = os.path.join(CONFIG['abspath'], FILENAME + "_" + treefiles[-1].split('_')[-2])
# Build Searchtree
root.TREE = tree.SearchHandler(videos=VIDEOS, name=treename, featureWeight=FEATUREWEIGHT, processHandler=HANDLER)
root.TREE.loadOrBuildTree(k=KSPLIT, imax=KMAX, forceRebuild=(ARGS.forcerebuild))
# Set body size to 0 (unlimited), cause the uploaded files could be really big
cherrypy.server.max_request_body_size = 0
cherrypy.server.socket_timeout = 3600
if hasattr(cherrypy.engine, 'block'):
# 3.1 syntax
if hasattr(cherrypy.engine, 'signal_handler'):
cherrypy.engine.signal_handler.unsubscribe()
cherrypy.engine.signal_handler.set_handler('SIGTERM', killProcesses)
cherrypy.engine.signal_handler.set_handler('SIGINT', killProcesses)
cherrypy.engine.signal_handler.subscribe()
cherrypy.engine.start()
cherrypy.engine.block()
else:
# 3.0 syntax
cherrypy.server.quickstart()
cherrypy.engine.start() | en | 0.782353 | # instanciate and configure an argument parser # parse input arguments # Directory of this file # Directory of HTML-Templates # Establish MongoDb Connection and get db and video collection # Get config from MongoDb # Directories for Videos and Thumbnails (configured in CONFIG) # Directory for uploads # Multithreading # Root of the whole CherryPy Server # Searchtree Object # Build tree; CURRENTLY DONE IN MAIN #self.TREE = tree.SearchHandler(videos=VIDEOS, name=STORETREE, featureWeight=FEATUREWEIGHT, processHandler=HANDLER) #self.TREE.loadOrBuildTree(k=KSPLIT, imax=KMAX, forceRebuild=(ARGS.forcerebuild)) # Restart index processes in journal # "Indizieren" # Returns the startpage, where the history is shown # Renders a template. # filename - The filename of the template in HTMLDIR # config - A dictionary of all placeholders with their values # Replace each placeholder with the information in config ###'+key.upper()+'###-->'), str(value), tplfile) # Calculates HSL value for similarity label color # Calculate HUE Value between 0 and 120 # Renders the main template (template.html) # It sets the config for the uploadwindow # config - A dictionary of all placeholders with their values # Get the uploads # Expand config with uploads # Render the main template # Formats a time in hh:mm:ss # frame - The framenumber for which the time should be calculated # fps - The frames per seconds which will be used for calculation # Returns the configuration for a given video # TODO use the relative thumbnails path and confirm that this is the right way to do this # Returns configuration for an indexing process # Basically just remaps _id to videohash... 
# Returns the configuration for a given scene # TODO use the relative thumbnails path and confirm that this is the right way to do this # Fetches all uploads from the database (upload = True) # Returns a dictionary with {scenecount, videocount, uploads} # Fetch all entries in video-collection where upload = True, except config # Sorted by Timestamp, only the 8 newest Videos # There are n scenes and n+1 cuts! # TODO use the relative thumbnails path and confirm that this is the right way to do this # Last entry in cuts is also the framecount # Returns a list of all currently running indexing processes # if a video ID has been passed, abort the process # INDEXPROCS[vidId].stop() or whatever # Cleanup is done by callbacks if they receive an error-marker as result # Returns a list of videos, found by given name (GET parameter) # name - string after which is searched # If name is unspecified, redirect to startpage # Get all videos with substring: <name> # If no videos where found, tell the user # Returns a list of scenes, found by similarscene search # vidid - ID of the source video # second - Second of the source scene in the source video # If one of the parameters are unspecified, redirect to startpage # Get the scene where the frame is from TODO: Think of a more efficient way to do this # Returns a text-version of scenes, found by similarscene search # This function is for benchmark purposes # vidid - ID of the source video # frame - Framenumber of the source scene in the source video # If one of the parameters are unspecified, redirect to startpage # Get the scene where the frame is from TODO: Think of a more efficient way to do this # Returns all scenes for the given video, plus the originvideo # vidid - ID of the originvideo # If video is unspecified, redirect to startpage # If there is no video with the given vidid, redirect to startpage # There is one scene less than cuts # Wrap the videos in "scene-wrap" div # If video is unspecified, redirect to startpage # 
Uploads a video to the server, writes it to database and start processing # This function is intended to be called by javascript only. #Create an entry in "indexes" collection #Create an entry in "indexes" collection #Remove the entry to mark this indexing process as done #if source != destination: # os.remove(destination) #result = HANDLER.runTaskWait(priority=priority, target=self.indexUpload, args=(searchable, filename, vidHash), kwargs={'restarted' : restarted}, name=vidHash) # self.indexComplete(result, vidHash) #Create an entry in "indexes" collection #Remove the entry to mark this indexing process as done #vidid might be an error-object generated by the processhandler #in this case, we have to: # delete the source video, in case transcoding was in process # delete database entry with _id = vidid # recursively delete thumbnails/<vidid> # For processes that directly indexed, indexComplete is registered as callback # delete source video #Merely a defensive mechanism, should be always true # process was killed by user, remove the targetfile aswell # Hack to remove transcodings from the journal for sure # process died, delete thumbnails folder if it exists and # TODO: error messages # Mount the directories which are configured # Build Searchtree # Set body size to 0 (unlimited), cause the uploaded files could be really big # 3.1 syntax # 3.0 syntax | 2.123301 | 2 |
pythonzestclient/pyZestClient.py | pooyadav/lib-python-databox | 0 | 6612641 | <gh_stars>0
__author__ = 'pooyadav'
import logging
import struct
import os
import binascii
import zmq
import zmq.auth
from zmq.auth.thread import ThreadAuthenticator
from pythonzestclient import pyZestUtil
import socket as sc
from pythonzestclient.exception.PyZestException import PyZestException
class PyZestClient:
    """Client for a ZestDB (Databox) server speaking a CoAP-like protocol over ZeroMQ.

    Request/response traffic (GET/POST/DELETE) travels over a CURVE-encrypted
    REQ socket; observe notifications are pushed by the server over a separate
    DEALER socket whose identity and server key are handed out in the observe
    response (see :meth:`resolve`).
    """

    def __init__(self, server_key, end_point, dealer_endpoint, logger=None):
        """
        :param server_key: public CURVE key of the Zest server (string),
            used to encrypt the REQ socket traffic
        :param end_point: ZMQ endpoint of the server's request socket,
            e.g. ``tcp://host:5555``
        :param dealer_endpoint: ZMQ endpoint used for DEALER (observe) traffic
        :param logger: optional ``logging.Logger``; defaults to a module logger
        """
        self.logger = logger or logging.getLogger(__name__) #get the Logger object
        self.logger.setLevel(logging.INFO) # set which kind of errors should be output (e.g. logging.INFO - starting from INFO severity level)
        self.serverKey = server_key #key to the ZEST db server, usually string
        self.endpoint = end_point #zest endpoint
        #vs451: added dealer_endpoint assignment
        self.dealer_endpoint = dealer_endpoint
        self.logger.debug("Connecting to the server")
        self.observers = {}
        #the TRY block describes connection establishment with the server and dealer_endpoint
        try:
            #connection with server
            ctx = zmq.Context()
            auth = ThreadAuthenticator(ctx) #runs authentification as a background thread within a specific context
            auth.start()
            auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) #configure CURVE authentification for a given fomain ('*' - for all domains)
            self.socket = ctx.socket(zmq.REQ) #initialize request socket
            # Fresh ephemeral client keypair for this session.
            client_public, client_secret = zmq.curve_keypair()
            #assigning public and private keys to REQ socket
            self.socket.curve_secretkey = client_secret
            self.socket.curve_publickey = client_public
            self.socket.curve_serverkey = bytes(server_key, 'utf8')
            self.socket.connect(end_point)
            self.logger.info('Connection established with ' + end_point)
            #connection with dealer
            # NOTE(review): this DEALER socket is created but never connected or
            # used; resolve() builds its own dealer per observe — confirm intent.
            self.socket_d = ctx.socket(zmq.DEALER)
        except zmq.ZMQError as e:
            self.logger.error("Cannot establish connection" + str(e))

    def post(self,path, payLoad, contentFormat,tokenString=None):
        """POST ``payLoad`` to ``path`` on the server (Zest request code 2).

        :param path: URI path on the server to post to (Uri-Path option 11)
        :param payLoad: request body to send
        :param contentFormat: content format name, converted to its integer
            code via ``pyZestUtil.content_format_to_int``
        :param tokenString: access token authorising the request; must not be
            None (``len(tokenString)`` is taken unconditionally)
        :return: the parsed response payload, or None if an error was logged
        """
        print("Inside post")
        self.logger.debug("Posting data to the endpoint")
        #return dictionary struct of header
        header = pyZestUtil.zestHeader()
        header["code"] = 2
        header["token"] = tokenString
        header["tkl"] = len(tokenString)
        header["payload"] = payLoad
        header["oc"] = 3
        print(len(tokenString))
        print("Token string received -- " + str(header["token"]))
        # set header options as an array of dictionaries
        options = []
        #append Uri-path
        options.append({"number":11,
                        "len": len(path),
                        "value": path,})
        #append Uri-host
        options.append({"number": 3,
                        "len": len(sc.gethostname()),
                        "value": sc.gethostname(),})
        #append content format
        options.append({"number": 12,
                        "len": 2,
                        "value": pyZestUtil.content_format_to_int(contentFormat),})
        header["options"] = options
        # header marshal into bytes
        header_into_bytes = pyZestUtil.marshalZestHeader(header)
        try:
            response = self.send_request_and_await_response(header_into_bytes)
            print("response from send request " + str(response))
            try:
                parsed_response = self.handle_response(response, self.returnPayload)
                return parsed_response
            except (RuntimeError, TypeError, NameError) as e:
                self.logger.error("Inside Post: Error runtime or type or name - " + str(e.args) )
        except ValueError as e:
            self.logger.error( "Inside Post: Message sending error - " + str(e.args) )

    def get(self, path, contentFormat, tokenString=None):
        """GET the representation of ``path`` from the server (Zest request code 1).

        :param path: URI path on the server to read
        :param contentFormat: content format name for option 12
        :param tokenString: access token authorising the request (required)
        :return: the parsed response payload, or None if an error was logged
        """
        self.logger.debug("Inside GET: Getting data from the endpoint")
        header = pyZestUtil.zestHeader()
        header["code"] = 1
        header["token"] = tokenString
        header["tkl"] = len(tokenString)
        header["oc"] = 3
        # set header options
        options = []
        # Uri-Path (11), Uri-Host (3) and Content-Format (12) options.
        options.append({"number":11,
                        "len": len(path),
                        "value": path,})
        options.append({"number": 3,
                        "len": len(sc.gethostname()),
                        "value": sc.gethostname(),})
        options.append({"number": 12,
                        "len": 2,
                        "value": pyZestUtil.content_format_to_int(contentFormat),})
        header["options"] = options
        # header marshal into bytes
        header_into_bytes = pyZestUtil.marshalZestHeader(header)
        try:
            response = self.send_request_and_await_response(header_into_bytes)
            print("Respons from GET")
            print(response)
            try:
                parsed_response = self.handle_response(response,self.returnPayload)
                print(parsed_response)
                # NOTE(review): both branches return the same value; the
                # if/else below is redundant and could be a single return.
                if parsed_response is None:
                    return parsed_response
                else:
                    return parsed_response
            except (RuntimeError, TypeError, NameError) as e:
                self.logger.error("Inside GET: Error runtime or type or name - " + str(e.args))
        except ValueError as e:
            self.logger.error("Inside GET: Message sending error - " + str(e.args))

    #vs451: added delete method
    def delete(self, path, contentFormat, tokenString=None):
        """DELETE ``path`` on the server (Zest request code 4).

        :param path: URI path on the server to delete
        :param contentFormat: content format name for option 12
        :param tokenString: access token authorising the request (required)
        :return: None if the response parsed to None, otherwise the
            ``"payload"`` field of the parsed response
        """
        self.logger.debug("Inside DELETE: deleting data from the endpoint")
        header = pyZestUtil.zestHeader()
        header["code"] = 4
        header["token"] = tokenString
        header["tkl"] = len(tokenString)
        header["oc"] = 3
        # set header options
        options = []
        # Uri-Path (11), Uri-Host (3) and Content-Format (12) options.
        options.append({"number":11,
                        "len": len(path),
                        "value": path,})
        options.append({"number": 3,
                        "len": len(sc.gethostname()),
                        "value": sc.gethostname(),})
        options.append({"number": 12,
                        "len": 2,
                        "value": pyZestUtil.content_format_to_int(contentFormat),})
        header["options"] = options
        # header marshal into bytes
        header_into_bytes = pyZestUtil.marshalZestHeader(header)
        try:
            response = self.send_request_and_await_response(header_into_bytes)
            try:
                parsed_response = self.handle_response(response,self.returnPayload)
                if parsed_response is None:
                    return parsed_response
                else:
                    # NOTE(review): returnPayload already extracts "payload",
                    # so this indexes into the payload itself — confirm against
                    # pyZestUtil.parse's structure.
                    return parsed_response["payload"]
            except (RuntimeError, TypeError, NameError) as e:
                self.logger.error("Inside DELETE: Error runtime or type or name - " + str(e.args))
        except ValueError as e:
            self.logger.error("Inside DELETE: Message sending error - " + str(e.args))

    #vs451: added observeMode parameter ("data" or "audit" values)
    def observe(self, path, contentFormat, tokenString=None, observeMode = None, timeOut = 0):
        """Register an observer on ``path`` and wait for one notification.

        Sends a GET (code 1) with the Observe option (6) set; the server's
        response carries a dealer identity and server key which
        :meth:`resolve` uses to open a DEALER socket and receive the pushed
        notification.

        :param path: URI path on the server to observe
        :param contentFormat: content format name for option 12
        :param tokenString: access token authorising the request (required)
        :param observeMode: observe mode string, "data" or "audit" (required;
            ``len(observeMode)`` is taken unconditionally)
        :param timeOut: Max-Age option (14) value in seconds; 0 for default
        :return: the parsed notification payload, or None if an error was logged
        """
        self.logger.debug("Observing data from the endpoint")
        header = pyZestUtil.zestHeader()
        header["code"] = 1
        header["token"] = tokenString
        header["tkl"] = len(tokenString)
        header["oc"] = 5
        options = []
        options.append({"number": 11,
                        "len": len(path),
                        "value": path,})
        options.append({"number": 3,
                        "len": len(sc.gethostname()),
                        "value": sc.gethostname(),})
        #Q: guess this is observe option("data" or "audit")
        options.append({"number": 6,
                        "len": len(observeMode), #vs451 added observe Mode len assignment
                        "value":observeMode,}) #vs451 added observe Mode value assignment
        options.append({"number": 12,
                        "len": 2,
                        "value": pyZestUtil.content_format_to_int(contentFormat),})
        #append Max-Age
        options.append({"number": 14,
                        "len": 4,
                        "value": timeOut,})
        header["options"] = options
        header_into_bytes = pyZestUtil.marshalZestHeader(header)
        try:
            response = self.send_request_and_await_response(header_into_bytes)
        except Exception as e:
            # NOTE(review): if this except fires, `response` is unbound and the
            # handle_response call below raises NameError (caught and logged).
            self.logger.error("Inside Observe: Message sending error - " + str(e.args))
        try:
            parsed_response = self.handle_response(response, self.resolve)
            return parsed_response
        except Exception as e:
            self.logger.error("Inside Observe: Error in handling response: " + str(e.args[0]))
        #return 1 vs451: made observe method to return parsed_response instead of 1

    def resolve(self, header):
        """Open a DEALER socket from an observe response and await one message.

        The observe response's payload is used as the DEALER identity and
        option 2048 carries the server's CURVE public key for this channel.
        This call blocks on ``recv`` until the server pushes a notification.

        :param header: parsed observe response (dict with "payload"/"options")
        :return: the parsed payload of the pushed notification
        """
        newCtx = zmq.Context()
        dealer = newCtx.socket(zmq.DEALER)
        if(dealer.closed):
            print("Dealer Closed")
        else:
            print("Dealer is Open")
        try:
            # The server routes notifications to this identity.
            dealer.setsockopt_string(zmq.IDENTITY, header["payload"])
            #dealer.identity = str(header["payload"])
        except Exception as e:
            self.logger.error("Inside Resolve: Error setting identity - " + str(e.args))
        serverKey = ""
        # Option 2048 carries the per-channel CURVE server key.
        for i in range(len(header["options"])):
            if(header["options"][i]["number"] == 2048):
                serverKeyOption = header["options"][i]
                serverKey = serverKeyOption["value"]
        try:
            client_public, client_secret = zmq.curve_keypair()
        except Exception as e:
            self.logger.error("Inside Resolve: Error getting keypair - " + str(e.args))
        try:
            dealer.curve_secretkey = client_secret
            dealer.curve_publickey = client_public
        except Exception as e:
            self.logger.error("Inside Resolve: Error setting dealer Public/Private keys - " + str(e.args))
        try:
            dealer.curve_serverkey = bytes(serverKey.encode('ascii'))
        except Exception as e:
            self.logger.error("Inside Resolve: Error setting dealer Server key - " + str(e.args))
        try:
            dealer.connect(self.dealer_endpoint)
            print("connected to dealer")
        except Exception as e:
            self.logger.error("Inside Resolve: Error connecting dealer - " + str(e.args))
        try:
            # Blocking receive: waits for the server to push a notification.
            message = dealer.recv(0)
            #print(message)
        except Exception as e:
            self.logger.error("Inside resolve: Didn't get reponse " + str(e.args))
        parsed_response = self.handle_response(message,self.returnPayload)
        return parsed_response

    def send_request_and_await_response(self, request):
        """Send marshalled request bytes on the REQ socket and block for the reply.

        :param request: marshalled Zest header/payload bytes
        :return: raw response bytes, or None if sending/receiving failed
        """
        self.logger.info(" Sending request ...")
        try:
            if self.socket.closed:
                self.logger.error("No active connection")
            else:
                try:
                    self.socket.send(request,flags=0)
                except Exception as e:
                    self.logger.error("Error appeared " + str(e.args))
                try:
                    response = self.socket.recv(flags=0)
                    return response
                except Exception as e:
                    self.logger.error("Didn't get reponse " + str(e.args))
        except Exception as e:
            self.logger.error("Cannot send request " + str(e.args))

    def handle_response(self, msg, fun):
        """Parse a raw response and dispatch on its Zest/CoAP response code.

        :param msg: Response from the server
        :param fun: continuation applied to the parsed response on success
            (e.g. ``returnPayload`` or ``resolve``)
        :return: the parsed header for code 65 (Created), ``fun(parsed)`` for
            codes 66 (Deleted) and 69 (Content); None if an error code was
            logged (the raised PyZestException is caught below)
        """
        self.logger.info(" Inside Handle Response...")
        zr = pyZestUtil.parse(msg)
        print("Inside handle response ", zr["code"])
        try:
            if zr["code"] == 65:
                return zr
            #vs451: added delete response code
            elif zr["code"] == 66:
                return fun(zr)
            elif zr["code"] == 69:
                #commented two following lines as want the method to return payload
                #pl = fun(zr)
                #return zr["payload"]
                return fun(zr)
            elif zr["code"]== 128:
                # Code 128 corresponds to bad request
                raise PyZestException(zr, "Bad Request")
            elif zr["code"] == 129:
                raise PyZestException(zr, "Unauthorized request")
            elif zr["code"] == 143:
                raise PyZestException(zr, "UnSupported content format")
            else:
                raise PyZestException(zr, "Invalid code" + str(zr["code"]))
        except PyZestException as e:
            # NOTE(review): errors are swallowed here and None is returned to
            # the caller — callers cannot distinguish failure from empty data.
            self.logger.error("received incorrect request " + str(e.args))

    def returnPayload(self, x):
        """Continuation: extract the "payload" field of a parsed response."""
        return x["payload"]

    def returnInput(self, x):
        """Continuation: identity — return the parsed response unchanged."""
        return x

    def closeSockets(self):
        """Close the REQ socket (dealer sockets from resolve() are not tracked)."""
        self.socket.close()

    def stopObserving(self):
        """Placeholder — cancelling an observe is not implemented yet."""
        pass
| __author__ = 'pooyadav'
import logging
import struct
import os
import binascii
import zmq
import zmq.auth
from zmq.auth.thread import ThreadAuthenticator
from pythonzestclient import pyZestUtil
import socket as sc
from pythonzestclient.exception.PyZestException import PyZestException
class PyZestClient:
def __init__(self, server_key, end_point, dealer_endpoint, logger=None):
"""
:param server_key:
:param end_point:
:param certificate_file - Client certificate file used to establish conn with the Server using CURVE zmq api
"""
self.logger = logger or logging.getLogger(__name__) #get the Logger object
self.logger.setLevel(logging.INFO) # set which kind of errors should be output (e.g. logging.INFO - starting from INFO severity level)
self.serverKey = server_key #key to the ZEST db server, usually string
self.endpoint = end_point #zest endpoint
#vs451: added dealer_endpoint assignment
self.dealer_endpoint = dealer_endpoint
self.logger.debug("Connecting to the server")
self.observers = {}
#the TRY block describes connection establishment with the server and dealer_endpoint
try:
#connection with server
ctx = zmq.Context()
auth = ThreadAuthenticator(ctx) #runs authentification as a background thread within a specific context
auth.start()
auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) #configure CURVE authentification for a given fomain ('*' - for all domains)
self.socket = ctx.socket(zmq.REQ) #initialize request socket
client_public, client_secret = zmq.curve_keypair()
#assigning public and private keys to REQ socket
self.socket.curve_secretkey = client_secret
self.socket.curve_publickey = client_public
self.socket.curve_serverkey = bytes(server_key, 'utf8')
self.socket.connect(end_point)
self.logger.info('Connection established with ' + end_point)
#connection with dealer
self.socket_d = ctx.socket(zmq.DEALER)
except zmq.ZMQError as e:
self.logger.error("Cannot establish connection" + str(e))
def post(self,path, payLoad, contentFormat,tokenString=None):
print("Inside post")
self.logger.debug("Posting data to the endpoint")
#return dictionary struct of header
header = pyZestUtil.zestHeader()
header["code"] = 2
header["token"] = tokenString
header["tkl"] = len(tokenString)
header["payload"] = payLoad
header["oc"] = 3
print(len(tokenString))
print("Token string received -- " + str(header["token"]))
# set header options as an array of dictionaries
options = []
#append Uri-path
options.append({"number":11,
"len": len(path),
"value": path,})
#append Uri-host
options.append({"number": 3,
"len": len(sc.gethostname()),
"value": sc.gethostname(),})
#append content format
options.append({"number": 12,
"len": 2,
"value": pyZestUtil.content_format_to_int(contentFormat),})
header["options"] = options
# header marshal into bytes
header_into_bytes = pyZestUtil.marshalZestHeader(header)
try:
response = self.send_request_and_await_response(header_into_bytes)
print("response from send request " + str(response))
try:
parsed_response = self.handle_response(response, self.returnPayload)
return parsed_response
except (RuntimeError, TypeError, NameError) as e:
self.logger.error("Inside Post: Error runtime or type or name - " + str(e.args) )
except ValueError as e:
self.logger.error( "Inside Post: Message sending error - " + str(e.args) )
def get(self, path, contentFormat, tokenString=None):
self.logger.debug("Inside GET: Getting data from the endpoint")
header = pyZestUtil.zestHeader()
header["code"] = 1
header["token"] = tokenString
header["tkl"] = len(tokenString)
header["oc"] = 3
# set header options
options = []
options.append({"number":11,
"len": len(path),
"value": path,})
options.append({"number": 3,
"len": len(sc.gethostname()),
"value": sc.gethostname(),})
options.append({"number": 12,
"len": 2,
"value": pyZestUtil.content_format_to_int(contentFormat),})
header["options"] = options
# header marshal into bytes
header_into_bytes = pyZestUtil.marshalZestHeader(header)
try:
response = self.send_request_and_await_response(header_into_bytes)
print("Respons from GET")
print(response)
try:
parsed_response = self.handle_response(response,self.returnPayload)
print(parsed_response)
if parsed_response is None:
return parsed_response
else:
return parsed_response
except (RuntimeError, TypeError, NameError) as e:
self.logger.error("Inside GET: Error runtime or type or name - " + str(e.args))
except ValueError as e:
self.logger.error("Inside GET: Message sending error - " + str(e.args))
#vs451: added delete method
def delete(self, path, contentFormat, tokenString=None):
self.logger.debug("Inside DELETE: deleting data from the endpoint")
header = pyZestUtil.zestHeader()
header["code"] = 4
header["token"] = tokenString
header["tkl"] = len(tokenString)
header["oc"] = 3
# set header options
options = []
options.append({"number":11,
"len": len(path),
"value": path,})
options.append({"number": 3,
"len": len(sc.gethostname()),
"value": sc.gethostname(),})
options.append({"number": 12,
"len": 2,
"value": pyZestUtil.content_format_to_int(contentFormat),})
header["options"] = options
# header marshal into bytes
header_into_bytes = pyZestUtil.marshalZestHeader(header)
try:
response = self.send_request_and_await_response(header_into_bytes)
try:
parsed_response = self.handle_response(response,self.returnPayload)
if parsed_response is None:
return parsed_response
else:
return parsed_response["payload"]
except (RuntimeError, TypeError, NameError) as e:
self.logger.error("Inside DELETE: Error runtime or type or name - " + str(e.args))
except ValueError as e:
self.logger.error("Inside DELETE: Message sending error - " + str(e.args))
#vs451: added observeMode parameter ("data" or "audit" values)
def observe(self, path, contentFormat, tokenString=None, observeMode = None, timeOut = 0):
self.logger.debug("Observing data from the endpoint")
header = pyZestUtil.zestHeader()
header["code"] = 1
header["token"] = tokenString
header["tkl"] = len(tokenString)
header["oc"] = 5
options = []
options.append({"number": 11,
"len": len(path),
"value": path,})
options.append({"number": 3,
"len": len(sc.gethostname()),
"value": sc.gethostname(),})
#Q: guess this is observe option("data" or "audit")
options.append({"number": 6,
"len": len(observeMode), #vs451 added observe Mode len assignment
"value":observeMode,}) #vs451 added observe Mode value assignment
options.append({"number": 12,
"len": 2,
"value": pyZestUtil.content_format_to_int(contentFormat),})
#append Max-Age
options.append({"number": 14,
"len": 4,
"value": timeOut,})
header["options"] = options
header_into_bytes = pyZestUtil.marshalZestHeader(header)
try:
response = self.send_request_and_await_response(header_into_bytes)
except Exception as e:
self.logger.error("Inside Observe: Message sending error - " + str(e.args))
try:
parsed_response = self.handle_response(response, self.resolve)
return parsed_response
except Exception as e:
self.logger.error("Inside Observe: Error in handling response: " + str(e.args[0]))
#return 1 vs451: made observe method to return parsed_response instead of 1
def resolve(self, header):
newCtx = zmq.Context()
dealer = newCtx.socket(zmq.DEALER)
if(dealer.closed):
print("Dealer Closed")
else:
print("Dealer is Open")
try:
dealer.setsockopt_string(zmq.IDENTITY, header["payload"])
#dealer.identity = str(header["payload"])
except Exception as e:
self.logger.error("Inside Resolve: Error setting identity - " + str(e.args))
serverKey = ""
for i in range(len(header["options"])):
if(header["options"][i]["number"] == 2048):
serverKeyOption = header["options"][i]
serverKey = serverKeyOption["value"]
try:
client_public, client_secret = zmq.curve_keypair()
except Exception as e:
self.logger.error("Inside Resolve: Error getting keypair - " + str(e.args))
try:
dealer.curve_secretkey = client_secret
dealer.curve_publickey = client_public
except Exception as e:
self.logger.error("Inside Resolve: Error setting dealer Public/Private keys - " + str(e.args))
try:
dealer.curve_serverkey = bytes(serverKey.encode('ascii'))
except Exception as e:
self.logger.error("Inside Resolve: Error setting dealer Server key - " + str(e.args))
try:
dealer.connect(self.dealer_endpoint)
print("connected to dealer")
except Exception as e:
self.logger.error("Inside Resolve: Error connecting dealer - " + str(e.args))
try:
message = dealer.recv(0)
#print(message)
except Exception as e:
self.logger.error("Inside resolve: Didn't get reponse " + str(e.args))
parsed_response = self.handle_response(message,self.returnPayload)
return parsed_response
def send_request_and_await_response(self, request):
self.logger.info(" Sending request ...")
try:
if self.socket.closed:
self.logger.error("No active connection")
else:
try:
self.socket.send(request,flags=0)
except Exception as e:
self.logger.error("Error appeared " + str(e.args))
try:
response = self.socket.recv(flags=0)
return response
except Exception as e:
self.logger.error("Didn't get reponse " + str(e.args))
except Exception as e:
self.logger.error("Cannot send request " + str(e.args))
def handle_response(self, msg, fun):
"""
:param msg: Response from the server
"""
self.logger.info(" Inside Handle Response...")
zr = pyZestUtil.parse(msg)
print("Inside handle response ", zr["code"])
try:
if zr["code"] == 65:
return zr
#vs451: added delete response code
elif zr["code"] == 66:
return fun(zr)
elif zr["code"] == 69:
#commented two following lines as want the method to return payload
#pl = fun(zr)
#return zr["payload"]
return fun(zr)
elif zr["code"]== 128:
# Code 128 corresponds to bad request
raise PyZestException(zr, "Bad Request")
elif zr["code"] == 129:
raise PyZestException(zr, "Unauthorized request")
elif zr["code"] == 143:
raise PyZestException(zr, "UnSupported content format")
else:
raise PyZestException(zr, "Invalid code" + str(zr["code"]))
except PyZestException as e:
self.logger.error("received incorrect request " + str(e.args))
def returnPayload(self, x):
return x["payload"]
def returnInput(self, x):
return x
def closeSockets(self):
self.socket.close()
def stopObserving(self):
pass | en | 0.7291 | :param server_key: :param end_point: :param certificate_file - Client certificate file used to establish conn with the Server using CURVE zmq api #get the Logger object # set which kind of errors should be output (e.g. logging.INFO - starting from INFO severity level) #key to the ZEST db server, usually string #zest endpoint #vs451: added dealer_endpoint assignment #the TRY block describes connection establishment with the server and dealer_endpoint #connection with server #runs authentification as a background thread within a specific context #configure CURVE authentification for a given fomain ('*' - for all domains) #initialize request socket #assigning public and private keys to REQ socket #connection with dealer #return dictionary struct of header # set header options as an array of dictionaries #append Uri-path #append Uri-host #append content format # header marshal into bytes # set header options # header marshal into bytes #vs451: added delete method # set header options # header marshal into bytes #vs451: added observeMode parameter ("data" or "audit" values) #Q: guess this is observe option("data" or "audit") #vs451 added observe Mode len assignment #vs451 added observe Mode value assignment #append Max-Age #return 1 vs451: made observe method to return parsed_response instead of 1 #dealer.identity = str(header["payload"]) #print(message) :param msg: Response from the server #vs451: added delete response code #commented two following lines as want the method to return payload #pl = fun(zr) #return zr["payload"] # Code 128 corresponds to bad request | 2.057132 | 2 |
feature_selection/wrapper_method/src/__init__.py | yu-9824/feature_selection | 0 | 6612642 | <filename>feature_selection/wrapper_method/src/__init__.py
from .wrapper_method import * | <filename>feature_selection/wrapper_method/src/__init__.py
from .wrapper_method import * | none | 1 | 1.15666 | 1 | |
setup.py | zettabyte/idi-python | 0 | 6612643 | <reponame>zettabyte/idi-python<filename>setup.py
# encoding: utf-8
"""Packaging script for the ``idi`` ("I despise iTunes") command-line tool."""
import setuptools

# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as readme:
    long_description = readme.read()

# The version string is single-sourced from the idi/VERSION data file.
with open("idi/VERSION") as v:
    version = v.read().strip()

setuptools.setup(
    name                          = "idi",
    version                       = version,
    author                        = "<NAME>",
    author_email                  = "<EMAIL>",
    description                   = "I despise iTunes (idi) is an iTunes library tool",
    long_description              = long_description,
    long_description_content_type = "text/markdown",
    keywords                      = "itunes music library metadata",
    packages                      = setuptools.find_packages(),
    package_data                  = { "idi": ["VERSION"] },
    setup_requires                = ["pytest-runner>=4.2,<5"],
    tests_require                 = ["pytest>=4.0.2,<=5"],
    install_requires              = ["mutagen>=1.42.0,<2", "pytz"],
    python_requires               = "~=3.7",
    entry_points                  = { "console_scripts": ["idi = idi.commands:main"] },
    url                           = "https://github.com/zettabyte/idi-python",
    project_urls                  = {
        # BUG FIX: these two URLs previously misspelled the repo name
        # ("idi-pythpon"), pointing at a non-existent repository.
        "Source": "https://github.com/zettabyte/idi-python/",
        "Bugs"  : "https://github.com/zettabyte/idi-python/issues",
    },
    classifiers                   = [
        "Development Status :: 1 - Planning",
        "Environment :: MacOS X",
        "Intended Audience :: End Users/Desktop",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: MacOS :: MacOS X",
        "Programming Language :: Python :: 3",
        "Topic :: Multimedia :: Sound/Audio",
    ],
)
| # encoding: utf-8
import setuptools
with open("README.md", "r") as readme:
long_description = readme.read()
with open("idi/VERSION") as v:
version = v.read().strip()
setuptools.setup(
name = "idi",
version = version,
author = "<NAME>",
author_email = "<EMAIL>",
description = "I despise iTunes (idi) is an iTunes library tool",
long_description = long_description,
long_description_content_type = "text/markdown",
keywords = "itunes music library metadata",
packages = setuptools.find_packages(),
package_data = { "idi": ["VERSION"] },
setup_requires = ["pytest-runner>=4.2,<5"],
tests_require = ["pytest>=4.0.2,<=5"],
install_requires = ["mutagen>=1.42.0,<2", "pytz"],
python_requires = "~=3.7",
entry_points = { "console_scripts": ["idi = idi.commands:main"] },
url = "https://github.com/zettabyte/idi-python",
project_urls = {
"Source": "https://github.com/zettabyte/idi-pythpon/",
"Bugs" : "https://github.com/zettabyte/idi-pythpon/issues",
},
classifiers = [
"Development Status :: 1 - Planning",
"Environment :: MacOS X",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python :: 3",
"Topic :: Multimedia :: Sound/Audio",
],
) | en | 0.83829 | # encoding: utf-8 | 1.284599 | 1 |
Chapter06/nn_classification.py | marcjour303/PytML | 36 | 6612644 | <reponame>marcjour303/PytML
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn import neighbors

from utilities import load_data

# Load input data: columns are features, the last column is the class label.
input_file = 'data_nn_classifier.txt'
data = load_data(input_file)
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin int is the correct label dtype here.
X, y = data[:, :-1], data[:, -1].astype(int)

# Plot input data, one marker shape per class label.
plt.figure()
plt.title('Input datapoints')
markers = '^sov<>hp'
mapper = np.array([markers[i] for i in y])
for i in range(X.shape[0]):
    plt.scatter(X[i, 0], X[i, 1], marker=mapper[i],
            s=50, edgecolors='black', facecolors='none')
plt.savefig('figure1.pdf', format='pdf', dpi=1000)

# Number of nearest neighbors to consider
num_neighbors = 10

# Step size of the mesh grid used to draw the decision boundaries.
h = 0.01

# Create a distance-weighted k-nearest-neighbors classifier and train it.
classifier = neighbors.KNeighborsClassifier(num_neighbors, weights='distance')
classifier.fit(X, y)

# Create the mesh to plot the boundaries (1-unit margin around the data).
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
x_grid, y_grid = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# Compute the predicted class for every point on the mesh.
predicted_values = classifier.predict(np.c_[x_grid.ravel(), y_grid.ravel()])

# Reshape back onto the grid so it can be drawn as a colored map.
predicted_values = predicted_values.reshape(x_grid.shape)
plt.figure()
plt.pcolormesh(x_grid, y_grid, predicted_values, cmap=cm.Pastel1)

# Overlay the training points on the map.
for i in range(X.shape[0]):
    plt.scatter(X[i, 0], X[i, 1], marker=mapper[i],
            s=50, edgecolors='black', facecolors='none')
plt.xlim(x_grid.min(), x_grid.max())
plt.ylim(y_grid.min(), y_grid.max())
plt.title('k nearest neighbors classifier boundaries')
plt.savefig('figure2.pdf', format='pdf', dpi=1000)

# Plot the test datapoint against the training data.
test_datapoint = [[4.5, 3.6]]
plt.figure()
plt.title('Test datapoint')
for i in range(X.shape[0]):
    plt.scatter(X[i, 0], X[i, 1], marker=mapper[i],
            s=50, edgecolors='black', facecolors='none')
plt.scatter(test_datapoint[0][0], test_datapoint[0][1], marker='x',
        linewidth=3, s=200, facecolors='black')
# BUG FIX: this figure previously overwrote figure2.pdf (the boundary plot).
plt.savefig('figure3.pdf', format='pdf', dpi=1000)

# Extract the k nearest neighbors of the test datapoint.
dist, indices = classifier.kneighbors(test_datapoint)

# Plot the k nearest neighbors highlighted among the training data.
plt.figure()
plt.title('k nearest neighbors')
for i in indices:
    plt.scatter(X[i, 0], X[i, 1], marker='o',
            linewidth=3, s=100, facecolors='black')
plt.scatter(test_datapoint[0][0], test_datapoint[0][1], marker='x',
        linewidth=3, s=200, facecolors='black')
for i in range(X.shape[0]):
    plt.scatter(X[i, 0], X[i, 1], marker=mapper[i],
            s=50, edgecolors='black', facecolors='none')
# BUG FIX: save before plt.show() — saving after show() wrote a blank figure.
# Also renamed from figure3.pdf to avoid clobbering the test-datapoint plot.
plt.savefig('figure4.pdf', format='pdf', dpi=1000)
plt.show()

print("Predicted output:", classifier.predict(test_datapoint)[0])
| import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn import neighbors
from utilities import load_data
# Load input data
input_file = 'data_nn_classifier.txt'
data = load_data(input_file)
X, y = data[:,:-1], data[:,-1].astype(np.int)
# Plot input data
plt.figure()
plt.title('Input datapoints')
markers = '^sov<>hp'
mapper = np.array([markers[i] for i in y])
for i in range(X.shape[0]):
plt.scatter(X[i, 0], X[i, 1], marker=mapper[i],
s=50, edgecolors='black', facecolors='none')
plt.savefig('figure1.pdf', format='pdf', dpi=1000)
# Number of nearest neighbors to consider
num_neighbors = 10
# step size of the grid
h = 0.01
# Create a K-Neighbours Classifier model and train it
classifier = neighbors.KNeighborsClassifier(num_neighbors, weights='distance')
classifier.fit(X, y)
# Create the mesh to plot the boundaries
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
x_grid, y_grid = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Compute the outputs for all the points on the mesh
predicted_values = classifier.predict(np.c_[x_grid.ravel(), y_grid.ravel()])
# Put the computed results on the map
predicted_values = predicted_values.reshape(x_grid.shape)
plt.figure()
plt.pcolormesh(x_grid, y_grid, predicted_values, cmap=cm.Pastel1)
# Overlay the training points on the map
for i in range(X.shape[0]):
plt.scatter(X[i, 0], X[i, 1], marker=mapper[i],
s=50, edgecolors='black', facecolors='none')
plt.xlim(x_grid.min(), x_grid.max())
plt.ylim(y_grid.min(), y_grid.max())
plt.title('k nearest neighbors classifier boundaries')
plt.savefig('figure2.pdf', format='pdf', dpi=1000)
# Test input datapoint
test_datapoint = [[4.5, 3.6]]
plt.figure()
plt.title('Test datapoint')
for i in range(X.shape[0]):
plt.scatter(X[i, 0], X[i, 1], marker=mapper[i],
s=50, edgecolors='black', facecolors='none')
plt.scatter(test_datapoint[0][0], test_datapoint[0][1], marker='x',
linewidth=3, s=200, facecolors='black')
plt.savefig('figure2.pdf', format='pdf', dpi=1000)
# Extract k nearest neighbors
dist, indices = classifier.kneighbors(test_datapoint)
# Plot k nearest neighbors
plt.figure()
plt.title('k nearest neighbors')
for i in indices:
plt.scatter(X[i, 0], X[i, 1], marker='o',
linewidth=3, s=100, facecolors='black')
plt.scatter(test_datapoint[0][0], test_datapoint[0][1], marker='x',
linewidth=3, s=200, facecolors='black')
for i in range(X.shape[0]):
plt.scatter(X[i, 0], X[i, 1], marker=mapper[i],
s=50, edgecolors='black', facecolors='none')
plt.show()
plt.savefig('figure3.pdf', format='pdf', dpi=1000)
print("Predicted output:", classifier.predict(test_datapoint)[0]) | en | 0.807127 | # Load input data # Plot input data # Number of nearest neighbors to consider # step size of the grid # Create a K-Neighbours Classifier model and train it # Create the mesh to plot the boundaries # Compute the outputs for all the points on the mesh # Put the computed results on the map # Overlay the training points on the map # Test input datapoint # Extract k nearest neighbors # Plot k nearest neighbors | 3.228459 | 3 |
tests/test_series.py | timgates42/leather | 198 | 6612645 | <gh_stars>100-1000
#!/usr/bin/env python
import leather
from leather.utils import X, Y, Z
class TestSeries(leather.LeatherTestCase):
    """Unit tests covering the row formats accepted by :class:`leather.Series`."""

    def test_pairs(self):
        """Two-tuples map positionally onto the x and y dimensions."""
        rows = [('foo', 1), ('bar', 2), ('baz', 3)]

        series = leather.Series(rows)
        self.assertSequenceEqual(series.values(X), ['foo', 'bar', 'baz'])
        self.assertSequenceEqual(series.values(Y), [1, 2, 3])

    def test_lists(self):
        """Wider rows default to columns 0/1 but accept integer index overrides."""
        rows = [('foo', 1, 4), ('bar', 2, 5), ('baz', 3, 6)]

        series = leather.Series(rows)
        self.assertSequenceEqual(series.values(X), ['foo', 'bar', 'baz'])
        self.assertSequenceEqual(series.values(Y), [1, 2, 3])

        series = leather.Series(rows, x=2, y=0)
        self.assertSequenceEqual(series.values(X), [4, 5, 6])
        self.assertSequenceEqual(series.values(Y), ['foo', 'bar', 'baz'])

        # A string index into sequence rows is rejected.
        with self.assertRaises(TypeError):
            leather.Series(rows, x='words')

    def test_dicts(self):
        """Dict rows require explicit key names for each dimension."""
        rows = [
            {'a': 'foo', 'b': 1, 'c': 4},
            {'a': 'bar', 'b': 2, 'c': 5},
            {'a': 'baz', 'b': 3, 'c': 6},
        ]

        # Without key names the default integer columns are missing.
        with self.assertRaises(KeyError):
            leather.Series(rows)

        series = leather.Series(rows, x='c', y='a')
        self.assertSequenceEqual(series.values(X), [4, 5, 6])
        self.assertSequenceEqual(series.values(Y), ['foo', 'bar', 'baz'])

    def test_custom(self):
        """Arbitrary objects need accessor callables for each dimension."""
        class Record(object):
            def __init__(self, a, b, c):
                self.a = a
                self.b = b
                self.c = c

        rows = [Record('foo', 1, 4), Record('bar', 2, 5), Record('baz', 3, 6)]

        # Neither default positional access nor attribute-name strings work.
        with self.assertRaises(TypeError):
            leather.Series(rows)

        with self.assertRaises(TypeError):
            leather.Series(rows, x='words', y='more')

        # Accessors receive (row, index) and return the dimension value.
        series = leather.Series(rows, x=lambda row, i: row.b, y=lambda row, i: row.c)
        self.assertSequenceEqual(series.values(X), [1, 2, 3])
        self.assertSequenceEqual(series.values(Y), [4, 5, 6])
class TestCategorySeries(leather.LeatherTestCase):
    """Exercise :class:`leather.CategorySeries` construction from triples."""

    def test_triples(self):
        rows = [
            ('foo', 1, 'a'),
            ('bar', 2, 'a'),
            ('baz', 3, 'b')
        ]

        categorized = leather.CategorySeries(rows)

        # Column 2 is exposed as the category dimension (Z).
        self.assertSequenceEqual(categorized.values(X), ['foo', 'bar', 'baz'])
        self.assertSequenceEqual(categorized.values(Y), [1, 2, 3])
        self.assertSequenceEqual(categorized.values(Z), ['a', 'a', 'b'])
| #!/usr/bin/env python
import leather
from leather.utils import X, Y, Z
class TestSeries(leather.LeatherTestCase):
def test_pairs(self):
data = [
('foo', 1),
('bar', 2),
('baz', 3)
]
series = leather.Series(data)
self.assertSequenceEqual(series.values(X), ['foo', 'bar', 'baz'])
self.assertSequenceEqual(series.values(Y), [1, 2, 3])
def test_lists(self):
data = [
('foo', 1, 4),
('bar', 2, 5),
('baz', 3, 6)
]
series = leather.Series(data)
self.assertSequenceEqual(series.values(X), ['foo', 'bar', 'baz'])
self.assertSequenceEqual(series.values(Y), [1, 2, 3])
series = leather.Series(data, x=2, y=0)
self.assertSequenceEqual(series.values(X), [4, 5, 6])
self.assertSequenceEqual(series.values(Y), ['foo', 'bar', 'baz'])
with self.assertRaises(TypeError):
series = leather.Series(data, x='words')
def test_dicts(self):
data = [
{'a': 'foo', 'b': 1, 'c': 4},
{'a': 'bar', 'b': 2, 'c': 5},
{'a': 'baz', 'b': 3, 'c': 6}
]
with self.assertRaises(KeyError):
series = leather.Series(data)
series = leather.Series(data, x='c', y='a')
self.assertSequenceEqual(series.values(X), [4, 5, 6])
self.assertSequenceEqual(series.values(Y), ['foo', 'bar', 'baz'])
def test_custom(self):
class Obj(object):
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c =c
data = [
Obj('foo', 1, 4),
Obj('bar', 2, 5),
Obj('baz', 3, 6)
]
with self.assertRaises(TypeError):
series = leather.Series(data)
with self.assertRaises(TypeError):
series = leather.Series(data, x='words', y='more')
def get_x(row, i):
return row.b
def get_y(row, i):
return row.c
series = leather.Series(data, x=get_x, y=get_y)
self.assertSequenceEqual(series.values(X), [1, 2, 3])
self.assertSequenceEqual(series.values(Y), [4, 5, 6])
class TestCategorySeries(leather.LeatherTestCase):
def test_triples(self):
data = [
('foo', 1, 'a'),
('bar', 2, 'a'),
('baz', 3, 'b')
]
series = leather.CategorySeries(data)
self.assertSequenceEqual(series.values(X), ['foo', 'bar', 'baz'])
self.assertSequenceEqual(series.values(Y), [1, 2, 3])
self.assertSequenceEqual(series.values(Z), ['a', 'a', 'b']) | ru | 0.26433 | #!/usr/bin/env python | 2.587195 | 3 |
turbo_properties.py | ffsit/turbo-sticks | 0 | 6612646 | <filename>turbo_properties.py
import sys
from turbo_db import DBSession
from turbo_util import encrypt, decrypt
this = sys.modules[__name__]
this.cache = {}
def get_property(key, default=''):
    """Return the decrypted value stored for *key*, or *default* if absent.

    Results are memoised in the module-level ``this.cache`` so the database
    is only queried on the first lookup for a given key.  If no database
    session is available, *default* is returned.
    """
    value = this.cache.get(key)
    if value is not None:
        return value

    db = DBSession()
    if db is not None:
        with db.connection as conn:
            with conn.cursor() as cur:
                sql = """
                SELECT value
                FROM properties
                WHERE key = %s"""
                cur.execute(sql, (key,))
                row = cur.fetchone()
                if row is None:
                    return default
                value = decrypt(row[0])
                this.cache[key] = value
                return value
    # BUG FIX: previously fell through and implicitly returned None when no
    # database session was available; honour the caller's default instead.
    return default
def set_property(key, value):
    """Persist *value* for *key* (encrypted) and refresh the module cache.

    Falsy values and writes that would not change the stored value are
    silently ignored, as is the call when no database session is available.
    """
    if not value or value == get_property(key):
        return

    db = DBSession()
    if db is not None:
        with db.connection as conn:
            with conn.cursor() as cur:
                if get_property(key, None) is not None:
                    # Row already exists: update it in place.
                    sql = """
                    UPDATE properties
                    SET value = %s
                    WHERE key = %s"""
                else:
                    # First write for this key: insert a new row.
                    sql = """
                    INSERT INTO properties
                    (
                        value,
                        key
                    )
                    VALUES (
                        %s,
                        %s
                    )"""
                cur.execute(sql, (encrypt(value), key))
                this.cache[key] = value
| <filename>turbo_properties.py
import sys
from turbo_db import DBSession
from turbo_util import encrypt, decrypt
this = sys.modules[__name__]
this.cache = {}
def get_property(key, default=''):
value = this.cache.get(key)
if value is not None:
return value
db = DBSession()
if db is not None:
with db.connection as conn:
with conn.cursor() as cur:
sql = """
SELECT value
FROM properties
WHERE key = %s"""
cur.execute(sql, (key,))
row = cur.fetchone()
if row is None:
return default
value = decrypt(row[0])
this.cache[key] = value
return value
def set_property(key, value):
if not value or value == get_property(key):
return
db = DBSession()
if db is not None:
with db.connection as conn:
with conn.cursor() as cur:
sql = ''
if get_property(key, None) is None:
# Insert
sql = """
INSERT INTO properties
(
value,
key
)
VALUES (
%s,
%s
)"""
else:
# Update
sql = """
UPDATE properties
SET value = %s
WHERE key = %s"""
cur.execute(sql, (encrypt(value), key))
this.cache[key] = value
| en | 0.405784 | SELECT value FROM properties WHERE key = %s # Insert INSERT INTO properties ( value, key ) VALUES ( %s, %s ) # Update UPDATE properties SET value = %s WHERE key = %s | 2.645793 | 3 |
arcgis_proxy/validators.py | gfw-api/arcgis-proxy | 0 | 6612647 | <reponame>gfw-api/arcgis-proxy
"""VALIDATORS"""
from functools import wraps
from arcgis_proxy.routes.api import error
from flask import request
import requests
import json
import logging
from arcgis_proxy.config.servers import servers
from arcgis_proxy.utils.services import get_image_service_url
def _validate_rendering_rule(rendering_rule):
"""Validation"""
# must have a rendering rule and rule must be a valid JSON
# logging.debug('[VALIDATOR]: validate rendering rule: {}'.format(rendering_rule))
if rendering_rule:
try:
json.loads(rendering_rule)
except ValueError:
return error(status=400, detail="renderingRule not a valid JSON")
else:
return error(status=400, detail="Must provide a valid renderingRule")
def _validate_mosaic_rule(mosaic_rule):
"""Validation"""
# may have an optional mosaic rule. Rule must be a valid JSON
# logging.debug('[VALIDATOR]: validate mosaic rule: {}'.format(mosaic_rule))
if mosaic_rule:
try:
json.loads(mosaic_rule)
except ValueError:
return error(status=400, detail="mosaicRule not a valid JSON")
else:
pass
def _validate_pixel_size(pixel_size):
"""pixelSize must be an integer or empty"""
# logging.debug('[VALIDATOR]: validate pixel size')
if pixel_size:
try:
int(pixel_size)
except ValueError:
return error(status=400, detail="pixelSize must be of Type Integer")
def _validate_geostore(geostore):
"""must have a geostore ID"""
# logging.debug('[VALIDATOR]: validate geostore')
if not geostore:
return error(status=400, detail="Must provide a valid geostore ID")
def _validate_server(server, server_url):
"""most provide server or serverUrl"""
# logging.debug('[VALIDATOR]: validate server')
if server and server not in servers.keys():
return error(status=400, detail="server not in list {}".format(servers.keys()))
# logging.debug('[VALIDATOR]: validate server url')
if not server_url and not server:
return error(status=400, detail="either server or serverUrl is required")
def _validate_service(service):
"""must provide service URI"""
# logging.debug('[VALIDATOR]: validate service')
if not service:
return error(status=400, detail="service is required")
def validate_imageserver(func):
    """Decorator: validate query parameters and verify the image service.

    Runs the individual parameter validators in order, short-circuiting on
    the first failure, then probes the resolved service URL to confirm it
    is an ``esriImageService`` endpoint before invoking *func*.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        logging.info('[VALIDATOR]: validate image service')
        server = request.args.get('server', None)
        service = request.args.get('service', None)
        server_url = request.args.get('serverUrl', None)
        geostore = request.args.get('geostore', None)
        pixel_size = request.args.get('pixelSize', None)
        rendering_rule = request.args.get('renderingRule', None)
        mosaic_rule = request.args.get('mosaicRule', None)
        # An empty mosaicRule is treated the same as an absent one.
        if mosaic_rule == '':
            mosaic_rule = None

        logging.debug('[VALIDATOR]: server = {}'.format(server))
        logging.debug('[VALIDATOR]: service = {}'.format(service))
        logging.debug('[VALIDATOR]: server_url = {}'.format(server_url))
        logging.debug('[VALIDATOR]: geostore = {}'.format(geostore))
        logging.debug('[VALIDATOR]: pixel_size = {}'.format(pixel_size))
        logging.debug('[VALIDATOR]: rendering_rule = {}'.format(rendering_rule))
        logging.debug('[VALIDATOR]: mosaic_rule = {}'.format(mosaic_rule))

        # Run the validators in the original order, stopping at the first
        # one that returns an error response (each validator is only called
        # if all previous ones passed, as before).
        checks = (
            (_validate_rendering_rule, (rendering_rule,)),
            (_validate_mosaic_rule, (mosaic_rule,)),
            (_validate_geostore, (geostore,)),
            (_validate_pixel_size, (pixel_size,)),
            (_validate_server, (server, server_url)),
            (_validate_service, (service,)),
        )
        for check, check_args in checks:
            v = check(*check_args)
            if v:
                logging.debug('[VALIDATOR]: {}'.format(json.loads(v[0].data)))
                return v

        service_url = get_image_service_url(server, server_url, service)
        logging.debug('[VALIDATOR]: service_url {}'.format(service_url))

        try:
            r = requests.get(service_url + "?f=pjson")
            if r.status_code == 200:
                if not (r.json()["serviceDataType"][:16] == 'esriImageService'):
                    return error(status=400, detail="Not a valid Image Service URL")
            else:
                return error(status=400, detail="Not a valid Image Service URL")
        except Exception:
            # BUG FIX: was a bare ``except:``.  Network errors, undecodable
            # JSON and a missing serviceDataType key all mean "not an image
            # service", but KeyboardInterrupt/SystemExit now propagate.
            return error(status=400, detail="Not a valid Image Service URL")

        return func(*args, **kwargs)
    return wrapper
| """VALIDATORS"""
from functools import wraps
from arcgis_proxy.routes.api import error
from flask import request
import requests
import json
import logging
from arcgis_proxy.config.servers import servers
from arcgis_proxy.utils.services import get_image_service_url
def _validate_rendering_rule(rendering_rule):
"""Validation"""
# must have a rendering rule and rule must be a valid JSON
# logging.debug('[VALIDATOR]: validate rendering rule: {}'.format(rendering_rule))
if rendering_rule:
try:
json.loads(rendering_rule)
except ValueError:
return error(status=400, detail="renderingRule not a valid JSON")
else:
return error(status=400, detail="Must provide a valid renderingRule")
def _validate_mosaic_rule(mosaic_rule):
"""Validation"""
# may have an optional mosaic rule. Rule must be a valid JSON
# logging.debug('[VALIDATOR]: validate mosaic rule: {}'.format(mosaic_rule))
if mosaic_rule:
try:
json.loads(mosaic_rule)
except ValueError:
return error(status=400, detail="mosaicRule not a valid JSON")
else:
pass
def _validate_pixel_size(pixel_size):
"""pixelSize must be an integer or empty"""
# logging.debug('[VALIDATOR]: validate pixel size')
if pixel_size:
try:
int(pixel_size)
except ValueError:
return error(status=400, detail="pixelSize must be of Type Integer")
def _validate_geostore(geostore):
"""must have a geostore ID"""
# logging.debug('[VALIDATOR]: validate geostore')
if not geostore:
return error(status=400, detail="Must provide a valid geostore ID")
def _validate_server(server, server_url):
"""most provide server or serverUrl"""
# logging.debug('[VALIDATOR]: validate server')
if server and server not in servers.keys():
return error(status=400, detail="server not in list {}".format(servers.keys()))
# logging.debug('[VALIDATOR]: validate server url')
if not server_url and not server:
return error(status=400, detail="either server or serverUrl is required")
def _validate_service(service):
"""must provide service URI"""
# logging.debug('[VALIDATOR]: validate service')
if not service:
return error(status=400, detail="service is required")
def validate_imageserver(func):
"""serviceUrl parameter must be a valid ArcGIS Image Server instance"""
@wraps(func)
def wrapper(*args, **kwargs):
logging.info('[VALIDATOR]: validate image service')
server = request.args.get('server', None)
service = request.args.get('service', None)
server_url = request.args.get('serverUrl', None)
geostore = request.args.get('geostore', None)
pixel_size = request.args.get('pixelSize', None)
rendering_rule = request.args.get('renderingRule', None)
mosaic_rule = request.args.get('mosaicRule', None)
if mosaic_rule == '':
mosaic_rule = None
logging.debug('[VALIDATOR]: server = {}'.format(server))
logging.debug('[VALIDATOR]: service = {}'.format(service))
logging.debug('[VALIDATOR]: server_url = {}'.format(server_url))
logging.debug('[VALIDATOR]: geostore = {}'.format(geostore))
logging.debug('[VALIDATOR]: pixel_size = {}'.format(pixel_size))
logging.debug('[VALIDATOR]: rendering_rule = {}'.format(rendering_rule))
logging.debug('[VALIDATOR]: mosaic_rule = {}'.format(mosaic_rule))
v = _validate_rendering_rule(rendering_rule)
if v:
logging.debug('[VALIDATOR]: {}'.format(json.loads(v[0].data)))
return v
v = _validate_mosaic_rule(mosaic_rule)
if v:
logging.debug('[VALIDATOR]: {}'.format(json.loads(v[0].data)))
return v
v = _validate_geostore(geostore)
if v:
logging.debug('[VALIDATOR]: {}'.format(json.loads(v[0].data)))
return v
v = _validate_pixel_size(pixel_size)
if v:
logging.debug('[VALIDATOR]: {}'.format(json.loads(v[0].data)))
return v
v = _validate_server(server, server_url)
if v:
logging.debug('[VALIDATOR]: {}'.format(json.loads(v[0].data)))
return v
v = _validate_service(service)
if v:
logging.debug('[VALIDATOR]: {}'.format(json.loads(v[0].data)))
return v
service_url = get_image_service_url(server, server_url, service)
logging.debug('[VALIDATOR]: service_url {}'.format(service_url))
try:
r = requests.get(service_url + "?f=pjson")
if r.status_code == 200:
if not (r.json()["serviceDataType"][:16] == 'esriImageService'):
return error(status=400, detail="Not a valid Image Service URL")
else:
return error(status=400, detail="Not a valid Image Service URL")
except:
return error(status=400, detail="Not a valid Image Service URL")
return func(*args, **kwargs)
return wrapper | en | 0.355858 | VALIDATORS Validation # must have a rendering rule and rule must be a valid JSON # logging.debug('[VALIDATOR]: validate rendering rule: {}'.format(rendering_rule)) Validation # may have an optional mosaic rule. Rule must be a valid JSON # logging.debug('[VALIDATOR]: validate mosaic rule: {}'.format(mosaic_rule)) pixelSize must be an integer or empty # logging.debug('[VALIDATOR]: validate pixel size') must have a geostore ID # logging.debug('[VALIDATOR]: validate geostore') most provide server or serverUrl # logging.debug('[VALIDATOR]: validate server') # logging.debug('[VALIDATOR]: validate server url') must provide service URI # logging.debug('[VALIDATOR]: validate service') serviceUrl parameter must be a valid ArcGIS Image Server instance | 2.489324 | 2 |
test/test_inline/test_flush.py | amcgregor/cinje | 27 | 6612648 | # encoding: utf-8
from __future__ import unicode_literals
from cinje.inline.flush import Flush
class TestInlineFlush(object):
    """Verify where the cinje translator emits ``yield`` (flush) statements."""

    def test_non_template_function(self):
        # A plain function body produces no flushes at all.
        translated = b': def test\n\t: pass'.decode('cinje')
        assert 'yield' not in translated

    def test_natural_flush(self):
        # Literal text triggers exactly one implicit flush.
        translated = b': def test\n\tHello.'.decode('cinje')
        assert translated.count('yield') == 1

    def test_forced_omits_natural_flush(self):
        # An explicit flush replaces the implicit one rather than adding to it.
        translated = b': def test\n\tHello.\n\t: flush'.decode('cinje')
        assert translated.count('yield') == 1

    def test_forced_and_natural_flush(self):
        # Text after an explicit flush still gets its own implicit flush.
        translated = b': def test\n\tHello.\n\t: flush\n\tWorld.'.decode('cinje')
        assert translated.count('yield') == 2
| # encoding: utf-8
from __future__ import unicode_literals
from cinje.inline.flush import Flush
class TestInlineFlush(object):
def test_non_template_function(self):
assert 'yield' not in b': def test\n\t: pass'.decode('cinje')
def test_natural_flush(self):
assert b': def test\n\tHello.'.decode('cinje').count('yield') == 1
def test_forced_omits_natural_flush(self):
assert b': def test\n\tHello.\n\t: flush'.decode('cinje').count('yield') == 1
def test_forced_and_natural_flush(self):
assert b': def test\n\tHello.\n\t: flush\n\tWorld.'.decode('cinje').count('yield') == 2
| en | 0.83829 | # encoding: utf-8 | 2.552027 | 3 |
out-of-plane_x/label_test.py | SEMOrientation/3DSimulation | 0 | 6612649 | <filename>out-of-plane_x/label_test.py<gh_stars>0
#!/usr/bin/env python3
import os
import math
import random
EXAMPLES_PER_ROTATION = 5
INTERVAL = 5.0


def frame_angle(frame):
    """Return the deterministic rotation angle in degrees for *frame*.

    Seeding the RNG with the frame number reproduces the angle that was
    drawn when the frame was generated.
    """
    random.seed(frame)
    return math.degrees(random.random() * 2 * math.pi)


for f in os.listdir():
    name, ext = os.path.splitext(f)
    if ext.lower() != ".png":
        continue
    # BUG FIX: on a second run the directory contains already-renamed files
    # such as "0001_123.45.png"; int(name) raised ValueError on those.
    try:
        frame = int(name)
    except ValueError:
        continue
    angle = frame_angle(frame)
    # properly format new filename and rename
    name_ = f"{frame:04}_{angle:06.2f}"
    os.rename(f, name_ + ext)
| <filename>out-of-plane_x/label_test.py<gh_stars>0
#!/usr/bin/env python3
import os
import math
import random
EXAMPLES_PER_ROTATION = 5
INTERVAL = 5.0
for f in os.listdir():
name, ext = os.path.splitext(f)
if ext.lower() != ".png":
continue
frame = int(name)
# seed random and get the angle
random.seed(frame)
angle = math.degrees(random.random()*2*math.pi)
# properly format new filename and rename
name_ = f"{frame:04}_{angle:06.2f}"
os.rename(f, name_+ext)
| en | 0.529641 | #!/usr/bin/env python3 # seed random and get the angle # properly format new filename and rename | 2.640369 | 3 |
lhrhost/tests/messaging/dispatch/console.py | ethanjli/liquid-handling-robotics | 0 | 6612650 | <reponame>ethanjli/liquid-handling-robotics
"""Exposes a command-line serial console to the peripheral, with command validation."""
# Standard imports
import concurrent
import logging
# Local package imports
from lhrhost.messaging.dispatch import Dispatcher
from lhrhost.messaging.presentation import BasicTranslator, MessagePrinter
from lhrhost.messaging.presentation.actors import ConsoleManager
from lhrhost.messaging.transport.actors import ResponseReceiver, TransportManager
from lhrhost.tests.messaging.transport import console
from lhrhost.util import cli
# External imports
from pulsar.api import arbiter
# Logging
logging.config.dictConfig(console.LOGGING_CONFIG)
class Console(console.Console):
    """Actor-based serial console.

    Wires a response-processing pipeline (receiver -> translator ->
    dispatcher -> per-channel printers) onto a pulsar arbiter, together
    with a transport manager for sending commands and a console manager
    for interactive input.
    """

    def __init__(self, transport_loop):
        """Build the actor pipeline around *transport_loop*.

        Construction order matters: the arbiter is created first, the
        printers/dispatcher/translator chain next, and the transport and
        console managers last because they reference the earlier pieces.
        """
        self.arbiter = arbiter(start=self._start, stopping=self._stop)
        # One printer per response channel; the prefix pads output to the
        # right-hand side of the console and tags it with the channel name.
        self.echo_response_printer = MessagePrinter(
            prefix=('\t' * cli.CONSOLE_WIDTH + '[Echo]\t')
        )
        self.reset_response_printer = MessagePrinter(
            prefix=('\t' * cli.CONSOLE_WIDTH + '[Reset]\t')
        )
        self.version_response_printer = MessagePrinter(
            prefix=('\t' * cli.CONSOLE_WIDTH + '[Version]\t')
        )
        self.builtin_led_response_printer = MessagePrinter(
            prefix=('\t' * cli.CONSOLE_WIDTH + '[BuiltinLED]\t')
        )
        # Route messages to printers by channel: 'e'/'r' are exact-match
        # receivers, while 'v'/'l' match as prefixes (presumably so
        # multi-character channels like version sub-channels still route --
        # TODO confirm against Dispatcher semantics).
        self.response_dispatcher = Dispatcher(
            receivers={
                'e': [self.echo_response_printer],
                'r': [self.reset_response_printer]
            },
            prefix_receivers={
                'v': [self.version_response_printer],
                'l': [self.builtin_led_response_printer],
            }
        )
        # Translates raw responses into messages and forwards them to the
        # dispatcher.
        self.translator = BasicTranslator(
            message_receivers=[self.response_dispatcher]
        )
        self.response_receiver = ResponseReceiver(
            response_receivers=[self.translator]
        )
        # Owns the serial transport; exposes command_sender and a
        # connection synchronizer used below.
        self.transport_manager = TransportManager(
            self.arbiter, transport_loop, response_receiver=self.response_receiver
        )
        # Single worker thread for blocking console input.
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
        # Interactive prompt: sends commands through the transport manager
        # and waits for the transport to connect before becoming ready.
        self.console_manager = ConsoleManager(
            self.arbiter, self.transport_manager.command_sender, self.translator,
            console_header=cli.CONSOLE_HEADER, executor=self.executor,
            ready_waiter=self.transport_manager.connection_synchronizer.wait_connected
        )
if __name__ == '__main__':
    # Delegate argument parsing and startup to the shared console harness.
    console.main(Console)
| """Exposes a command-line serial console to the peripheral, with command validation."""
# Standard imports
import concurrent
import logging
# Local package imports
from lhrhost.messaging.dispatch import Dispatcher
from lhrhost.messaging.presentation import BasicTranslator, MessagePrinter
from lhrhost.messaging.presentation.actors import ConsoleManager
from lhrhost.messaging.transport.actors import ResponseReceiver, TransportManager
from lhrhost.tests.messaging.transport import console
from lhrhost.util import cli
# External imports
from pulsar.api import arbiter
# Logging
logging.config.dictConfig(console.LOGGING_CONFIG)
class Console(console.Console):
"""Actor-based serial console."""
def __init__(self, transport_loop):
"""Initialize member variables."""
self.arbiter = arbiter(start=self._start, stopping=self._stop)
self.echo_response_printer = MessagePrinter(
prefix=('\t' * cli.CONSOLE_WIDTH + '[Echo]\t')
)
self.reset_response_printer = MessagePrinter(
prefix=('\t' * cli.CONSOLE_WIDTH + '[Reset]\t')
)
self.version_response_printer = MessagePrinter(
prefix=('\t' * cli.CONSOLE_WIDTH + '[Version]\t')
)
self.builtin_led_response_printer = MessagePrinter(
prefix=('\t' * cli.CONSOLE_WIDTH + '[BuiltinLED]\t')
)
self.response_dispatcher = Dispatcher(
receivers={
'e': [self.echo_response_printer],
'r': [self.reset_response_printer]
},
prefix_receivers={
'v': [self.version_response_printer],
'l': [self.builtin_led_response_printer],
}
)
self.translator = BasicTranslator(
message_receivers=[self.response_dispatcher]
)
self.response_receiver = ResponseReceiver(
response_receivers=[self.translator]
)
self.transport_manager = TransportManager(
self.arbiter, transport_loop, response_receiver=self.response_receiver
)
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
self.console_manager = ConsoleManager(
self.arbiter, self.transport_manager.command_sender, self.translator,
console_header=cli.CONSOLE_HEADER, executor=self.executor,
ready_waiter=self.transport_manager.connection_synchronizer.wait_connected
)
if __name__ == '__main__':
console.main(Console) | en | 0.819089 | Exposes a command-line serial console to the peripheral, with command validation. # Standard imports # Local package imports # External imports # Logging Actor-based serial console. Initialize member variables. | 2.249717 | 2 |