id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
182005 | <reponame>s3q/andrunlook-apy<filename>andr.py
import requests
import os
import sys
import uuid
class api:
    """Minimal HTTP client for the andrunlook progress REST API.

    Every method returns the decoded JSON response, or None when the
    request (or JSON decoding) fails for any reason.
    """

    def __init__(self):
        # Base URL of the backend; endpoint paths are appended to it.
        self.API = "http://localhost:8800/"

    def savePublicProgress(self, aid, pin):
        """Store a public progress record for device `aid`."""
        try:
            data = {"aid": aid, "pin": pin}
            req = requests.post(f"{self.API}api/andrpublicprogress", data)
            return req.json()
        except Exception:  # was a bare `except:`; keep the failure -> None contract
            return None

    def getAllPublicProgress(self):
        """Fetch every public progress record."""
        try:
            req = requests.get(f"{self.API}api/andrpublicprogress/all")
            return req.json()
        except Exception:
            return None

    def saveProgress(self, aid, name, _id="", pinPublicProgress=0,
                     pinRange=None, embeddedNumbers=None, pinLength=0):
        """Store a private progress record for device `aid`.

        pinRange/embeddedNumbers default to empty lists; `None` sentinels
        replace the original mutable default arguments.
        """
        try:
            data = {"aid": aid, "name": name, "_id": _id,
                    "pinPublicProgress": pinPublicProgress,
                    "pinRange": pinRange if pinRange is not None else [],
                    "embeddedNumbers": embeddedNumbers if embeddedNumbers is not None else [],
                    "pinLength": pinLength}
            req = requests.post(f"{self.API}api/andrprogress", data)
            return req.json()
        except Exception:
            return None

    def deleteProgress(self, aid, _id):
        """Delete a single progress record."""
        try:
            req = requests.delete(
                f"{self.API}api/andrprogress/{aid}/{_id}")
            return req.json()
        except Exception:
            return None

    def deleteAllProgress(self, aid):
        """Delete every progress record for device `aid`."""
        try:
            req = requests.delete(f"{self.API}api/andrprogress/all/{aid}")
            return req.json()
        except Exception:
            return None

    def getProgress(self, aid, _id):
        """Fetch a single progress record."""
        try:
            req = requests.get(
                f"{self.API}api/andrprogress/{aid}/{_id}")
            return req.json()
        except Exception:
            return None

    def getAllProgress(self, aid):
        """Fetch every progress record for device `aid`."""
        try:
            req = requests.get(f"{self.API}api/andrprogress/all/{aid}")
            return req.json()
        except Exception:
            return None
def set_adb_dir(platform=None):
    """Return the adb tools sub-directory for the running platform.

    platform: optional platform identifier; defaults to ``sys.platform``.
        The parameter exists mainly so the mapping is directly testable.

    Exits the process with a message when the OS is not supported.
    """
    platform = (platform if platform is not None else sys.platform).lower()
    if platform.startswith("win"):
        return "windows\\"
    # BUGFIX: macOS reports sys.platform == "darwin", never "mac*";
    # accept both spellings while keeping the original "mac" check.
    elif platform.startswith(("linux", "mac", "darwin")):
        return "linux/"
    else:
        print("[!] - Don't support you os")
        os.system("pause")  # only meaningful on Windows consoles
        sys.exit()
def generate_aid():
    """Create ``andrDB.txt`` holding a random UUID, unless it already exists."""
    if os.path.isfile("andrDB.txt"):
        return
    with open("andrDB.txt", "w") as handle:
        handle.write(str(uuid.uuid4()))
def get_aid():
    """Return this machine's identifier, creating it first when missing."""
    generate_aid()
    with open("andrDB.txt") as handle:
        return handle.read()
| StarcoderdataPython |
11246375 | import logging
from pymodbus.client.sync import ModbusSerialClient as ModbusClient
from pymodbus.exceptions import ModbusException
from python_utilities import utilities
def handle_response_error(response, request=None):
    """Raise ModbusException for an error response; no-op for a normal one.

    A response carrying a ``registers`` attribute is considered healthy.
    """
    if hasattr(response, "registers"):
        return
    message = f"Modbus Response {response}"
    if request:
        message = f"{message}. Request: {request}"
    logging.getLogger().warning(message)
    raise ModbusException(message)
def handle_response(response, request):
    """Log the request, validate the response and decode its registers.

    Raises ModbusException (via handle_response_error) for an error
    response; otherwise returns the value decoded as ``request.dtype``.
    """
    logging.getLogger().info(request)
    if response.isError():
        # Removed dead commented-out alternative check; delegate to the
        # central error handler, which raises.
        handle_response_error(response, request)
    else:
        logging.getLogger().info(f"{response} {response.registers}")
        data_bytes = utilities.RegistersToBytes(response.registers)
        return utilities.TransformBytesToDataType(request.dtype, data_bytes)
def ReadHoldingRegisterEntry(entry, client, modbus_address):
    """Read one holding-register entry and decode it via handle_response."""
    resp = client.read_holding_registers(
        address=entry.address, count=entry.registers, unit=modbus_address)
    return handle_response(resp, entry)
def ReadInputRegisterEntry(entry, client, modbus_address):
    """Read one input-register entry and decode it via handle_response."""
    resp = client.read_input_registers(
        address=entry.address, count=entry.registers, unit=modbus_address)
    return handle_response(resp, entry)
class ModbusMaster:
    """Serial Modbus RTU master wrapping pymodbus' ModbusSerialClient."""

    # Serial-line defaults shared by all instances.
    parity = "N"
    bytesize = 8
    stopbits = 1
    method = "rtu"
    timeout_seconds = 0.5

    def __init__(self, port=None, baud=9600, socket=None):
        """Configure the client.

        port: serial device name (e.g. "/dev/ttyUSB0"); may be None when a
            pre-opened socket is supplied instead.
        baud: serial baud rate.
        socket: optional already-open socket injected into the client.

        Raises ValueError when neither port nor socket is given.
        """
        if port is None and socket is None:
            raise ValueError("Either port or socket must be specified")
        self.port = port
        self.baudrate = baud
        self.client = ModbusClient(method=self.method, port=self.port,
                                   parity=self.parity, timeout=self.timeout_seconds,
                                   bytesize=self.bytesize, stopbits=self.stopbits,
                                   baudrate=self.baudrate)
        if socket:
            self.client.socket = socket

    def connect(self):
        """Open the connection, reporting whether it actually succeeded.

        BUGFIX: the original printed "Connection Successful" unconditionally;
        ModbusSerialClient.connect() returns a boolean that is now checked.
        """
        if self.client.connect():
            print("Connection Successful")
        else:
            print("Connection Failed")

    def disconnect(self):
        """Close the underlying client connection."""
        self.client.close()
| StarcoderdataPython |
278841 | """
byceps.services.user.log_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from datetime import datetime
from typing import Optional
from sqlalchemy import select
from ...database import db
from ...typing import UserID
from .dbmodels.log import UserLogEntry as DbUserLogEntry
from .transfer.log import UserLogEntry, UserLogEntryData
def create_entry(
    event_type: str,
    user_id: UserID,
    data: UserLogEntryData,
    *,
    occurred_at: Optional[datetime] = None,
) -> None:
    """Create and persist a log entry for the user."""
    db_entry = build_log_entry(
        event_type, user_id, data, occurred_at=occurred_at
    )
    db.session.add(db_entry)
    db.session.commit()
def build_log_entry(
    event_type: str,
    user_id: UserID,
    data: UserLogEntryData,
    *,
    occurred_at: Optional[datetime] = None,
) -> DbUserLogEntry:
    """Assemble, but not persist, a user log entry.

    Falls back to the current UTC time when no timestamp is supplied.
    """
    timestamp = occurred_at if occurred_at is not None else datetime.utcnow()
    return DbUserLogEntry(timestamp, event_type, user_id, data)
def get_entries_for_user(user_id: UserID) -> list[UserLogEntry]:
    """Return all log entries for that user, oldest first."""
    statement = (
        select(DbUserLogEntry)
        .filter_by(user_id=user_id)
        .order_by(DbUserLogEntry.occurred_at)
    )
    rows = db.session.execute(statement).scalars().all()
    return [_db_entity_to_entry(row) for row in rows]
def get_log_entries_of_type_for_user(
    user_id: UserID, event_type: str
) -> list[UserLogEntry]:
    """Return the user's log entries of one event type, oldest first."""
    statement = (
        select(DbUserLogEntry)
        .filter_by(user_id=user_id)
        .filter_by(event_type=event_type)
        .order_by(DbUserLogEntry.occurred_at)
    )
    rows = db.session.execute(statement).scalars().all()
    return [_db_entity_to_entry(row) for row in rows]
def delete_user_login_log_entries(occurred_before: datetime) -> int:
    """Delete 'user-logged-in' log entries older than the given moment.

    Return the number of deleted log entries.
    """
    query = (
        db.session.query(DbUserLogEntry)
        .filter_by(event_type='user-logged-in')
        .filter(DbUserLogEntry.occurred_at < occurred_before)
    )
    num_deleted = query.delete()
    db.session.commit()
    return num_deleted
def _db_entity_to_entry(db_entry: DbUserLogEntry) -> UserLogEntry:
    """Convert a database row into a transfer object (data is copied)."""
    fields = {
        "id": db_entry.id,
        "occurred_at": db_entry.occurred_at,
        "event_type": db_entry.event_type,
        "user_id": db_entry.user_id,
        "data": db_entry.data.copy(),
    }
    return UserLogEntry(**fields)
| StarcoderdataPython |
11263604 | <filename>ROS/service_client.py
#!/usr/bin/env python3
import rospy
from word_count.srv import WordCount
import sys
# Give this ROS node a name so the master can identify it.
rospy.init_node('service_client')
# Block until the word_count service is advertised.
rospy.wait_for_service('word_count')
count_words = rospy.ServiceProxy('word_count', WordCount)
sentence = ' '.join(sys.argv[1:])
response = count_words(sentence)
print(response.count)
| StarcoderdataPython |
8146190 | # Generated by Django 2.0.6 on 2018-10-30 01:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: replaces Comment.comdate with
    # standard create/update timestamps and adds a soft-delete flag.

    dependencies = [
        ('news', '0005_article_user'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='comment',
            name='comdate',
        ),
        migrations.AddField(
            model_name='comment',
            name='create_time',
            # NOTE(review): default=2 for a DateTimeField looks like the
            # one-off value typed during makemigrations — confirm existing
            # rows were backfilled with a sensible timestamp.
            field=models.DateTimeField(auto_now_add=True, default=2, verbose_name='创建时间'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='comment',
            name='is_delete',
            # Soft-delete marker; rows are flagged rather than removed.
            field=models.BooleanField(default=False, verbose_name='删除标记'),
        ),
        migrations.AddField(
            model_name='comment',
            name='update_time',
            # Automatically refreshed on every save().
            field=models.DateTimeField(auto_now=True, verbose_name='更新时间'),
        ),
    ]
| StarcoderdataPython |
class PageNotFoundException(Exception):
    """Raised when a requested page cannot be found."""
| StarcoderdataPython |
190892 | # name='sopel_modules.idlerpg',
# version='0.1.0',
# description='A rewrite of the original IdleRPG to work as a module for Sopel. It incorporates some of the features of Shocky\'s IdleRPG system, though is much more in-depth.',
# long_description=readme + '\n\n' + history,
# author='<NAME>',
# author_email='<EMAIL>',
# url='http://github.com/maxpowa/sopel-idlerpg',
# packages=find_packages('.'),
# namespace_packages=['sopel_modules'],
# include_package_data=True,
# install_requires=requirements,
# tests_require=dev_requirements,
# test_suite='tests',
# license='Eiffel Forum License, version 2',
# coding=utf-8
from __future__ import unicode_literals, absolute_import, division, print_function
from sopel import module
from sopel.logger import get_logger
from helpers import Session, Player
import re
import json
import math
import time
import sys
if sys.version_info.major < 3:
    # Python 2 compatibility shims: alias builtins to their Py3 equivalents.
    str = unicode
    int = long
    range = xrange

# Module-level logger named after this module.
LOGGER = get_logger(__name__)
# Matches a netsplit QUIT message of the form ":server.a server.b".
netsplit_regex = re.compile('^:\S+\.\S+ \S+\.\S+$')
# Live sessions (channel/nick/login) for every tracked player.
all_sessions = set()
flag = False  # NOTE(review): appears unused in this file — confirm before removing.
def create_player(bot, session):
    """Create and persist a fresh player record for this session."""
    fresh_data = Player(session).get_data()
    bot.db.set_nick_value(session.login, 'idlerpg_' + session.channel, fresh_data)
def get_player(bot, session, login):
    """Load a player by nick or login; return None if no record exists.

    A live session matching the given name (by nick or login) resolves
    it to the canonical login before the database lookup.
    """
    for active in all_sessions:
        if login.lower() in (active.nick.lower(), active.login.lower()):
            login = active.login
            break
    data = bot.db.get_nick_value(login, 'idlerpg_' + session.channel)
    return Player(**data) if data else None
def save_player(bot, player):
    """Persist the player's data and register them in the channel roster."""
    channel = player.session.channel
    login = player.session.login
    bot.db.set_nick_value(login, 'idlerpg_' + channel, player.get_data())
    roster = bot.db.get_channel_value(channel, 'idlerpg_players') or []
    if login not in roster:
        roster.append(login)
        bot.db.set_channel_value(channel, 'idlerpg_players', roster)
@module.commands('idlerpg', 'irpg')
@module.require_chanmsg('[idlerpg] You can\'t configure idlerpg in a '
                        'private message!')
@module.require_privilege(module.OP, '[idlerpg] You must be an OP to '
                          'change idlerpg settings!')
@module.priority('low')
def ch_settings(bot, trigger):
    """
    .irpg <start|resume|pause> - Resume or pause idlerpg in the current channel
    """
    global all_sessions
    if not trigger.group(2):
        # No sub-command given: show the usage string above.
        bot.say(ch_settings.__doc__)
        return
    if (trigger.group(2).strip().lower() == 'resume' or
            trigger.group(2).strip().lower() == 'start'):
        # Enable the game in this channel and rebuild sessions for everyone
        # currently present.
        bot.db.set_channel_value(trigger.sender, 'idlerpg', True)
        # add sessions
        for nick in bot.channels[trigger.sender].users:
            user = bot.users[nick]
            LOGGER.info(repr(user))
            # Skip the bot itself.
            if user.nick.lower() == bot.config.core.nick.lower():
                continue
            # Account '0' means not identified with services.
            if user.account == '0':
                continue
            session = Session(trigger.sender, user.nick, user.account)
            player = get_player(bot, session, session.login)
            if player is None:
                # Only re-attach players that already have a record.
                continue
            all_sessions.add(session)
            player.session = session
            player.update(None)
            save_player(bot, player)
        bot.say('[idlerpg] Resuming idlerpg in ' + trigger.sender)
    elif ('version'.startswith(trigger.group(2).strip().lower())):
        # NOTE(review): the name `idlerpg` is never imported in this module,
        # so this branch raises NameError when used — confirm the intended
        # module reference.
        bot.say('[idlerpg] Version {} by {}, report issues at {}'.format(
            idlerpg.__version__, idlerpg.__author__, idlerpg.__repo__))
    else:
        # Any other sub-command pauses the game and drops this channel's
        # sessions, keeping sessions from other channels.
        bot.db.set_channel_value(trigger.sender, 'idlerpg', False)
        new_sessions = set()
        for session in all_sessions:
            if session.channel == trigger.sender:
                continue
            new_sessions.add(session)
        all_sessions = new_sessions
        bot.say('[idlerpg] Paused idlerpg in ' + trigger.sender)
@module.rule('^>.*')
@module.event('PRIVMSG')
@module.require_chanmsg('[idlerpg] You must play idlerpg with other people!')
@module.priority('low')
@module.thread(True)
def auth(bot, trigger):
    """Handle '>'-prefixed game commands: status lookups and leaderboards."""
    if not bot.db.get_channel_value(trigger.sender, 'idlerpg'):
        # Game not enabled in this channel.
        return
    if not trigger.account or trigger.account == '0':
        # Requires a services account ('0' means unidentified).
        return bot.notice('[idlerpg] You must be authenticated with '
                          'NickServ', destination=trigger.nick)
    session = Session(trigger.sender, trigger.nick, trigger.account)
    all_sessions.add(session)
    # Strip the leading '>' and split the command into words.
    args = trigger.args[1:]
    args = args[0][1:].strip().split(' ')
    if len(args[0]) == 0 and len(args) == 1:
        # Bare '>' with nothing after it: treat as no arguments.
        args = []
    elif len(args[0]) == 0:
        return  # This must be an unrelated > prefixed message
    if len(args) == 0 or (len(args) <= 2 and
                          'status'.startswith(args[0].lower())):
        # '>' or '>status [player]' — show (or create) a player record.
        check = get_player(bot, session, session.login)
        if (len(args) == 2):
            # Explicit player name given: look that one up instead.
            check = get_player(bot, session, args[1])
        if check is None:
            if (len(args) == 2):
                return bot.notice('[idlerpg] Player \'{}\' does not exist.'
                                  .format(args[1]), destination=trigger.nick)
            # First contact: create a record for the requester.
            create_player(bot, session)
            all_sessions.add(session)
            return bot.notice('[idlerpg] Welcome to IdleRPG, {}! You are '
                              'logged in as {}.'.format(session.nick, session.login),
                              destination=trigger.nick)
        check.update(session)
        save_player(bot, check)
        bot.notice('[idlerpg] {}'.format(check.get_status(bot, session,
                   include_xp=True, include_time=True)), destination=trigger.nick)
    elif len(args) == 1 and 'leaderboards'.startswith(args[0].lower()):
        # '>leaderboards' — rank every registered player in the channel.
        player_list = []
        name_list = bot.db.get_channel_value(trigger.sender, 'idlerpg_players')
        if not name_list:
            name_list = []
        for login in name_list:
            session = Session(trigger.sender, login, login)
            player = get_player(bot, session, session.login)
            if not player:
                continue
            # Refresh idle accounting against an anonymous session first.
            tmp = Session(session.channel, '', '')
            player.update(tmp)
            player_list.append(player)
        # Rank by level, then by relative progress toward the next level.
        player_list.sort(key=lambda x: (x.level, x.xp /
                         (x.xp_to_next_level() + x.get_penalty_time())), reverse=True)
        #TODO: Config leaderboard print amount
        size = 10 if (len(player_list) >= 10) else len(player_list)
        out = ''
        for i in range(0, size):
            player = player_list[i]
            out = '{}. {}'.format(str(i + 1), player.get_status(bot,
                  session, include_xp=True, leaderboard=True))
            bot.notice(out, destination=trigger.nick)
@module.interval(60)
def update_all(bot):
    """Periodic tick: refresh every player in channels where the game is on."""
    for session in all_sessions:
        if not bot.db.get_channel_value(session.channel, 'idlerpg'):
            continue
        player = get_player(bot, session, session.login)
        if player is None:
            continue
        # Fake session to update players with.
        player.update(Session(session.channel, '', ''))
        save_player(bot, player)
@module.rule('^[^.>].*')
def privmsg(bot, trigger):
    """Penalize a player for talking in a tracked channel."""
    for session in all_sessions:
        if (session.channel, session.nick) != (trigger.sender, trigger.nick):
            continue
        player = get_player(bot, session, session.login)
        if player is None:
            continue
        player.session = session
        # The penalty scales with the message length.
        player.penalize(len(trigger.match.string))
        player.update(session)
        save_player(bot, player)
@module.rule('.*')
@module.event('NOTICE')
def notice(bot, trigger):
    """Penalize a player for sending a NOTICE to a tracked channel."""
    for session in all_sessions:
        if (session.channel, session.nick) != (trigger.sender, trigger.nick):
            continue
        player = get_player(bot, session, session.login)
        if player is None:
            continue
        player.session = session
        # The penalty scales with the message length.
        player.penalize(len(trigger.match.string))
        player.update(session)
        save_player(bot, player)
@module.rule('.*')
@module.event('JOIN')
@module.priority('low')
def join(bot, trigger):
    """Re-attach a known player's session when they join a tracked channel."""
    if not bot.db.get_channel_value(trigger.sender, 'idlerpg'):
        return
    if not trigger.account:
        return
    fresh = Session(trigger.sender, trigger.nick, trigger.account)
    player = get_player(bot, fresh, fresh.login)
    if player is not None:
        all_sessions.add(fresh)
        player.session = fresh
        player.update(None)
        save_player(bot, player)
@module.rule('.*')
@module.event('QUIT')
@module.priority('high')
def quit(bot, trigger):
    """Handle a user quitting IRC: penalize them and drop their sessions.

    Netsplit quits (quit message of the form "server.a server.b") are not
    penalized, since the user did not leave voluntarily.
    """
    global all_sessions
    netsplit = netsplit_regex.match(trigger.match.string)
    new_sessions = set()
    for session in all_sessions:
        if session.nick == trigger.nick:
            # Rebuild the session for the quitting nick before updating.
            session = Session(session.channel, trigger.nick, session.login)
            player = get_player(bot, session, session.login)
            if player is None:
                continue
            player.session = session
            if netsplit:
                # No penalty for netsplits; just refresh idle accounting.
                player.update(None)
            else:
                player.penalize(20)
                player.update(session)
            save_player(bot, player)
        else:
            # Keep every other user's sessions.
            new_sessions.add(session)
    all_sessions = new_sessions
@module.rule('.*')
@module.event('NICK')
@module.priority('high')
def nick(bot, trigger):
    """Handle a nick change: carry the session over and apply a penalty."""
    global all_sessions
    new_sessions = set()
    for session in all_sessions:
        if session.nick == trigger.nick:
            # On NICK events trigger.sender holds the new nick.
            session.nick = trigger.sender
            new_sessions.add(session)
            player = get_player(bot, session, session.login)
            if player is None:
                continue
            player.session = session
            player.penalize(30)
            player.update(session)
            save_player(bot, player)
    # NOTE(review): sessions whose nick does NOT match are never added to
    # new_sessions, so every other user's session is dropped on any nick
    # change — confirm whether an `else: new_sessions.add(session)` branch
    # is missing (compare with quit/part/kick).
    all_sessions = new_sessions
@module.rule('.*')
@module.event('PART')
@module.priority('high')
def part(bot, trigger):
    """Handle a channel part: heavy penalty and removal of the session."""
    global all_sessions
    new_sessions = set()
    for session in all_sessions:
        if session.channel != trigger.sender or trigger.nick != session.nick:
            # Unrelated session: keep it.
            new_sessions.add(session)
            continue
        player = get_player(bot, session, session.login)
        if player is None:
            continue
        player.session = session
        player.penalize(200)
        player.update(session)
        save_player(bot, player)
    all_sessions = new_sessions
@module.rule('.*')
@module.event('KICK')
@module.priority('high')
def kick(bot, trigger):
    """Handle a kick: the heaviest penalty, and removal of the session."""
    global all_sessions
    new_sessions = set()
    for session in all_sessions:
        # trigger.args[1] is the nick that was kicked.
        if session.channel != trigger.sender or session.nick != trigger.args[1]:
            # Unrelated session: keep it.
            new_sessions.add(session)
            continue
        player = get_player(bot, session, session.login)
        if player is None:
            continue
        player.session = session
        player.penalize(250)
        player.update(session)
        save_player(bot, player)
    all_sessions = new_sessions
4957399 | import sys
# Reference: http://w.livedoor.jp/met-python/d/matplotlib
import numpy as np
import matplotlib.pyplot as plt
import formula
if __name__ == '__main__':
    # Optional CLI argument selects the rate.
    # NOTE(review): `rate` is only printed, never applied below — confirm.
    if len(sys.argv) < 2:
        rate = 2
    else:
        rate = int(sys.argv[1])
    print('rate =', rate)
    print()
    # Relationships between blocks, slots and data:
    # n_blocks * block = slot
    # n_slots * slot = data
    # n_blocks * n_slots * block = data
    # n_slots = redundancy
    # data_blocks = n_slots * n_blocks
    data_blocks = 128      # number of data blocks to protect
    pb = 0.99              # per-block survival probability
    max_redundancy = 64    # x-axis range: candidate slot counts
    point_styles = ['o', 'D', 's', '+'] * 2  # one marker per curve
    index = 0
    # Plot one curve per parity-slot count (0 through 4).
    for n_parity_slots in range(0, 4 + 1):
        xs = np.arange(1, max_redundancy + 1)
        ys = []
        for x in xs:
            n_slots = x
            if n_slots <= n_parity_slots:
                # Not enough slots to even hold the parity; skip this point.
                continue
            if n_slots == 0:
                raise RuntimeError('n_slots must be positive integer.')
            n_blocks = data_blocks / n_slots
            # Probability that a whole slot (n_blocks blocks) survives.
            ps = pb ** n_blocks
            # print('n_slots=x={}'.format(x))
            # print('n_blocks={}'.format(n_blocks))
            # print('n_slots={}'.format(n_slots))
            # print('pb={:.3f}'.format(pb))
            # print('ps={:.3f}'.format(ps))
            # print()
            pd = formula.p_reed_solomon(int(n_slots), int(n_parity_slots), ps)
            ys.append(pd)
        # Skipped leading points have no y-value; trim xs to match ys.
        xs = xs[len(xs)-len(ys):]
        plt.plot(xs, ys, 'b{}'.format(point_styles[index]))
        # plt.plot(xs, ys, lw=1, color='b')
        index += 1
        # print('len(xs) =', len(xs))
        # print('len(ys) =', len(ys))
        # print('xs =', list(xs))
        # print('ys =', list(ys))
        # print()
    plt.xlim(xmin=0, xmax=max_redundancy+1)
    plt.ylim(ymin=-0.05, ymax=1.05)
    plt.grid(True)
    # from ticket_89.py
    plt.minorticks_on()
    plt.locator_params(axis='both', tight=False, nbins=21)  # grid spacing
    plt.show()
| StarcoderdataPython |
12816581 | '''
Author: <NAME> (@mirmirik)
Twitter API'sine bağlanıp, belirli bir tweet'i RT edenleri takipten çıkarmak ya da bloklamak için yazılmış deneme / sandbox kodu.
myTwitter.cfg dosyası içine ilgili değerlerin eklenmesi gerekmektedir.
Konfigürasyon dosyası değerleri:
[auth]
ACCOUNT = <bilgilerine erişilecek size ait hesap adı>
CONSUMER_KEY = <Twitter Development / APPS kısmından alınacak olacan CONSUMER KEY>
CONSUMER_SECRET = <Twitter Development / APPS kısmından alınacak olacan CONSUMER SECRET>
IN_DEBUG_MODE = True ise,
Bloklanan ya da takipten çıkarılan kullanıcıların bilgilerini data/ dizini altındaki dosyaya yazar
Eğer bu koddan etkilenmesini istemediğiniz kullanıcılar varsa, onların user_id'lerini, twStart.py içindeki WhiteListUsers()
listesine eklemeniz gerekiyor.
Komut satırından çalıştırma:
python blockRTs.py
-t <Tweet ID> : RT'lenen tweet'in ID'si (tweet'i browser'da açıp adres satırındaki numarayı kullanabilirsiniz)
-s <True / False> : Eğer "-s True" olarak kullanırsanız, blok, mute ya da unfollow işlemi için onayınızı sormaz. Direkt yapar.
Twitter API'ye erişim sağlayan wrapper library:
https://github.com/sixohsix/twitter
Twitter User Object detayları:
https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/user-object
Bir tweet'i RT edenleri alan API:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-retweeters-ids
Geliştiriciler için kullanıcı ve hesap bilgilerinin kullanımı :
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/overview
'''
import twStart
from datetime import datetime
import argparse
from colorama import Fore, Back, Style, init
import gettext
_ = gettext.gettext  # translation hook; identity unless a catalog is installed

# When True, blocked/unfollowed user info is also written to a file under data/.
IN_DEBUG_MODE: bool = False
# Date stamp used in the debug file name, e.g. "20231105".
TODAY_FORMATTED = datetime.today().strftime('%Y%m%d')
def BUM(tw, user, action):
    """Block, Unfollow or Mute a user depending on *action*.

    tw: Twitter API object used for the (currently disabled) calls.
    user: user id to process; whitelisted users are skipped entirely.
    action: 'B' to block, 'U' to unfollow, 'M' to mute; anything else is a no-op.
    """
    if user in twStart.WHITE_LIST_USERS:
        return
    if action == "B":
        print(_("Blocked: {0}").format(user))
        # TODO: Uncomment the code below
        # tw.blocks.create(user_id=usrId, skip_status=1, include_entities=False)
    elif action == "M":
        print(_("Muted: {0}").format(user))
        # TODO: Uncomment the code below
        # tw.users.mutes(user_id=usrId)
    elif action == "U":
        print(_("Unfollowed: {0}").format(user))
        # TODO: Uncomment the code below
        # tw.friendships.destroy(user_id=usrId)
    return
def blockRetweeters(TweetId, IsSilent):
    """List everyone who retweeted *TweetId* and optionally act on them.

    TweetId: id of the retweeted tweet.
    IsSilent: when truthy, skip the confirmation prompt before acting.
    """
    activeCursor = -1
    fileName = twStart.DATA_FOLDER + "/RTBlocked_" + TODAY_FORMATTED + ".txt"
    if IN_DEBUG_MODE:
        fn = open(fileName, "w+")
    print("{:25s}{:25s}".format("User Id", "User Name"))
    print("-" * 100)
    if IN_DEBUG_MODE:
        fn.write("{0}{1}".format("User Id", "User Name") + "\n")
    tw = twStart.hitTwitter()
    # BUGFIX: collect ids across ALL pages — previously only the final
    # page's `f["ids"]` was acted upon after the loop.
    all_ids = []
    while activeCursor != 0:
        f = tw.statuses.retweeters.ids(cursor=activeCursor, count=100,
                                       _id=TweetId, stringify_ids=True)
        userIds = ",".join(f["ids"])
        users = tw.users.lookup(user_id=userIds)
        for i, usrId in enumerate(f["ids"]):
            # Whitelisted users are shown in red (they will be skipped).
            color = Fore.RED if usrId in twStart.WHITE_LIST_USERS else Fore.GREEN
            print(Style.BRIGHT + color + "{:25s} {:25s}".format(usrId, users[i]["screen_name"]))
            if IN_DEBUG_MODE:
                fn.write("{0}\t{1}".format(usrId, users[i]["screen_name"]) + "\n")
        all_ids.extend(f["ids"])
        activeCursor = f["next_cursor"]
    if IN_DEBUG_MODE:
        fn.close()
    print("-"*96)
    print(_("What do you want to do with all these users (case sensitive)?"))
    remove = input(_("[B]lock / [U]nfollow / [M]ute / [E]xit: "))
    # BUGFIX: honour the advertised [E]xit choice instead of falling
    # through to the confirmation/no-op loop.
    if remove == "E":
        print(_("Nothing to do here!"))
        return
    if (IsSilent == False):
        RUsure = input(_("Are you sure? [Y]es / [N]o: "))
    else:
        RUsure = "Y"
    if RUsure == "Y":
        for usrId in all_ids:
            BUM(tw, usrId, remove)
        print(_("All done! Happy tweeting :) "))
        return
    print(_("Nothing to do here!"))
def main():
    """Parse the command line and dispatch to blockRetweeters."""
    parser = argparse.ArgumentParser(description=_("Blocks, mutes or unfollows specific Tweet's all retweeters."))
    parser.add_argument("-t",
                        dest="TWEET_ID",
                        help=_("Tweet Id to be used to define target users. All users those retweeted this one will be removed"))
    parser.add_argument("-s",
                        dest="SILENT",
                        help=_("If used, there won't be a confirmation for removal action."),
                        default=False)
    args = parser.parse_args()
    if args.TWEET_ID:
        # BUGFIX: argparse delivers -s as a *string*, so even "-s False"
        # was truthy downstream. Normalise it to a real boolean.
        silent = str(args.SILENT).strip().lower() in ("true", "1", "yes")
        blockRetweeters(args.TWEET_ID, silent)
    else:
        print(_("Please use -t parameter to define TweetId"))
if __name__ == "__main__":
    # colorama: reset colour state after every print before running the CLI.
    init(autoreset=True)
    main()
| StarcoderdataPython |
6589034 | version = __version__ = "0.1.4"
| StarcoderdataPython |
3423246 | <reponame>sakshigupta87/ml_algo_prac<filename>bar.py
#!/usr/bin/python3
import matplotlib.pyplot as plt

# Two overlaid bar series sharing one axes; removed the commented-out
# experiments (colour-only bars, scale note) that were dead code.
y = [200, 40, 60, 59]
x = ["sam", "om", "anu", "eva"]
y1 = [30, 50, 67, 89]
x1 = ["ss", "oo", "aa", "ee"]
plt.xlabel("time")
plt.ylabel("distance")
plt.bar(x, y, label="river")
plt.bar(x1, y1, label="railway")
plt.legend()
plt.grid(color='r')
plt.show()
| StarcoderdataPython |
4940397 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from project11_flexbe_states.standby_state import StandbyState
from project11_flexbe_states.manual_state import ManualState
from project11_flexbe_states.autonomous_state import AutonomousState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Mon Mar 18 2019
@author: <NAME>
'''
# NOTE: generated by the FlexBE editor — manual edits outside the
# [MANUAL_*] tags will be overwritten on regeneration.
class Project11TopLevelBehaviorSM(Behavior):
    '''
    Handles the top level states for the Project11 Backseat Driver
    '''

    def __init__(self):
        super(Project11TopLevelBehaviorSM, self).__init__()
        self.name = 'Project 11 Top Level Behavior'

        # parameters of this behavior

        # references to used behaviors

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        # x:30 y:365, x:130 y:365
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        # Three-state machine: standby <-> manual <-> autonomous, with every
        # state able to transition to either of the other two.
        with _state_machine:
            # x:245 y:69
            OperatableStateMachine.add('standby',
                                       StandbyState(),
                                       transitions={'manual': 'manual', 'autonomous': 'autonomous'},
                                       autonomy={'manual': Autonomy.Off, 'autonomous': Autonomy.Off})

            # x:54 y:267
            OperatableStateMachine.add('manual',
                                       ManualState(),
                                       transitions={'autonomous': 'autonomous', 'standby': 'standby'},
                                       autonomy={'autonomous': Autonomy.Off, 'standby': Autonomy.Off})

            # x:423 y:269
            OperatableStateMachine.add('autonomous',
                                       AutonomousState(),
                                       transitions={'manual': 'manual', 'standby': 'standby'},
                                       autonomy={'manual': Autonomy.Off, 'standby': Autonomy.Off})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]

    # [/MANUAL_FUNC]
| StarcoderdataPython |
3475725 | <reponame>TechPenguineer/Python-Exercises
# What will the output of this be?
# Prompt for the user's name and greet them in upper case.
name = input("What is your name? \n")
shout = name.upper()
print(f"HELLO, {shout}!")
3434390 | <gh_stars>100-1000
#!/usr/bin/env python3
# The MIT License
# Copyright (c) 2016 Estonian Information System Authority (RIA), Population Register Centre (VRK)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Test case for verifying that invalid 'recordsFrom' and 'recordsTo'
# values in operational monitoring request result in a SOAP fault and
# 'recordsTo' value in the future results with 'nextRecordsFrom' element
# in operational monitoring response.
import os
import sys
sys.path.append('..')
import python_common as common
def _request_and_assert_soap_fault(security_server_address, template_filename,
                                   records_from, records_to, description):
    """Send one operational data request and assert a SOAP fault response.

    Builds a request from ``template_filename`` with a freshly generated
    message ID and the given 'recordsFrom'/'recordsTo' values (``None``
    leaves the value out of the template), posts it to
    ``security_server_address`` and checks that the response is a SOAP
    fault.  All intermediate artifacts are printed for the test log.
    """
    message_id = common.generate_message_id()
    print(description)
    request_contents = common.format_query_operational_data_request_template(
        template_filename, message_id, records_from, records_to)
    print("Generated the following operational data request for the client's " \
            "security server: \n")
    print(request_contents)
    response = common.post_xml_request(
        security_server_address, request_contents)
    print("\nReceived the following X-Road response: \n")
    xml = common.parse_and_clean_xml(response.text)
    print(xml.toprettyxml())
    common.assert_soap_fault(xml)


def run(client_security_server_address, producer_security_server_address,
        ssh_user, request_template_dir):
    """Run the invalid 'recordsFrom'/'recordsTo' test case.

    Sends a series of operational data requests with invalid time-range
    values to the client's security server and expects a SOAP fault for
    each.  Finally sends a request whose 'recordsTo' value lies in the
    future and expects a successful response carrying a 'nextRecordsFrom'
    element.

    ``producer_security_server_address`` is accepted for interface
    compatibility with the other test cases but is not used here.
    """
    query_data_client_template_filename = os.path.join(
        request_template_dir, "query_operational_data_client_template.xml")
    query_data_client_missing_recordsfrom_template_filename = os.path.join(
        request_template_dir,
        "query_operational_data_client_missing_recordsfrom_template.xml")
    query_data_client_missing_recordsto_template_filename = os.path.join(
        request_template_dir,
        "query_operational_data_client_missing_recordsto_template.xml")
    query_data_client_empty_search_criteria_template_filename = os.path.join(
        request_template_dir,
        "query_operational_data_client_empty_search_criteria_template.xml")
    query_data_client_missing_search_criteria_template_filename = os.path.join(
        request_template_dir,
        "query_operational_data_client_missing_search_criteria_template.xml")

    ### Operational data requests and the relevant checks

    # 'recordsTo' earlier than 'recordsFrom' must result in a SOAP fault.
    _request_and_assert_soap_fault(
        client_security_server_address,
        query_data_client_template_filename, 1479823179, 1479823175,
        "\n---- Sending an operational data request where 'recordsTo' is "
        "earlier than 'recordsFrom' to the client's security server ----\n")

    # recordsFrom >= (now - records-available-timestamp-offset-seconds)
    # must result in a SOAP fault.
    _request_and_assert_soap_fault(
        client_security_server_address,
        query_data_client_template_filename, 2479823179, 2479823185,
        "\n---- Sending an operational data request where "
        "'recordsFrom' is in the future to the client's security server ----\n")

    # A missing 'recordsFrom' element must result in a SOAP fault.
    _request_and_assert_soap_fault(
        client_security_server_address,
        query_data_client_missing_recordsfrom_template_filename,
        None, 1479823185,
        "\n---- Sending an operational data request without "
        "'recordsFrom' element to the client's security server ----\n")

    # A missing 'recordsTo' element must result in a SOAP fault.
    _request_and_assert_soap_fault(
        client_security_server_address,
        query_data_client_missing_recordsto_template_filename,
        1479823185, None,
        "\n---- Sending an operational data request without "
        "'recordsTo' element to the client's security server ----\n")

    # Missing 'recordsFrom' and 'recordsTo' elements must result in a
    # SOAP fault.
    _request_and_assert_soap_fault(
        client_security_server_address,
        query_data_client_empty_search_criteria_template_filename,
        None, None,
        "\n---- Sending an operational data request without 'recordsFrom'"
        " and 'recordsTo' elements to the client's security server ----\n")

    # A missing 'searchCriteria' element must result in a SOAP fault.
    _request_and_assert_soap_fault(
        client_security_server_address,
        query_data_client_missing_search_criteria_template_filename,
        None, None,
        "\n---- Sending an operational data request without 'searchCriteria'"
        " element to the client's security server ----\n")

    # A non-numeric 'recordsFrom' value must result in a SOAP fault.
    _request_and_assert_soap_fault(
        client_security_server_address,
        query_data_client_template_filename, "abc", 1479823185,
        "\n---- Sending an operational data request with non-numeric 'recordsFrom'"
        " value to the client's security server ----\n")

    # A 'recordsTo' value too large for a timestamp must result in a
    # SOAP fault.
    _request_and_assert_soap_fault(
        client_security_server_address,
        query_data_client_template_filename, 1479823185,
        888888888888888888888,
        "\n---- Sending an operational data request with too large 'recordsTo'"
        " value to the client's security server ----\n")

    # Negative 'recordsFrom' and 'recordsTo' values must result in a
    # SOAP fault.
    _request_and_assert_soap_fault(
        client_security_server_address,
        query_data_client_template_filename, -1479823185, -1479823183,
        "\n---- Sending an operational data request with negative 'recordsFrom'"
        " and 'recordsTo' values to the client's security server ----\n")

    # Finally, craft a request where 'recordsFrom' is in the past and
    # 'recordsTo' is in the future: this must succeed, and the response
    # must contain a 'nextRecordsFrom' element.
    message_id = common.generate_message_id()
    timestamp_before_request = common.get_remote_timestamp(
        client_security_server_address, ssh_user)
    print("\n---- Sending an operational data request where " \
            "'recordsTo' is in the future to the client's security server ----\n")
    request_contents = common.format_query_operational_data_request_template(
        query_data_client_template_filename, message_id,
        timestamp_before_request - 5, timestamp_before_request + 10)
    print("Generated the following operational data request for the client's " \
            "security server: \n")
    print(request_contents)
    response = common.post_xml_request(
        client_security_server_address, request_contents,
        get_raw_stream=True)
    mime_parts, raw_response = common.parse_multipart_response(response)
    if mime_parts:
        soap_part, record_count = common.get_multipart_soap_and_record_count(
            mime_parts[0])
        common.print_multipart_soap_and_record_count(soap_part, record_count)
        # In case the 'recordsTo' value is in the future, the element
        # 'nextRecordsFrom' is expected in the operational monitoring
        # response.  Its value is expected to be
        # (now - records-available-timestamp-offset-seconds); the offset is
        # set to 0 before the test in run_tests.py.
        common.assert_get_next_records_from_in_range(
            soap_part, timestamp_before_request)
    else:
        common.parse_and_check_soap_response(raw_response)
| StarcoderdataPython |
1617439 | <filename>zstacklib/zstacklib/utils/concurrentlog_handler.py
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" cloghandler.py: A smart replacement for the standard RotatingFileHandler
ConcurrentRotatingFileHandler: This class is a log handler which is a drop-in
replacement for the python standard log handler 'RotateFileHandler', the primary
difference being that this handler will continue to write to the same file if
the file cannot be rotated for some reason, whereas the RotatingFileHandler will
strictly adhere to the maximum file size. Unfortunately, if you are using the
RotatingFileHandler on Windows, you will find that once an attempted rotation
fails, all subsequent log messages are dropped. The other major advantage of
this module is that multiple processes can safely write to a single log file.
To put it another way: This module's top priority is preserving your log
records, whereas the standard library attempts to limit disk usage, which can
potentially drop log messages. If you are trying to determine which module to
use, there are number of considerations: What is most important: strict disk
space usage or preservation of log messages? What OSes are you supporting? Can
you afford to have processes blocked by file locks?
Concurrent access is handled by using file locks, which should ensure that log
messages are not dropped or clobbered. This means that a file lock is acquired
and released for every log message that is written to disk. (On Windows, you may
also run into a temporary situation where the log file must be opened and closed
for each log message.) This can have potentially performance implications. In my
testing, performance was more than adequate, but if you need a high-volume or
low-latency solution, I suggest you look elsewhere.
This module currently only support the 'nt' and 'posix' platforms due to the
usage of the portalocker module. I do not have access to any other platforms
for testing, patches are welcome.
See the README file for an example usage of this module.
This module supports Python 2.6 and later.
"""
import os
import sys
from random import randint
from logging import Handler, LogRecord
from logging.handlers import BaseRotatingHandler
try:
import codecs
except ImportError:
codecs = None
# Question/TODO: Should we have a fallback mode if we can't load portalocker /
# we should still be better off than with the standard RotattingFileHandler
# class, right? We do some rename checking... that should prevent some file
# clobbering that the builtin class allows.
# sibling module than handles all the ugly platform-specific details of file locking
from portalocker import lock, unlock, LOCK_EX, LOCK_NB, LockException
# Workaround for handleError() in Python 2.7+ where record is written to stderr
class NullLogRecord(LogRecord):
    """Stand-in log record whose every attribute reads as ``None``.

    Passed to ``handleError()`` from code paths that have no real record at
    hand (e.g. lock-file failures), so that Python 2.7+'s default error
    handler does not try to write record details to stderr.
    """

    def __init__(self):
        # Deliberately skip LogRecord.__init__: there is no event to describe.
        pass

    def __getattr__(self, name):
        # Any attribute lookup that misses the instance dict yields None.
        return None
class ConcurrentRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file to the
    next when the current file reaches a certain size. Multiple processes can
    write to the log file concurrently, but this may mean that the file will
    exceed the given size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, debug=True, delay=0):
        """
        Open the specified file and use it as the stream for logging.
        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.
        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.
        If maxBytes is zero, rollover never occurs.
        On Windows, it is not possible to rename a file that is currently opened
        by another process. This means that it is not possible to rotate the
        log files if multiple processes is using the same log file. In this
        case, the current log file will continue to grow until the rotation can
        be completed successfully. In order for rotation to be possible, all of
        the other processes need to close the file first. A mechanism, called
        "degraded" mode, has been created for this scenario. In degraded mode,
        the log file is closed after each log message is written. So once all
        processes have entered degraded mode, the net rotation attempt should
        be successful and then normal logging can be resumed. Using the 'delay'
        parameter may help reduce contention in some usage patterns.
        This log handler assumes that all concurrent processes logging to a
        single file will are using only this class, and that the exact same
        parameters are provided to each instance of this class. If, for
        example, two different processes are using this class, but with
        different values for 'maxBytes' or 'backupCount', then odd behavior is
        expected. The same is true if this class is used by one application, but
        the RotatingFileHandler is used by another.
        """
        # Absolute file name handling done by FileHandler since Python 2.5
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.delay = delay
        # True while in "degraded" mode, i.e. the last rotation attempt
        # failed (typically because another process still holds the file
        # open on Windows).
        self._rotateFailed = False
        self.maxBytes = maxBytes
        self.backupCount = backupCount
        # Opens self.stream_lock, the cross-process file lock.
        self._open_lockfile()
        # For debug mode, swap out the "_degrade()" method with a more a verbose one.
        if debug:
            self._degrade = self._degrade_debug
    def _open_lockfile(self):
        # Use 'file.lock' and not 'file.log.lock' (Only handles the normal "*.log" case.)
        if self.baseFilename.endswith(".log"):
            lock_file = self.baseFilename[:-4]
        else:
            lock_file = self.baseFilename
        lock_file += ".lock"
        # Only the OS-level lock on this file matters, never its contents.
        self.stream_lock = open(lock_file, "w")
    def _open(self, mode=None):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        Note: Copied from stdlib. Added option to override 'mode'
        """
        if mode is None:
            mode = self.mode
        if self.encoding is None:
            stream = open(self.baseFilename, mode)
        else:
            stream = codecs.open(self.baseFilename, mode, self.encoding)
        return stream
    def _close(self):
        """ Close file stream. Unlike close(), we don't tear anything down, we
        expect the log to be re-opened after rotation."""
        if self.stream:
            try:
                if not self.stream.closed:
                    # Flushing probably isn't technically necessary, but it feels right
                    #self.stream.flush()
                    self.stream.close()
            finally:
                self.stream = None
    def acquire(self):
        """ Acquire thread and file locks. Re-opening log for 'degraded' mode.
        """
        # handle thread lock
        Handler.acquire(self)
        # Issue a file lock. (This is inefficient for multiple active threads
        # within a single process. But if you're worried about high-performance,
        # you probably aren't using this log handler.)
        if self.stream_lock:
            # If stream_lock=None, then assume close() was called or something
            # else weird and ignore all file-level locks.
            if self.stream_lock.closed:
                # Daemonization can close all open file descriptors, see
                # https://bugzilla.redhat.com/show_bug.cgi?id=952929
                # Try opening the lock file again. Should we warn() here?!?
                try:
                    self._open_lockfile()
                except Exception:
                    self.handleError(NullLogRecord())
                    # Don't try to open the stream lock again
                    self.stream_lock = None
                    return
            lock(self.stream_lock, LOCK_EX)
        # Stream will be opened as part by FileHandler.emit()
    def release(self):
        """ Release file and thread locks. If in 'degraded' mode, close the
        stream to reduce contention until the log files can be rotated. """
        try:
            if self._rotateFailed:
                # Degraded mode: close after every record so the process
                # holding the file open does not block rotation forever.
                self._close()
        except Exception:
            self.handleError(NullLogRecord())
        finally:
            try:
                if self.stream_lock and not self.stream_lock.closed:
                    unlock(self.stream_lock)
            except Exception:
                self.handleError(NullLogRecord())
            finally:
                # release thread lock
                Handler.release(self)
    def close(self):
        """
        Close log stream and stream_lock. """
        try:
            self._close()
            if not self.stream_lock.closed:
                self.stream_lock.close()
        finally:
            # Setting stream_lock to None signals acquire()/release() to
            # skip all file-level locking from now on.
            self.stream_lock = None
            Handler.close(self)
    def _degrade(self, degrade, msg, *args):
        """ Set degrade mode or not. Ignore msg. """
        self._rotateFailed = degrade
        del msg, args # avoid pychecker warnings
    def _degrade_debug(self, degrade, msg, *args):
        """ A more colorful version of _degade(). (This is enabled by passing
        "debug=True" at initialization).
        """
        # Only prints on state *transitions* to avoid flooding stderr.
        if degrade:
            if not self._rotateFailed:
                sys.stderr.write("Degrade mode - ENTERING - (pid=%d) %s\n" %
                                 (os.getpid(), msg % args))
                self._rotateFailed = True
        else:
            if self._rotateFailed:
                sys.stderr.write("Degrade mode - EXITING - (pid=%d) %s\n" %
                                 (os.getpid(), msg % args))
                self._rotateFailed = False
    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        self._close()
        if self.backupCount <= 0:
            # Don't keep any backups, just overwrite the existing backup file
            # Locking doesn't much matter here; since we are overwriting it anyway
            self.stream = self._open("w")
            return
        try:
            # Determine if we can rename the log file or not. Windows refuses to
            # rename an open file, Unix is inode base so it doesn't care.
            # Attempt to rename logfile to tempname: There is a slight race-condition here, but it seems unavoidable
            tmpname = None
            while not tmpname or os.path.exists(tmpname):
                tmpname = "%s.rotate.%08d" % (self.baseFilename, randint(0, 99999999))
            try:
                # Do a rename test to determine if we can successfully rename the log file
                os.rename(self.baseFilename, tmpname)
            except (IOError, OSError):
                exc_value = sys.exc_info()[1]
                self._degrade(True, "rename failed. File in use? "
                              "exception=%s", exc_value)
                return
            # Q: Is there some way to protect this code from a KeboardInterupt?
            # This isn't necessarily a data loss issue, but it certainly does
            # break the rotation process during stress testing.
            # There is currently no mechanism in place to handle the situation
            # where one of these log files cannot be renamed. (Example, user
            # opens "logfile.3" in notepad); we could test rename each file, but
            # nobody's complained about this being an issue; so the additional
            # code complexity isn't warranted.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    # print "%s -> %s" % (sfn, dfn)
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(tmpname, dfn)
            # print "%s -> %s" % (self.baseFilename, dfn)
            self._degrade(False, "Rotation completed")
        finally:
            # Re-open the output stream, but if "delay" is enabled then wait
            # until the next emit() call. This could reduce rename contention in
            # some usage patterns.
            if not self.delay:
                self.stream = self._open()
    def shouldRollover(self, record):
        """
        Determine if rollover should occur.
        For those that are keeping track. This differs from the standard
        library's RotatingLogHandler class. Because there is no promise to keep
        the file size under maxBytes we ignore the length of the current record.
        """
        del record # avoid pychecker warnings
        # Is stream is not yet open, skip rollover check. (Check will occur on
        # next message, after emit() calls _open())
        if self.stream is None:
            return False
        if self._shouldRollover():
            # If some other process already did the rollover (which is possible
            # on Unix) the file our stream may now be named "log.1", thus
            # triggering another rollover. Avoid this by closing and opening
            # "log" again.
            self._close()
            self.stream = self._open()
            return self._shouldRollover()
        return False
    def _shouldRollover(self):
        if self.maxBytes > 0: # are we rolling over?
            self.stream.seek(0, 2) # due to non-posix-compliant Windows feature
            if self.stream.tell() >= self.maxBytes:
                return True
            else:
                self._degrade(False, "Rotation done or not needed at this time")
        return False
| StarcoderdataPython |
5017088 | import json
from tkinter import PhotoImage, Tk
from classes import (
CalculatableItem,
Field,
MainFrame,
Manager,
Resulter,
ResultsFrame
)
# ---- Build the calculatable items from the JSON catalogue ----
with open("items.json") as file_with_items:
    items = json.load(file_with_items)

cs_calculatable_items = [
    CalculatableItem(
        spec["name"],
        spec["image_path"],
        spec["formula"],
        spec["parameters"],
        spec["constraints"],
    )
    for spec in items["cross_sections"]
]

ls_calculatable_items = [
    CalculatableItem(
        spec["name"],
        spec["image_path"],
        spec["formula"],
        spec["parameters"],
        spec["constraints"],
    )
    for spec in items["load_schemes"]
]

# ---- Build the graphical interface ----
number_of_parameters_in_one_row = 4
_items_per_row = number_of_parameters_in_one_row + 1
number_of_rows = (
    len(cs_calculatable_items) // _items_per_row
    + len(ls_calculatable_items) // _items_per_row
    + 2
)

# Main window; its height grows with the number of item rows.
root = Tk()
root.geometry(f"640x{505 + 31*number_of_rows+126}")  # "adaptive" height
root.resizable(False, False)
root.title("MakhlaevSoftware - Beam deflection calculator")

# Main window logo.
image = PhotoImage(file="images/happy_deflection.png")
root.iconphoto(False, image)

# Cross-section selection and parameter frame.
cross_section_main_f = MainFrame(
    root,
    "Cross section",
    "Type of cross section",
    "Parameters of cross section",
)
cross_section_manager = Manager(cross_section_main_f, cs_calculatable_items)

# Load-scheme selection and parameter frame.
load_scheme_main_f = MainFrame(
    root,
    "Load scheme",
    "Type of load scheme",
    "Parameters of load scheme",
)
load_scheme_manager = Manager(load_scheme_main_f, ls_calculatable_items)
load_scheme_main_f.set_canvas_width(208)

# Results frame wiring both managers together.
results_f = ResultsFrame(root)
resulter = Resulter(results_f, cross_section_manager, load_scheme_manager)

root.mainloop()
| StarcoderdataPython |
176460 | <reponame>stastnypremysl/ftputil
# Copyright (C) 2006-2018, <NAME> <<EMAIL>>
# and ftputil contributors (see `doc/contributors.txt`)
# See the file LICENSE for licensing terms.
"""
ftp_stat_cache.py - cache for (l)stat data
"""
import time
import ftputil.error
import ftputil.lrucache
# This module shouldn't be used by clients of the ftputil library.
__all__ = []
class StatCache:
    """
    LRU (least-recently-used) cache for (l)stat results of remote paths.

    The attribute `max_age` controls expiry: an entry older than `max_age`
    seconds -- counted from the moment it was _stored_ -- is treated as if
    it had never been cached and triggers a `CacheMissError` on access.
    For example, after

      my_cache = StatCache()
      my_cache.max_age = 10
      my_cache["/home"] = ...

    the value `my_cache["/home"]` can be read for 10 seconds; afterwards
    the stat data has to be fetched from the remote host again.  A
    `max_age` of `None` (the default) means entries never expire.

    Note that `__len__` performs no age checks, so its result may include
    some or many entries that have already expired.
    """

    # Default capacity of the underlying LRU cache; changeable via `resize`.
    _DEFAULT_CACHE_SIZE = 5000

    def __init__(self):
        self._cache = ftputil.lrucache.LRUCache(self._DEFAULT_CACHE_SIZE)
        # `None` disables expiry altogether.
        self.max_age = None
        self.enable()

    def enable(self):
        """
        Allow `__setitem__` to store stat results.
        """
        self._enabled = True

    def disable(self):
        """
        Disable the cache so that further `__setitem__` calls have no
        visible effect.

        Only new storage attempts are affected: values stored before
        calling `disable` can still be retrieved unless disturbed by a
        `resize` call or normal cache expiration.
        """
        # `_enabled` is set via calling `enable` in the constructor.
        # pylint: disable=attribute-defined-outside-init
        self._enabled = False

    def resize(self, new_size):
        """
        Set the number of cache entries to the integer `new_size`.

        If the new size is smaller than the current cache size, relatively
        long-unused elements are removed.
        """
        self._cache.size = new_size

    def _age(self, path):
        """
        Return the age of the cache entry for `path` in seconds.

        Raise a `CacheMissError` if the path isn't cached.
        """
        try:
            last_stored = self._cache.mtime(path)
        except ftputil.lrucache.CacheKeyError:
            raise ftputil.error.CacheMissError(
                "no entry for path {} in cache".format(path)
            )
        return time.time() - last_stored

    def clear(self):
        """
        Clear (invalidate) all cache entries.
        """
        self._cache.clear()

    def invalidate(self, path):
        """
        Drop the cache entry for the absolute `path` if present.

        Afterwards the stat result for `path` can no longer be retrieved,
        as if it had never been stored.  A missing entry is _not_ an
        error; invalidation is idempotent.
        """
        # XXX: To be 100 % sure, this should be `host.sep`, but I don't want to
        # introduce a reference to the `FTPHost` object for only that purpose.
        assert path.startswith("/"), "{} must be an absolute path".format(path)
        try:
            del self._cache[path]
        except ftputil.lrucache.CacheKeyError:
            # Ignore errors
            pass

    def __getitem__(self, path):
        """
        Return the cached stat entry for `path`.

        Raise `CacheMissError` if the cache is disabled, the entry is
        missing, or the entry has expired.
        """
        if not self._enabled:
            raise ftputil.error.CacheMissError("cache is disabled")
        # `_age` may itself raise a `CacheMissError` for unknown paths.
        if self.max_age is not None and self._age(path) > self.max_age:
            self.invalidate(path)
            raise ftputil.error.CacheMissError(
                "entry for path {} has expired".format(path)
            )
        # XXX: I don't know if this may raise a `CacheMissError` in case of
        # race conditions. I prefer robust code.
        try:
            return self._cache[path]
        except ftputil.lrucache.CacheKeyError:
            raise ftputil.error.CacheMissError(
                "entry for path {} not found".format(path)
            )

    def __setitem__(self, path, stat_result):
        """
        Store `stat_result` for the absolute `path`, unless the cache is
        disabled.
        """
        assert path.startswith("/")
        if self._enabled:
            self._cache[path] = stat_result

    def __contains__(self, path):
        """
        Support for the `in` operator: true if valid (unexpired) data for
        `path` is in the cache, else false.
        """
        try:
            # Implicitly performs the age test; may raise `CacheMissError`.
            self[path]
        except ftputil.error.CacheMissError:
            return False
        return True

    #
    # The following methods are only intended for debugging!
    #
    def __len__(self):
        """
        Return the number of entries in the cache, possibly including
        already-expired ones.
        """
        return len(self._cache)

    def __str__(self):
        """
        Return a string representation of the cache contents.
        """
        return "\n".join(
            "{}: {}".format(key, self[key]) for key in sorted(self._cache)
        )
| StarcoderdataPython |
3540459 | import copy
import itertools
import os
import os.path as osp
import shutil
from collections import OrderedDict
from xml.dom.minidom import Document
import detectron2.utils.comm as comm
import torch
from detectron2.evaluation import COCOEvaluator
from detectron2.utils.file_io import PathManager
from .table_evaluation.evaluate import calc_table_score
class ICDAREvaluator(COCOEvaluator):
    """COCO-style evaluator extended with the ICDAR table-detection metric.

    In addition to the standard COCO box metrics, computes the weighted F1
    ("wF1") table score by exporting the predicted tables to ICDAR cTDaR
    XML result files and scoring them with ``calc_table_score``.
    """

    # Minimum detection confidence for a predicted table to be exported to
    # the XML results used for the wF1 computation.
    TABLE_SCORE_THRESHOLD = 0.7

    def evaluate(self, img_ids=None):
        """
        Args:
            img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset

        Returns:
            An ``OrderedDict`` with the COCO metrics plus the "wF1" table
            score; an empty dict on non-main distributed workers or when
            there are no predictions.
        """
        if self._distributed:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))
            # Only the main process aggregates and evaluates.
            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions
        if len(predictions) == 0:
            self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
            return {}
        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(predictions, f)
        self._results = OrderedDict()
        if "proposals" in predictions[0]:
            self._eval_box_proposals(predictions)
        if "instances" in predictions[0]:
            self._eval_predictions(predictions, img_ids=img_ids)
            self.evaluate_table(predictions)
        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)

    def evaluate_table(self, predictions):
        """Compute the ICDAR weighted F1 score and store it in ``self._results``."""
        xml_dir = self.convert_to_xml(predictions)
        results = calc_table_score(xml_dir)
        self._results["wF1"] = results['wF1']

    def convert_to_xml(self, predictions):
        """Export per-image table detections to ICDAR cTDaR XML result files.

        Detections scoring below ``TABLE_SCORE_THRESHOLD`` are dropped.  Any
        previous "xml_results" directory under ``self._output_dir`` is
        replaced.

        Returns:
            The path of the directory containing the generated XML files.
        """
        output_dir = osp.join(self._output_dir, "xml_results")
        if os.path.exists(output_dir):
            shutil.rmtree(output_dir)
        os.makedirs(output_dir, exist_ok=True)
        coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
        # Group sufficiently confident detections by image id.
        results_dict = {}
        for result in coco_results:
            if result["score"] < self.TABLE_SCORE_THRESHOLD:
                continue
            results_dict.setdefault(result["image_id"], []).append(result)
        for image_id, tables in results_dict.items():
            file_name = f"cTDaR_t{image_id:05d}.jpg"
            doc = Document()
            root = doc.createElement('document')
            root.setAttribute('filename', file_name)
            doc.appendChild(root)
            for table_id, table in enumerate(tables, start=1):
                nodeManager = doc.createElement('table')
                nodeManager.setAttribute('id', str(table_id))
                # COCO bbox is (x, y, width, height); the cTDaR format wants
                # the four corner points (top-left, bottom-left,
                # bottom-right, top-right).
                bbox = list(map(int, table['bbox']))
                bbox_str = '{},{} {},{} {},{} {},{}'.format(bbox[0], bbox[1],
                                                            bbox[0], bbox[1] + bbox[3],
                                                            bbox[0] + bbox[2], bbox[1] + bbox[3],
                                                            bbox[0] + bbox[2], bbox[1])
                nodeCoords = doc.createElement('Coords')
                nodeCoords.setAttribute('points', bbox_str)
                nodeManager.appendChild(nodeCoords)
                root.appendChild(nodeManager)
            filename = '{}-result.xml'.format(file_name[:-4])
            # Context manager guarantees the handle is closed even if
            # writexml raises (the original leaked the handle on error).
            with open(os.path.join(output_dir, filename), 'w') as fp:
                doc.writexml(fp, indent='', addindent='\t', newl='\n', encoding="utf-8")
        return output_dir
# No CLI behavior; this module is meant to be imported by the eval pipeline.
if __name__ == '__main__':
    pass
| StarcoderdataPython |
6702595 | from .cleaner import Cleaner
from .capitalizationcleaner import CapitalizationCleaner
from .characterencodingcleaner import CharacterEncodingCleaner
from .diacriticcleaner import DiacriticCleaner
from .emoticoncleaner import EmoticonCleaner
from .hashtagcleaner import HashtagClener
from .htmlcleaner import HtmlCleaner
from .lemmatizationcleaner import LemmatizationCleaner
from .mentioncleaner import MentionCleaner
from .punctuationcleaner import PuntuationCleaner
from .stopwordscleaner import StopWordsCleaner
from .urlcleaner import UrlCleaner
from .wordlengtheningcleaner import WordLengtheningCleaner
from .twittercorpuscleaner import TwitterCorpusCleaner
from .numericcleaner import NumericCleaner
from .escapecleaner import EscapeCleaner
from .cleanersbuilder import CleanersBuilder
from .cleaningtask import CleaningTask
| StarcoderdataPython |
5066308 | <gh_stars>0
#!/usr/bin/env python
import sqlite3
def generate_sqlite(csv_path, sqlite_path):
    """Build a pid -> resource_type sqlite lookup table from a CSV file.

    The CSV must have a header row followed by ``pid,resource_type`` rows;
    blank lines and rows whose pid or resource_type is empty are skipped.

    :param csv_path: path to the input CSV file (UTF-8)
    :param sqlite_path: path of the sqlite database to create
    """
    # Text mode with an explicit encoding replaces the original
    # binary read + manual decode.
    with open(csv_path, encoding='utf8') as f:
        data = f.read()
    db = sqlite3.connect(sqlite_path)
    try:
        cursor = db.cursor()
        cursor.execute(
            'CREATE TABLE resource_types '
            '(pid TEXT NOT NULL UNIQUE, resource_type TEXT NOT NULL)')
        db.commit()
        # Make sure the line isn't empty, and skip the first header line.
        for line in [line for line in data.split('\n') if line.strip()][1:]:
            pid, resource_type = line.split(',')
            pid = pid.strip()
            resource_type = resource_type.strip()
            if pid and resource_type:
                cursor.execute(
                    'INSERT INTO resource_types (pid, resource_type) VALUES (?, ?)',
                    (pid, resource_type))
        db.commit()
    finally:
        # Bug fix: always release the connection, even if the CSV is malformed
        # (the original leaked it on any exception).
        db.close()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Generate primo resource type sqlite db from csv')
parser.add_argument('--csv', dest='csv_path', help='CSV filename')
parser.add_argument('--sqlite', dest='sqlite_path', help='name of sqlite db')
args = parser.parse_args()
if args.csv_path and args.sqlite_path:
generate_sqlite(args.csv_path, args.sqlite_path)
else:
print(f'must pass in CSV and Sqlite arguments')
| StarcoderdataPython |
3360470 | # -*- coding: utf8 -*-
from django.contrib.sites.models import Site
from django.test import TestCase
import factory
from .models import Family, Name, Person, PersonFamily
# pylint: disable=no-member
class NameFactory(factory.django.DjangoModelFactory):
    """Factory producing Name rows: sequential family names of type FAMILYNAME."""
    class Meta:
        model = Name
    name = factory.Sequence(lambda n: "Familyname%s" % n)
    typ = Name.FAMILYNAME
class PersonFactory(factory.django.DjangoModelFactory):
    """Factory producing Person rows with an attached family Name and a
    sequential handle; each person is added to the current Django site."""
    class Meta:
        model = Person
    name = factory.RelatedFactory(NameFactory, 'person')
    handle = factory.Sequence(lambda n: "P_%s" % n)
    gender_type = Person.UNKNOWN
    @factory.post_generation
    def sites(self, create, extracted, **kwargs):
        # Associate every generated person with the current site so
        # site-filtered queries in the app can find it.
        current_site = Site.objects.get_current()
        self.sites.add(current_site)
class FamilyFactory(factory.django.DjangoModelFactory):
    """Factory producing Family rows attached to the current Django site."""
    class Meta:
        model = Family
    @factory.post_generation
    def sites(self, create, extracted, **kwargs):
        current_site = Site.objects.get_current()
        self.sites.add(current_site)
class PersonTest(TestCase):
    """Exercises Person/Family relationships built with the factories above."""
    def test_add_person(self):
        # str(Person) renders "<first name> <handle>"; first name is empty here.
        p = PersonFactory()
        self.assertEqual(str(p), ' %s' % p.handle)
    def test_get_children(self):
        # Children are linked to a family through PersonFamily rows.
        father = PersonFactory()
        mother = PersonFactory()
        child1 = PersonFactory()
        child2 = PersonFactory()
        nonchild = PersonFactory()
        family = FamilyFactory(father=father, mother=mother)
        PersonFamily.objects.create(person=child1, family=family)
        PersonFamily.objects.create(person=child2, family=family)
        self.assertTrue(child1 in family.get_children())
        self.assertTrue(child2 in family.get_children())
        self.assertFalse(nonchild in family.get_children())
    def test_person_get_father_mother_ancestors_descendants(self):
        # Builds a three-generation tree and checks traversal helpers.
        grandfather_f = PersonFactory()
        grandmother_f = PersonFactory()
        grandfather_m = PersonFactory()
        grandmother_m = PersonFactory()
        father = PersonFactory()
        mother = PersonFactory()
        child1 = PersonFactory()
        child2 = PersonFactory()
        family = FamilyFactory(father=grandfather_f, mother=grandmother_f)
        PersonFamily.objects.create(person=father, family=family)
        family = FamilyFactory(father=grandfather_m, mother=grandmother_m)
        PersonFamily.objects.create(person=mother, family=family)
        family = FamilyFactory(father=father, mother=mother)
        PersonFamily.objects.create(person=child1, family=family)
        PersonFamily.objects.create(person=child2, family=family)
        self.assertEqual(father, child1.get_father())
        self.assertEqual(mother, child2.get_mother())
        # Tree roots have no parents.
        self.assertEqual(None, grandfather_f.get_father())
        self.assertEqual(None, grandfather_f.get_mother())
        self.assertIn(grandfather_f, child1.ancestors())
        self.assertIn(grandmother_f, child1.ancestors())
        self.assertIn(grandfather_m, child1.ancestors())
        self.assertIn(grandmother_m, child1.ancestors())
        self.assertIn(father, child1.ancestors())
        self.assertIn(mother, child1.ancestors())
        # ancestors() must exclude self and siblings.
        self.assertNotIn(child1, child1.ancestors())
        self.assertNotIn(child2, child1.ancestors())
        self.assertEqual(len(child2.ancestors()), 6)
        self.assertIn(child1, father.descendants())
        self.assertIn(child1, grandfather_m.descendants())
        self.assertEqual(len(grandfather_m.descendants()), 3)
        family = Family.objects.get(father=grandfather_f)
        self.assertIn(child1, family.get_grandchildren())
        self.assertIn(child2, family.get_grandchildren())
        self.assertNotIn(father, family.get_grandchildren())
        self.assertEqual(len(family.get_grandchildren()), 2)
    def test_person_resethandle(self):
        # reset_handle() rewrites the handle to include last name and pk,
        # and must be idempotent.
        p = PersonFactory(last_name='Abcdefgh')
        self.assertTrue(p.handle.startswith('P_'))
        p.reset_handle()
        self.assertTrue(p.handle.startswith('P_'))
        self.assertTrue(p.handle.endswith('-%d' % p.id))
        self.assertIn(p.last_name, p.handle)
        handle = p.handle
        p.reset_handle()
        self.assertEqual(handle, p.handle)
| StarcoderdataPython |
3290883 | <filename>tests/test_args_opts/conftest.py
# pylint: disable = redefined-outer-name, protected-access
import inspect
import pytest
from arger import Arger, Argument
from arger.docstring import ParamDocTp
from arger.main import FlagsGenerator
@pytest.fixture
def param_doc(hlp=''):
    """ParamDocTp built from an empty type string and (default-empty) help text."""
    return ParamDocTp.init('', hlp)
@pytest.fixture
def add_arger():
    """Factory fixture: a callable that registers an Argument on a fresh Arger parser."""
    def _add(argument):
        par = Arger()
        argument.add_to(par)
        return par
    return _add
@pytest.fixture
def gen_options():
    """FlagsGenerator using '-' as the option prefix."""
    return FlagsGenerator('-')
@pytest.fixture
def parameter(name, tp):
    """inspect.Parameter for the test's `name`/`tp` fixtures (POSITIONAL_OR_KEYWORD)."""
    return inspect.Parameter(
        name,
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
        annotation=tp,
    )
@pytest.fixture
def argument(parameter, param_doc, gen_options) -> Argument:
    """Argument assembled from the parameter, doc and flag-generator fixtures."""
    return Argument.create(parameter, param_doc, gen_options)
@pytest.fixture
def parser(add_arger, argument):
    """Arger parser with the `argument` fixture already registered."""
    return add_arger(argument)
| StarcoderdataPython |
1912732 | <gh_stars>100-1000
#
# Copyright (C) 2020 GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gsystree as st
class Clock_domain(st.Component):
    """System-tree component wrapping the clock-domain VP implementation.

    :param parent: parent component in the system tree
    :param name: component instance name
    :param frequency: clock frequency passed to the VP model
    :param factor: clock multiplication factor (default 1)
    """
    def __init__(self, parent, name, frequency, factor=1):
        super(Clock_domain, self).__init__(parent, name)
        self.add_properties({
            'vp_component': "vp.clock_domain_impl",
            'frequency': frequency,
            'factor': factor
        })
    def gen_gtkw(self, tree, comp_traces):
        # Expose the cycle counter and clock period as GTKWave traces.
        tree.add_trace(self, 'cycles', 'cycles', tag='clock')
        tree.add_trace(self, 'period', 'period', tag='overview')
| StarcoderdataPython |
9649746 | <filename>piexif/_transplant.py<gh_stars>1000+
import io
from ._common import *
def transplant(exif_src, image, new_file=None):
    """Copy the Exif segment from one JPEG into another.

    :param exif_src: source JPEG -- raw JPEG bytes (starting with the SOI
        marker ``\\xff\\xd8``) or a filename
    :param image: destination JPEG -- raw bytes or a filename
    :param new_file: optional output target; an ``io.BytesIO`` to write into,
        or an output filename. If omitted and *image* was given as a
        filename, that file is overwritten in place.
    :raises ValueError: if *exif_src* contains no Exif segment, or if *image*
        was passed as bytes and *new_file* was not supplied.
    """
    # Accept either raw JPEG bytes or a path for the Exif source.
    if exif_src[0:2] == b"\xff\xd8":
        src_data = exif_src
    else:
        with open(exif_src, 'rb') as f:
            src_data = f.read()
    segments = split_into_segments(src_data)
    exif = get_exif_seg(segments)
    if exif is None:
        raise ValueError("not found exif in input")

    # Track whether *image* came from disk so we can overwrite it in place
    # when no explicit output target is given.
    output_file = False
    if image[0:2] == b"\xff\xd8":
        image_data = image
    else:
        with open(image, 'rb') as f:
            image_data = f.read()
        output_file = True
    segments = split_into_segments(image_data)
    new_data = merge_segments(segments, exif)

    if isinstance(new_file, io.BytesIO):
        new_file.write(new_data)
        new_file.seek(0)
    elif new_file:
        with open(new_file, "wb+") as f:
            f.write(new_data)
    elif output_file:
        with open(image, "wb+") as f:
            f.write(new_data)
    else:
        raise ValueError("Give a 3rd argument to 'transplant' to output file")
5027134 | # Python 3.6.1
with open("input.txt", "r") as f:
puzzle_input = [int(i) for i in f.read()[0:-1]]
total = 0
puzzle_inputc = len(puzzle_input) // 2
for cur_index in range(len(puzzle_input)):
current = puzzle_input[cur_index]
pnext = puzzle_input[(cur_index + puzzle_input) % len(puzzle_input)]
if current == pnext:
total += current
print(total)
| StarcoderdataPython |
265027 | import datetime
from decimal import Decimal
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from .signals import user_matches_update
from .utils import calculate_match
from django.contrib.auth.signals import user_logged_in
from django.dispatch import receiver
# ====================================
# Match Model
# ====================================
class MatchQuerySet(models.query.QuerySet):
    """QuerySet helpers for Match rows."""
    def all(self):
        # NOTE(review): filters on an `active` field that is not declared on
        # the Match model in this file -- confirm the field exists elsewhere.
        return self.filter(active=True)
    def matches(self, user):
        # All matches where *user* is on either side, excluding self-matches.
        q1 = self.filter(user_a=user).exclude(user_b=user)
        q2 = self.filter(user_b=user).exclude(user_a=user)
        return (q1 | q2).distinct()
class MatchManager(models.Manager):
    """Manager providing creation, refresh and lookup helpers for Match rows."""

    def get_queryset(self):
        return MatchQuerySet(self.model, using=self._db)

    def get_or_create_match(self, user_a=None, user_b=None):
        """Return ``(match, created)`` for the unordered pair of users.

        A match may have been stored with the users in either column order,
        so both orderings are probed before a new row is created.
        """
        # `except Exception` replaces the original bare `except:` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        try:
            instance1 = self.get(user_a=user_a, user_b=user_b)
        except Exception:
            instance1 = None
        try:
            instance2 = self.get(user_a=user_b, user_b=user_a)
        except Exception:
            instance2 = None
        retrieved_instance = instance1 or instance2
        if retrieved_instance:
            # Refresh a stale score before handing the row back.
            retrieved_instance.check_update()
            return retrieved_instance, False
        new_instance = self.create(user_a=user_a, user_b=user_b)
        new_instance.do_match()
        return new_instance, True

    def update_for_user(self, user):
        """Recompute every match involving *user*."""
        for instance in self.get_queryset().matches(user):
            instance.do_match()

    def update_all(self):
        """Re-check matches last modified between 120 hours and 60 seconds ago."""
        now = timezone.now()
        offset = now - datetime.timedelta(seconds=60)
        offset2 = now - datetime.timedelta(hours=120)
        # Bug fix: the original built this filtered queryset and discarded the
        # result, then iterated over *all* matches instead of the stale window.
        queryset = self.all().filter(modified__gt=offset2).filter(modified__lte=offset)
        for instance in queryset:
            instance.check_update()

    def get_matches(self, user):
        """Return one-element lists of matched users for *user*, best first."""
        queryset = self.get_queryset().matches(user).order_by('-match_decimal')
        matches = []
        for match in queryset:
            if match.user_a == user:
                matches.append([match.user_b])
            elif match.user_b == user:
                matches.append([match.user_a])
        return matches

    def get_matches_with_percent(self, user):
        """Return ``[other_user, percent]`` pairs for *user*, best match first."""
        qs = self.get_queryset().matches(user).order_by('-match_decimal')
        matches = []
        for match in qs:
            if match.user_a == user:
                matches.append([match.user_b, match.percent])
            elif match.user_b == user:
                matches.append([match.user_a, match.percent])
        return matches
class Match(models.Model):
    """Compatibility score between two users, refreshed at most once a minute."""
    user_a = models.ForeignKey(User, related_name='user_a')
    user_b = models.ForeignKey(User, related_name='user_b')
    # Match score in [0, 1]; exposed to callers as a percentage via `percent`.
    match_decimal = models.DecimalField(decimal_places=8, max_digits=16, default=0.00)
    questions_answered = models.IntegerField(default=0)
    created = models.DateTimeField(auto_now_add=True, auto_now=False)
    modified = models.DateTimeField(auto_now_add=False, auto_now=True)
    objects = MatchManager()
    def __str__(self):
        return str(self.match_decimal)
    @property
    def percent(self):
        """The score as a percentage string with two decimal places."""
        return str((self.match_decimal * Decimal(100)).quantize(Decimal('0.01')))
    def do_match(self):
        """Recompute and persist the score via ``calculate_match``."""
        user_a = self.user_a
        user_b = self.user_b
        match_decimal, questions_answered = calculate_match(user_a, user_b)
        self.match_decimal = match_decimal
        self.questions_answered = questions_answered
        self.save()
    def check_update(self):
        """Recompute only if older than 60 seconds or never scored."""
        now = timezone.now()
        offset = now - datetime.timedelta(seconds=60)
        if self.modified <= offset or self.match_decimal == 0.0:
            self.do_match()
def user_matches_update_receiver(sender, user, *args, **kwargs):
    """Signal handler: recompute all matches involving *user*."""
    Match.objects.update_for_user(user)
# connect to signal
user_matches_update.connect(user_matches_update_receiver)
@receiver(user_logged_in)
def get_user_matches_receiver(sender, request, user, *args, **kwargs):
    """On login, ensure a Match row exists pairing *user* with every other user."""
    for u in User.objects.exclude(username=user.username).order_by("-id"):
        Match.objects.get_or_create_match(user_a=u, user_b=user)
| StarcoderdataPython |
5105434 | cars = 100
# Python 2 exercise script: simple carpool arithmetic printed to stdout.
space_in_a_car = 4.0
drivers = 30
passengers = 90
cars_available = cars - drivers
carpool = drivers * space_in_a_car
average_passengers_per_car = passengers / drivers
# NOTE(review): `carpool` is printed three times below while
# `average_passengers_per_car` is never printed -- looks unintentional; confirm.
print "There are", cars, "cars available."
print "There are only", drivers, "drivers available."
print "There will be", cars_available, "cars available."
print carpool, "passengers to carpool available."
print carpool, "passengers to carpool available."
print carpool, "passengers to carpool available."
| StarcoderdataPython |
11340572 | # -*- coding: utf-8 -*-
#################################################################################
## Copyright (c) 2018-Present Webkul Software Pvt. Ltd. (<https://webkul.com/>)
# You should have received a copy of the License along with this program.
# If not, see <https://store.webkul.com/license.html/>
#################################################################################
from odoo import models, fields, api, _
# Selection values for how the per-package cover amount is interpreted:
# a fixed sum, or a percentage of the product price.
AmountOption=[
    ('fixed', 'Fixed Amount'),
    ('percentage', '% of Product Price')
    ]
class ProductPackaging(models.Model):
    """Extends product.packaging with a declared cover amount per package."""
    _inherit = 'product.packaging'
    _rec_name='display_name'
    @api.one
    @api.depends('package_carrier_type','name')
    def _complete_name(self):
        # Display name is "<name> [<carrier type>]" when a carrier is set.
        name = self.name
        if self.package_carrier_type:
            name += " [%s]" % (self.package_carrier_type)
        self.display_name = name
    @api.model
    def get_cover_amount(self,amount):
        """Return the cover amount for *amount*: fixed, or a percentage of it.

        NOTE(review): decorated @api.model but reads per-record fields via
        self -- confirm callers invoke it on a single record.
        """
        if self.cover_amount_option == 'fixed':
            return self.cover_amount
        return amount* self.cover_amount / 100
    # How `cover_amount` is interpreted (see AmountOption above).
    cover_amount_option = fields.Selection(
        selection = AmountOption,
        default ='percentage',
        required=1,
    )
    cover_amount = fields.Integer(
        string='Cover Amount',
        default =10,
        help="""This is the declared
        value/cover amount for an individual package."""
    )
    display_name = fields.Char(
        compute=_complete_name,
        string="Complete Name",
    )
    product_tmpl_ids = fields.Many2many(
        'product.template',
        'product_tmp_product_packaging_rel',
        'packaging_id',
        'product_tmpl_id',
        string='Template'
    )
| StarcoderdataPython |
3392153 | <reponame>BaldFish-Tong/push_weather<filename>main.py
from push_weather.info import info
from push_weather.Crawling_Weather import crawling_weather
from push_weather.Send_Message import send_message
if __name__ == '__main__':
    # Scrape the weather for the configured location and push it as a message.
    send_message(crawling_weather(info))
11270890 | """delete column seen
Revision ID: c28b6d5e6c4c
Revises:
Create Date: 2020-11-05 06:07:06.334130
"""
from alembic import op
import sqlalchemy as sa
# Revision identifiers, used by Alembic to order migrations.
# This is the first migration in the chain (down_revision is None).
revision = 'c28b6d5e6c4c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the admins, currentquestions, questions, students and answers
    tables (answers references questions via question_id).

    NOTE(review): the module docstring says "delete column seen" but this
    migration only creates tables (including `seen` columns) -- confirm.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('admins',
    sa.Column('admin_id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=255), nullable=True),
    sa.Column('email', sa.String(), nullable=True),
    sa.Column('approved', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('admin_id')
    )
    op.create_table('currentquestions',
    sa.Column('question_id', sa.Integer(), nullable=False),
    sa.Column('question', sa.String(), nullable=True),
    sa.Column('subject', sa.String(length=255), nullable=True),
    sa.Column('grade', sa.Integer(), nullable=True),
    sa.Column('seen', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('question_id')
    )
    op.create_table('questions',
    sa.Column('question_id', sa.Integer(), nullable=False),
    sa.Column('question', sa.String(), nullable=True),
    sa.Column('subject', sa.String(length=255), nullable=True),
    sa.Column('grade', sa.Integer(), nullable=True),
    sa.Column('seen', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('question_id')
    )
    op.create_table('students',
    sa.Column('student_id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=255), nullable=True),
    sa.Column('phone', sa.Integer(), nullable=True),
    sa.Column('questions', sa.ARRAY(sa.String()), nullable=True),
    sa.PrimaryKeyConstraint('student_id')
    )
    op.create_table('answers',
    sa.Column('answer_id', sa.Integer(), nullable=False),
    sa.Column('answer', sa.String(length=255), nullable=True),
    sa.Column('question_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['question_id'], ['questions.question_id'], ),
    sa.PrimaryKeyConstraint('answer_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables in reverse dependency order (answers first, since it
    has a foreign key into questions)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('answers')
    op.drop_table('students')
    op.drop_table('questions')
    op.drop_table('currentquestions')
    op.drop_table('admins')
    # ### end Alembic commands ###
| StarcoderdataPython |
11222965 | <reponame>LS80/script.module.pyrollbar<filename>lib/rollbar/kodi/__init__.py
import json
import platform
import xbmc
import xbmcaddon
import xbmcgui
import rollbar
# Handle to this Kodi addon, used for localized dialog strings below.
addon = xbmcaddon.Addon('script.module.pyrollbar')
def _kodi_version():
    """Return the Kodi application version dict via the JSON-RPC API."""
    query = dict(jsonrpc='2.0',
                 method='Application.GetProperties',
                 params=dict(properties=['version', 'name']),
                 id=1)
    response = json.loads(xbmc.executeJSONRPC(json.dumps(query)))
    return response['result']['version']
def error_report_requested(exc):
    """Show a yes/no dialog asking the user whether to report *exc*.

    Returns the dialog result (truthy if the user agreed to send a report).
    """
    return xbmcgui.Dialog().yesno(
        addon.getLocalizedString(32001),
        "\n".join((
            addon.getLocalizedString(32002),
            "[COLOR=red]{}[/COLOR]".format(exc),
            addon.getLocalizedString(32003)
        ))
    )
def report_error(access_token, version=None, url=None):
    """Send the current exception to Rollbar with platform/Kodi metadata.

    Must be called from inside an ``except`` block (uses report_exc_info).

    :param access_token: Rollbar project access token
    :param version: optional code version attached to the report
    :param url: optional URL recorded as extra data
    """
    data = {
        'machine': platform.machine(),
        'platform': platform.system(),
        'kodi': _kodi_version(),
        'url': url,
    }
    rollbar.init(access_token, code_version=version)
    rollbar.report_exc_info(extra_data=data)
| StarcoderdataPython |
1879260 | <reponame>gloriousDan/recipe-scrapers
from recipe_scrapers.whatsgabycooking import WhatsGabyCooking
from tests import ScraperTest
class TestWhatsGabyCookingScraper(ScraperTest):
    """Scraper tests for whatsgabycooking.com against a cached recipe page."""
    scraper_class = WhatsGabyCooking
    def test_host(self):
        self.assertEqual("whatsgabycooking.com", self.harvester_class.host())
    def test_canonical_url(self):
        self.assertEqual(
            "https://whatsgabycooking.com/vegetarian-quinoa-bake/",
            self.harvester_class.canonical_url(),
        )
    def test_title(self):
        self.assertEqual(self.harvester_class.title(), "Vegetarian Quinoa Bake")
    def test_total_time(self):
        # Total time is expressed in minutes.
        self.assertEqual(45, self.harvester_class.total_time())
    def test_yields(self):
        self.assertEqual("6 servings", self.harvester_class.yields())
    def test_image(self):
        self.assertEqual(
            "https://cdn.whatsgabycooking.com/wp-content/uploads/2017/10/WGC-Quinoa-Bake-copy-2.jpg",
            self.harvester_class.image(),
        )
    def test_ingredients(self):
        # Order-insensitive comparison of the full ingredient list.
        self.assertCountEqual(
            [
                "1 ½ cups uncooked multi-colored quinoa",
                "2 cups shredded colby jack cheese divided",
                "1 cup shredded mozzarella cheese divided",
                "1 cup canned black beans rinsed and drained",
                "1 cup frozen charred corn trader joes",
                "1 4.5-ounce can chopped green chiles",
                "1 ½ cup Gaby's chipotle or tomatillo salsa",
                "Kosher salt and pepper to taste",
                "Finely chopped scallions and cilantro as garnish",
            ],
            self.harvester_class.ingredients(),
        )
    def test_instructions(self):
        # NOTE(review): the `return` is redundant -- assertEqual raises on
        # mismatch; the return value is discarded by the test runner.
        return self.assertEqual(
            "Preheat the oven to 375 degrees F.\nCook the quinoa according to the package directions. Once cooked, remove from heat and transfer the cooked quinoa into a large bowl.\nFold in 1 1/2 cups of the shredded colby jack cheese, ½ cup of the shredded mozzarella, black beans, corn, green chiles and salsa. Season the entire mixture with salt and pepper and stir to combine.\nLightly spray a medium sized skillet with non-stick spray, and transfer the mixture into the skillet. Top with the remaining shredded cheeses and bake for about 20-25 minutes until the top layer of cheese is bubbly and melted.\nRemove the baking dish from the oven and garnish with green onions and cilantro and serve.",
            self.harvester_class.instructions(),
        )
| StarcoderdataPython |
4809603 | # coding: utf-8
from __future__ import print_function, unicode_literals
import mock
import time
import threading
import json
import unittest2
import pdb
import logging
import logtail
from logtail.formatter import LogtailFormatter
from logtail.helpers import LogtailContext
class TestLogtailFormatter(unittest2.TestCase):
    """Tests that LogtailFormatter emits one JSON frame per record, merging
    `extra` data and the active LogtailContext into the frame."""
    def setUp(self):
        self.context = LogtailContext()
        self.customer = {'id': '1'}
        self.order = {'id': '1234', 'amount': 200, 'item': '#19849'}
    def _check_and_get_line(self, loglines):
        # Helper: assert exactly one line was emitted and return it.
        self.assertEqual(len(loglines), 1)
        return loglines[0]
    def test_format_emits_single_line(self):
        # Embedded newlines in the message must not split the frame.
        formatter = logtail.LogtailFormatter(context=self.context)
        logger, loglines = logger_and_lines(formatter)
        self.assertFalse(loglines)
        logger.info('Hello\n\n\n\n\n\nWorld')
        line = self._check_and_get_line(loglines)
        self.assertEqual(len(line.split('\n')), 1)
    def test_format_creates_json_serialized_frame_with_context(self):
        formatter = logtail.LogtailFormatter(context=self.context)
        logger, loglines = logger_and_lines(formatter)
        self.assertFalse(loglines)
        with self.context(customer=self.customer):
            logger.info('Received order id=%s', self.order['id'], extra={'order': self.order})
        line = self._check_and_get_line(loglines)
        frame = json.loads(line)
        self.assertEqual(frame['message'], 'Received order id=%s' % self.order['id'])
        self.assertEqual(frame['order'], self.order)
        self.assertEqual(frame['context']['customer'], self.customer)
    def test_format_collapses_context(self):
        # Nested contexts with the same key are merged, inner values winning.
        formatter = logtail.LogtailFormatter(context=self.context)
        logger, loglines = logger_and_lines(formatter)
        self.assertFalse(loglines)
        with self.context(customer=self.customer):
            with self.context(customer={'trusted': True}):
                logger.info('Received an order', extra={'order': self.order})
        line = self._check_and_get_line(loglines)
        frame = json.loads(line)
        self.assertEqual(frame['message'], 'Received an order')
        self.assertEqual(frame['order'], self.order)
        self.assertEqual(frame['context']['customer'], {'id': self.customer['id'], 'trusted': True})
    def test_format_with_custom_default_json_serializer(self):
        # A json_default hook lets unencodable values be rendered instead of raising.
        def suppress_encoding_errors(obj):
            return 'Could not encode type=%s' % type(obj).__name__
        default_formatter = logtail.LogtailFormatter(context=self.context)
        default_logger, _ = logger_and_lines(default_formatter, 'default')
        suppress_formatter = logtail.LogtailFormatter(context=self.context, json_default=suppress_encoding_errors)
        suppress_logger, loglines = logger_and_lines(suppress_formatter, 'suppress')
        self.assertIsNot(default_logger, suppress_logger)
        with self.context(data={'not_encodable': Dummy()}):
            with self.assertRaises(TypeError):
                default_logger.info('hello')
            suppress_logger.info('goodbye')
        line = self._check_and_get_line(loglines)
        frame = json.loads(line)
        self.assertEqual(frame['message'], 'goodbye')
        self.assertEqual(frame['context']['data'], {'not_encodable': 'Could not encode type=Dummy'})
    def test_format_with_custom_default_json_encoder(self):
        # Same idea as above, but via a custom JSONEncoder subclass.
        default_formatter = logtail.LogtailFormatter(context=self.context)
        default_logger, _ = logger_and_lines(default_formatter, 'default')
        dummy_capable_formatter = logtail.LogtailFormatter(context=self.context, json_encoder=DummyCapableEncoder)
        dummy_capable_logger, loglines = logger_and_lines(dummy_capable_formatter, 'dummy_capable')
        self.assertIsNot(default_logger, dummy_capable_logger)
        with self.context(data={'not_encodable': Dummy()}):
            with self.assertRaises(TypeError):
                default_logger.info('hello')
            dummy_capable_logger.info('goodbye')
        line = self._check_and_get_line(loglines)
        frame = json.loads(line)
        self.assertEqual(frame['message'], 'goodbye')
        self.assertEqual(frame['context']['data'], {'not_encodable': '<Dummy instance>'})
class Dummy(object):
    """ A custom class that the default JSONEncoder cannot encode; used to
    exercise the json_default / json_encoder error paths in the tests. """
class DummyCapableEncoder(json.JSONEncoder):
    """ A JSONEncoder that can additionally encode instances of Dummy. """
    def default(self, obj):
        if isinstance(obj, Dummy):
            return '<Dummy instance>'
        # Bug fix: the original referenced the undefined name `CustomEncoder`
        # here, raising NameError for any other unencodable type instead of
        # the TypeError callers expect from JSONEncoder.default().
        return super(DummyCapableEncoder, self).default(obj)
class ListHandler(logging.Handler):
    """Logging handler that collects every formatted record in ``self.lines``.

    Used by the tests to inspect exactly what a formatter emitted.
    """

    def __init__(self, *args, **kwargs):
        super(ListHandler, self).__init__(*args, **kwargs)
        self.lines = []

    def emit(self, record):
        # Format with the attached formatter and keep the line for inspection.
        self.lines.append(self.format(record))
def logger_and_lines(formatter, name=__name__):
    """Return a fresh logger using *formatter* and the list its lines land in.

    Any previously attached handlers on the named logger are discarded so
    each test starts from a clean slate.
    """
    handler = ListHandler()
    handler.setFormatter(formatter)
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.handlers = []
    logger.addHandler(handler)
    return logger, handler.lines
| StarcoderdataPython |
6533616 | <gh_stars>0
import random
import csv
# The Feminism plen name was changed because of an error in the gsheet.
# `plen` is the canonical list of plenary titles students can prefer.
plen = ["China and Africa's New Era", "Cryptocurrencies: Friend or Foe?", "Drug Legalization in a Progressive World", "The Global Food Crisis", "The Future of Feminism", "Nuclear Weapons: Obsolete or the Future?"]
data = []
school = "Upper Canada College"
def allUnique(x):
    """Return True when *x* contains no duplicate (hashable) elements."""
    seen = set()
    for item in x:
        if item in seen:
            return False
        seen.add(item)
    return True
# This just imports the CSV and does some counting on plenary information.
# Python 2 script: binary-mode files with csv, and `print row` below.
with open('gsheets-students.csv', 'rb') as csvfile:
    csvreader = csv.reader(csvfile)
    for row in csvreader:
        # Column 2 is the student name; columns 3-6 are plenary preferences.
        final = [row[2], school]
        preflist = [row[3],row[4],row[5],row[6]]
        if (allUnique(preflist) == False):
            # Duplicate preferences: assign four random True flags instead.
            temp = ["True","True","True","True","False","False"]
            random.shuffle(temp)
            final = final + temp
        else:
            # One True/False flag per plenary, in `plen` order.
            for i in range(6):
                if plen[i] in preflist:
                    final.append("True")
                else:
                    final.append("False")
        data.append(final)
# Drop the header row that was processed like a student row above.
data.pop(0)
with open('formatted-students.csv', 'wb') as csvfile:
    writer = csv.writer(csvfile)
    for row in data:
        print row
        writer.writerow(row)
| StarcoderdataPython |
1890147 | import csv
import urllib2
import json
import datetime
import re
from bs4 import BeautifulSoup
from bs4 import NavigableString
from bs4 import Tag
## Loops through team list, gets links for all teams
player_table = []
current_player_table = []
errors =[]
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
# url = "http://www.nfl.com/players/search?category=lastName&playerType=historical&d-447263-p="
url = "http://www.nfl.com/players/search?category=lastName&playerType=current&d-447263-p="
## Generate tables of all possible URLs: pages 1-8 for each last-name initial.
urls = []
for l in alphabet:
    for n in range(8):
        u1 = url + str(n+1)
        u2 = u1 + "&filter=" + l
        urls.append(u2)
## Loop over urls and get player info and links
# Scrapes each search-result page; cell 2 of each row holds the player's
# name and profile link. Failed pages are collected in `errors`.
for u in urls:
    print(u)
    try:
        soup = BeautifulSoup(urllib2.urlopen(u).read())
        table = soup.find('table', { "id":"result" })
        rows = table.findAll('tr')
        for r in range(len(rows)):
            if r != 0:
                cells = rows[r].findAll('td')
                # name = cells[0]('a')[0].contents[0]
                # link = cells[0]('a')[0]['href']
                name = cells[2]('a')[0].contents[0]
                link = cells[2]('a')[0]['href']
                # career = cells[2].contents[0]
                # start_career = career[0:4]
                # end_career = career[-4:]
                player_entry = (name,link)
                current_player_table.append(player_entry)
    except Exception as e:
        print(e)
        errors.append(u)
# NOTE(review): write_to_csv is defined near the bottom of this file, so this
# call raises NameError when the script runs top-to-bottom -- confirm/reorder.
write_to_csv("current_player_urls_from_nfl.csv",current_player_table)
## Look up players in player list
player_bios = []
missing = []
players = read_table("to_look_up.csv",True)
## Get current player info
# Rows with sentinel first-year '1000' are current players; their profile
# pages have 7 <strong> tags (born / college / experience ...).
for p in players:
    ## Current players
    if p[2] == '1000':
        print(p[0] + " --- " + p[1])
        url = "http://www.nfl.com" + p[1]
        soup = BeautifulSoup(urllib2.urlopen(url).read())
        info = soup.find("div", { "class" : "player-info" })
        para = info.findAll('p')
        name = p[0]
        strongs = info.findAll('strong')
        if len(strongs) == 7:
            born = strongs[3].nextSibling.strip()
            college = strongs[4].nextSibling.strip()[1:].strip()
            experience = strongs[5].nextSibling.strip()
            if experience == ': Rookie':
                first_year = 2013
            else:
                # NOTE(review): takes only the first digit of the experience
                # string, so 10+ seasons are mis-parsed -- confirm.
                number = re.search("\d", experience)
                first_year = 2014 - int(experience[number.start()])
            letter = re.search("[a-zA-Z]+",born)
            if letter == None:
                # NOTE(review): `birthplace` is not reset on this path, so the
                # tuple below reuses the previous player's value (or raises
                # NameError on the first player) -- confirm.
                hometown = ''
                state = ''
            else:
                birthplace = born[letter.start():]
                splits = birthplace.split(',')
                if len(splits) == 2:
                    hometown = birthplace.split(',')[0].strip()
                    state = birthplace.split(',')[1].strip()
                else:
                    hometown = ''
                    state = ''
            player_info = (name,first_year,birthplace,hometown,state,college)
            player_bios.append(player_info)
        else:
            missing.append((p[0],p[1]))
            print('missing!!')
write_to_csv("current_player_bios.csv",player_bios)
historic_player_bios = []
players = read_table("to_do.csv",True)
## Get historic player info
# Historic players (first-year != '1000') have 6 <strong> tags on their page;
# their first year comes straight from the input table.
for p in players:
    ## historic players
    if p[2] != '1000':
        # Reset all fields so values cannot leak between players.
        name = ''
        first_year = ''
        birthplace = ''
        hometown = ''
        state = ''
        college = ''
        print(p[0] + " --- " + p[1])
        url = "http://www.nfl.com" + p[1]
        soup = BeautifulSoup(urllib2.urlopen(url).read())
        info = soup.find("div", { "class" : "player-info" })
        para = info.findAll('p')
        name = p[0]
        strongs = info.findAll('strong')
        if len(strongs) == 6:
            born = strongs[3].nextSibling.strip()
            college = strongs[4].nextSibling.strip()[1:].strip()
            first_year = p[2]
            letter = re.search("[a-zA-Z]+",born)
            if letter != None:
                birthplace = born[letter.start():]
                splits = birthplace.split(',')
                if len(splits) == 2:
                    hometown = birthplace.split(',')[0].strip()
                    state = birthplace.split(',')[1].strip()
            player_info = (name,first_year,birthplace,hometown,state,college)
            historic_player_bios.append(player_info)
        else:
            missing.append((p[0],p[1]))
            print('missing!!')
write_to_csv("historic_nfl_bios.csv",historic_player_bios)
def write_to_csv(csv_name,array):
    """Write a rectangular 2-D array to *csv_name*.

    Assumes every row has the same length as the first row. Uses binary
    mode ('wb'), which is correct for the csv module on Python 2 only.
    """
    columns = len(array[0])
    rows = len(array)
    with open(csv_name, "wb") as test_file:
        file_writer = csv.writer(test_file)
        for i in range(rows):
            file_writer.writerow([array[i][j] for j in range(columns)])
def strip_special(array, columns_with_string):
    """Return a copy of *array* with the listed string columns UTF-8 encoded
    and whitespace-stripped; all other columns are passed through unchanged.

    :param array: iterable of rows (indexable sequences)
    :param columns_with_string: column indices holding string values
    """
    cleaned = []
    for row in array:
        cleaned.append([
            value.encode('utf-8').strip() if col in columns_with_string else value
            for col, value in enumerate(row)
        ])
    return cleaned
def read_table(csv_name,include_header):
    """Read *csv_name* into a list of row tuples.

    :param include_header: when True the first (header) row is kept,
        otherwise it is skipped.

    NOTE(review): the 'Ub' open mode (universal newlines + binary) is
    Python 2 only; it is rejected by Python 3's open().
    """
    table = []
    with open(csv_name, 'Ub') as csvfile:
        f = csv.reader(csvfile, delimiter=',')
        firstline = True
        for row in f:
            if firstline == False or include_header == True:
                table.append(tuple(row))
            firstline = False
    return table
11205610 | <reponame>dungdinhanh/mmselfsup
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import time
import warnings
import torch
import mmcv
from mmcv.runner.base_runner import BaseRunner
from mmcv.runner.epoch_based_runner import EpochBasedRunner
from mmcv.runner.builder import RUNNERS
from mmcv.runner.checkpoint import save_checkpoint
from mmcv.runner.utils import get_host_info
from mmcv.runner.epoch_based_runner import EpochBasedRunner
@RUNNERS.register_module()
class EpochBasedRunnerLogMin(EpochBasedRunner):
"""KD-based Runner.
This runner train models epoch by epoch. For each epoch, the runner feed in the teacher model.
"""
    def __init__(self,
                 model,
                 batch_processor=None,
                 optimizer=None,
                 work_dir=None,
                 logger=None,
                 meta=None,
                 max_iters=None,
                 max_epochs=None):
        """Epoch-based runner that additionally tracks the minimum of two
        per-iteration loss values and logs the per-epoch minima to a CSV
        file under ``work_dir``."""
        super(EpochBasedRunnerLogMin, self).__init__(
            model=model,
            batch_processor=batch_processor,
            optimizer=optimizer,
            work_dir=work_dir,
            logger=logger,
            meta=meta,
            max_iters=max_iters,
            max_epochs=max_epochs)
        # Per-epoch [min1, min2] pairs accumulated across training.
        self.min_loss_epoch = []
        # Sentinel "infinity" used to reset the running minima.
        self.MAX_VAL = 99999
        self.current_min_loss_iter1 = self.MAX_VAL
        self.current_min_loss_iter2 = self.MAX_VAL
        self.current_min_loss_epoch1 = self.MAX_VAL
        self.current_min_loss_epoch2 = self.MAX_VAL
        self.log_min_epoch_file = os.path.join(self.work_dir, "min_loss_epochs.csv")
        # File handle for the CSV log; None here -- presumably opened in
        # run() before train() writes to it. TODO(review): confirm.
        self.f = None
    def run_iter(self, data_batch, train_mode, **kwargs):
        """Run one iteration; in train mode also update the running minima
        from the extra ``save_min`` pair returned by ``model.train_step``."""
        if self.batch_processor is not None:
            outputs = self.batch_processor(
                self.model, data_batch, train_mode=train_mode, **kwargs)
        elif train_mode:
            # This runner expects train_step to return (outputs, save_min),
            # where save_min holds two loss values to minimize over the epoch.
            outputs, save_min = self.model.train_step(data_batch, self.optimizer,
                                                      **kwargs)
            if self.current_min_loss_iter1 > save_min[0]:
                self.current_min_loss_iter1 = save_min[0]
            if self.current_min_loss_iter2 > save_min[1]:
                self.current_min_loss_iter2 = save_min[1]
        else:
            outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
        if not isinstance(outputs, dict):
            raise TypeError('"batch_processor()" or "model.train_step()"'
                            'and "model.val_step()" must return a dict')
        if 'log_vars' in outputs:
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs
def train(self, data_loader, **kwargs):
self.model.train()
self.mode = 'train'
self.data_loader = data_loader
self._max_iters = self._max_epochs * len(self.data_loader)
self.call_hook('before_train_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.data_loader):
self._inner_iter = i
self.call_hook('before_train_iter')
self.run_iter(data_batch, train_mode=True, **kwargs)
self.call_hook('after_train_iter')
self._iter += 1
self.current_min_loss_epoch1 = self.current_min_loss_iter1
self.current_min_loss_epoch2 = self.current_min_loss_iter2
self.min_loss_epoch.append([self.current_min_loss_epoch1, self.current_min_loss_iter2])
self.f.write("%d, %f, %f\n"%(self._epoch, self.current_min_loss_epoch1, self.current_min_loss_epoch2))
self.current_min_loss_epoch1 = self.MAX_VAL
self.current_min_loss_epoch2 = self.MAX_VAL
self.current_min_loss_iter1 = self.MAX_VAL
self.current_min_loss_iter2 = self.MAX_VAL
self.call_hook('after_train_epoch')
self._epoch += 1
def run(self, data_loaders, workflow, max_epochs=None, **kwargs):
"""Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
and validation.
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs. E.g, [('train', 2), ('val', 1)] means
running 2 epochs for training and 1 epoch for validation,
iteratively.
"""
assert isinstance(data_loaders, list)
assert mmcv.is_list_of(workflow, tuple)
assert len(data_loaders) == len(workflow)
if max_epochs is not None:
warnings.warn(
'setting max_epochs in run is deprecated, '
'please set max_epochs in runner_config', DeprecationWarning)
self._max_epochs = max_epochs
assert self._max_epochs is not None, (
'max_epochs must be specified during instantiation')
for i, flow in enumerate(workflow):
mode, epochs = flow
if mode == 'train':
self._max_iters = self._max_epochs * len(data_loaders[i])
break
work_dir = self.work_dir if self.work_dir is not None else 'NONE'
self.logger.info('Start running, host: %s, work_dir: %s',
get_host_info(), work_dir)
self.logger.info('Hooks will be executed in the following order:\n%s',
self.get_hook_info())
self.logger.info('workflow: %s, max: %d epochs', workflow,
self._max_epochs)
self.call_hook('before_run')
self.f = open(self.log_min_epoch_file, "w")
while self.epoch < self._max_epochs:
for i, flow in enumerate(workflow):
mode, epochs = flow
if isinstance(mode, str): # self.train()
if not hasattr(self, mode):
raise ValueError(
f'runner has no method named "{mode}" to run an '
'epoch')
epoch_runner = getattr(self, mode)
else:
raise TypeError(
'mode in workflow must be a str, but got {}'.format(
type(mode)))
for _ in range(epochs):
if mode == 'train' and self.epoch >= self._max_epochs:
break
epoch_runner(data_loaders[i], **kwargs)
self.f.close()
time.sleep(1) # wait for some hooks like loggers to finish
self.call_hook('after_run')
| StarcoderdataPython |
11212240 | from analysis.convert_analysis_files_to_kgtk_edge import KGTKAnalysis
from argparse import ArgumentParser
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('-f', '--folder', action='store', dest='folder_path',
                        help="folder where all files will be created")
    parser.add_argument('-e', '--edge-file-labels', action='store', dest='edge_file_labels_descriptions')
    args = parser.parse_args()
    # parse_args() returns an argparse.Namespace; options are plain
    # attributes. The previous `args.get(...)` raised AttributeError
    # because Namespace has no .get() method.
    folder_path = args.folder_path
    edge_file = args.edge_file_labels_descriptions
    ka = KGTKAnalysis(folder_path)
    ka.convert_node_labels_to_edge(edge_file)
| StarcoderdataPython |
8009210 | <gh_stars>0
import os, sys

# Two positional arguments (prefix and replacement) are required, so argv
# must have at least 3 entries (argv[0] is the script name). The previous
# check (`< 2`) let the script crash on IndexError with one argument.
if len(sys.argv) < 3:
    print('Usage: prepare.py <prefix> <new>')
    exit()

prefix = sys.argv[1]
new = sys.argv[2]

files = os.listdir()

for f in files:
    if f.startswith(prefix):
        # Replace only the leading prefix; str.replace would also rewrite
        # later occurrences of the prefix inside the filename.
        name = new + f[len(prefix):]
        try:
            os.rename(f, name)
        except OSError:
            # os.rename failures (permissions, existing target, ...) raise
            # OSError; report and keep going with the remaining files.
            print('Failed to rename file: ' + f + ' to ' + name)
| StarcoderdataPython |
3522135 | # -*- coding: utf-8 -*-
"""
@author:XuMing(<EMAIL>)
@description:
"""
import os
import sys
sys.path.append('..')
from dialogbot import Bot
from dialogbot.utils.io import save_json, load_json
from dialogbot.utils.log import logger
class BotServer:
    """Question-answering server that wraps a dialog Bot with a persistent,
    file-backed answer cache."""

    def __init__(self, cache_path='cache.json'):
        """Create the bot and preload the answer cache if the file exists."""
        self.bot = Bot()
        self.cache_path = cache_path
        if os.path.exists(cache_path):
            self.cache = load_json(cache_path)
            logger.info("use cache, cache file: %s" % cache_path)
        else:
            self.cache = {}

    def answer(self, query):
        """Return the answer for *query*, serving cache hits directly and
        persisting the cache after every miss."""
        if query in self.cache:
            return self.cache[query]
        response = self.bot.answer(query)
        self.cache[query] = response
        if self.cache_path:
            save_json(self.cache, self.cache_path)
            logger.info("save cache.")
        return response
if __name__ == '__main__':
    # Batch queries: warm the cache with a fixed set of questions, then
    # drop into an interactive loop ('q' quits).
    server = BotServer()
    query_list = [
        "王者荣耀哪个英雄最秀",
        "姚明有多高?",
        "姚明老婆是谁",
        "北京天气",
        "上海天气",
        "雅阁现在多少钱",
        "王者荣耀哪个英雄最贵?",
        "百日咳什么症状?",
        "百日咳要治疗多久?",
        "百日咳不能吃啥?",
        "介绍一下百日咳",
        "百日咳用啥药物?",
        "百日咳的预防措施有哪些?",
    ]
    for i in query_list:
        r = server.answer(i)
        print(i, r)
    # Interactive REPL; flush stdout so prompts appear before input().
    while True:
        sys.stdout.flush()
        input_text = input("user:")
        if input_text == 'q':
            break
        print("chatbot:", server.answer(input_text))
| StarcoderdataPython |
8093995 | <gh_stars>1-10
import json
import pandas as pd
import numpy as np
import re
from textblob import TextBlob
import nltk
nltk.download('wordnet')
nltk.download('omw-1.4')
from nltk.corpus import wordnet as wn
nltk.download('stopwords')
from nltk.corpus import stopwords
class DbSearch:
    """Rank stored funding opportunities against user keywords.

    Tokenizes and translates the user's keywords to English, scores each
    opportunity category via WordNet path similarity, then filters the best
    category by country and funding, writing the top matches to an Excel file.
    """
    def __init__(self,country,funding,keywords):
        # Country name used to prioritize opportunities (case-insensitive).
        self.country = country
        # Minimum funding; stored as int for comparisons.
        self.funding = int(funding)
        # Free-text keywords (any language; translated in translation()).
        self.keywords = keywords
        # NLTK regexp tokenizer pattern (verbose mode).
        self.pattern = r'''(?x)         # set flag to allow verbose regexps
             (?:[a-zA-Z]\.)+        # abbreviations, e.g. U.S.A.
           | \w+(?:-\w+)*        # words with optional internal hyphens
           | \$?\d+(?:\,.\d+)?%?       # currency and percentages, e.g. $12.40, 82%
           | \.\.\.            # ellipsis
           | [][.,;"'?():-_`]  # these are separate tokens; includes ], [
         '''
        # English stop words (currently unused by the methods below).
        self.stopwd = set(stopwords.words('english'))
    def translation(self,words_list):
        """Tokenize *words_list*, strip non-alphanumerics, and translate each
        token to English via TextBlob (requires network access)."""
        tmp_cat = nltk.regexp_tokenize(words_list, self.pattern)
        tmp_cat = [re.sub(r'[^A-Za-z0-9]+', '',token) for token in tmp_cat ]
        tmp_cat = [token for token in tmp_cat if token ]
        en_tmp_cat = []
        for word in tmp_cat:
            tmp_blob = TextBlob(word)
            tmp_word = tmp_blob.translate(to='en')
            tmp_word = str(tmp_word)
            en_tmp_cat.append(tmp_word)
        return en_tmp_cat
    def country_det(self,token_list,country):
        """Return True if *country* appears (case-insensitively) in
        *token_list*; returns None otherwise (falsy)."""
        country = country.lower()
        l_tl = [x.lower() for x in token_list]
        if country in l_tl:
            return True
    def ratings(self,in_ss,vs_ss):
        """Score every category synset-dict in *vs_ss* against the user's
        synsets *in_ss* using mean WordNet path similarity.

        Returns (qualifications, q2): both map category keys to the score;
        qualifications' keys also embed the user keywords for display.
        """
        qualifications = {}
        q2 = {}
        for key,value in vs_ss.items():
            partial_q = []
            tmp_string = []
            for skey,svalue in in_ss.items():  # in_ss always holds a single object
                tmp = []
                tmp_string.append(skey)
                for tkey,tvalue in value.items():
                    similarity = svalue.path_similarity(tvalue)
                    tmp.append(similarity)
                # NOTE(review): path_similarity can return None for unrelated
                # synsets — sum() would then raise TypeError; confirm inputs.
                tmp_q = sum(tmp) / len(tmp)
                partial_q.append(tmp_q)
            q1 = sum(partial_q) / len(partial_q)
            qualifications[f'{list(value.keys())} : {tmp_string}'] = q1
            q2[f'{list(value.keys())}'] = q1
        print(sorted(qualifications.items(), key=lambda x: x[1], reverse=True))
        return qualifications, q2
    def db_searcher(self):
        """Load opportunities, pick the best-matching category, prefer rows
        mentioning self.country, and write up to 5 results to Excel."""
        with open('outputs/cleaned_opportunities.json','r',encoding='utf-8') as opportunities:
            op_dict = json.load(opportunities)
        op_df = pd.DataFrame.from_dict(op_dict)
        # String form of the category list so it can be compared/grouped.
        op_df['str_t_cat'] = op_df['t_cat'].astype(str)
        cats = op_df['t_cat'].value_counts()
        cats_list = list(cats.index)
        # Build a {category-index: {word: synset}} map; uses the first
        # WordNet sense of each word (raises IndexError if none exists).
        vs_ss = {}
        for i in range(len(cats_list)):
            tmp = {}
            for initial in cats_list[i]:
                ss = wn.synsets(initial.lower())
                meaning = ss[0]
                tmp[initial] = meaning
            vs_ss[i] = tmp
        # Synsets for the (translated) user keywords.
        in_0 = self.translation(self.keywords)
        in_ss = {}
        for initial in in_0:
            ss = wn.synsets(initial.lower())
            meaning = ss[0]
            in_ss[initial] = meaning
        q, q2 = self.ratings(in_ss,vs_ss)
        best_category = sorted(q2.items(), key=lambda x: x[1], reverse=True)[0][0]
        target_1 = op_df[op_df['str_t_cat'] == best_category]
        country = self.country
        # Boolean-ish Series: True where the description mentions the country.
        country_idx = target_1['t_desc'].map(lambda x: self.country_det(x,country))
        how_many = sum([ 1 for value in country_idx if value == True])
        if how_many == 0:
            target = target_1.nlargest(5,'funding')
        elif how_many < 5:
            # Top up country matches with the best-funded remaining rows.
            # NOTE(review): inplace drop on a filtered frame and
            # DataFrame.append are deprecated/warned in recent pandas.
            idxs = list(country_idx.where(country_idx == True).dropna().index)
            df_country = target_1.loc[idxs,:]
            target_1.drop(labels=idxs ,axis=0,inplace=True)
            df_others = target_1.nlargest(5-how_many,'funding')
            target = df_country.append(df_others)
        elif how_many >= 5:
            idxs = list(country_idx.where(country_idx == True).dropna().index)
            df_country = target_1.loc[idxs,:]
            target = df_country.nlargest(5,'funding')
        target.to_excel('outputs/opportunity.xlsx')
| StarcoderdataPython |
3207558 | <gh_stars>0
#!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
from EmeraldAI.Logic.Singleton import Singleton
from EmeraldAI.Config.Config import Config
if(Config().Get("Database", "WiFiFingerprintDatabaseType").lower() == "sqlite"):
from EmeraldAI.Logic.Database.SQlite3 import SQlite3 as db
elif(Config().Get("Database", "WiFiFingerprintDatabaseType").lower() == "mysql"):
from EmeraldAI.Logic.Database.MySQL import MySQL as db
class PositionGrabber(object):
    """Singleton helper for reading the robot's filtered odometry pose and
    persisting/looking up poses in the Location_Map_Position table.

    NOTE(review): `__metaclass__` only enforces the Singleton under
    Python 2; under Python 3 this attribute is ignored — confirm runtime.
    """
    __metaclass__ = Singleton
    # Seconds to wait for an odometry message before giving up.
    __timeout = 3
    def __init__(self):
        rospy.init_node("Position_grabber", anonymous=True)
    def GetLivePosition(self):
        """Return the current filtered odometry pose, or None on timeout or
        any other subscription failure."""
        try:
            msg = rospy.wait_for_message("/odometry/filtered", Odometry, self.__timeout)
        except Exception:
            return None
        return msg.pose.pose
    def GetDatabasePosition(self, pose, range=0.01):
        """Return stored rows whose point lies within +/-*range* of *pose*'s
        position on every axis, or None if there is no match.

        NOTE(review): the query is built with str.format, not bound
        parameters — values here are floats so injection risk is low, but
        parameterized queries would be safer.
        """
        p = pose.position
        query = """SELECT *
                FROM Location_Map_Position
                Where PointX BETWEEN {0} and {1}
                AND PointY BETWEEN {2} and {3}
                AND PointZ BETWEEN {4} and {5}"""
        position = db().Fetchall(query.format((p.x-range), (p.x+range), (p.y-range), (p.y+range), (p.z-range), (p.z+range)))
        if len(position) > 0:
            return position
        return None
    def CreatePosition(self, pose, ignoreOrientation=False):
        """Insert *pose* into Location_Map_Position; orientation columns are
        omitted when *ignoreOrientation* is True. Returns the db Execute result."""
        p = pose.position
        o = pose.orientation
        if(ignoreOrientation):
            query = "INSERT INTO Location_Map_Position ('PointX', 'PointY', 'PointZ') Values ('{0}', '{1}', '{2}')".format(p.x, p.y, p.z)
        else:
            query = "INSERT INTO Location_Map_Position ('PointX', 'PointY', 'PointZ', 'OrientationX', 'OrientationY', 'OrientationZ', 'OrientationW') Values ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}')".format(p.x, p.y, p.z, o.x, o.y, o.z, o.w)
        return db().Execute(query)
| StarcoderdataPython |
12832003 | from model import YOLOv1
import torch
import torch.nn as nn
class YOLOv1Loss(nn.Module):
    """YOLOv1 multi-part loss (coordinates, objectness, no-object, class).

    Expects predictions of shape (N, S*S*(C + B*5)) and targets of shape
    (N, S, S, C + 5); box slots follow the original YOLOv1 layout.
    """

    def __init__(self, S=7, B=2, C=20):
        """
        __init__ initialize YOLOv1 Loss.

        Args:
            S (int, optional): split_size. Defaults to 7.
            B (int, optional): number of boxes. Defaults to 2.
            C (int, optional): number of classes. Defaults to 20.
        """
        super().__init__()
        self.mse = nn.MSELoss(reduction="sum")
        self.S = S
        self.B = B
        self.C = C
        # Loss weights from the YOLOv1 paper: lambda_noobj and lambda_coord.
        self.l_noobl = 0.5
        self.l_coord = 5

    def forward(self, predictions, target):
        """Compute the combined YOLOv1 loss for a batch.

        Args:
            predictions: raw network output, reshaped to (N, S, S, C + B*5).
            target: ground-truth tensor of shape (N, S, S, C + 5).

        Returns:
            Scalar loss tensor.
        """
        # BUGFIX: was `Self.B` (capital S), which raised NameError at runtime.
        predictions = predictions.reshape(-1, self.S, self.S, self.C + self.B*5)

        # IoU of each predicted box against the single target box per cell.
        iou_b1 = get_iou(predictions[...,21:25], target[...,21:25])
        iou_b2 = get_iou(predictions[...,26:30], target[...,21:25])
        ious = torch.stack([iou_b1, iou_b2], 0)
        # max_iou is 0/1: which of the two predicted boxes is responsible.
        _, max_iou = torch.max(ious, dim=0)
        exists_box = target[...,20].unsqueeze(3)  # target objectness indicator

        # * Box Coordinates Loss
        # Select the bounding boxes with highest IoU
        box_predictions = exists_box * (
            (
                max_iou * predictions[..., 26:30] +
                (1 - max_iou) * predictions[..., 21:25]
            )
        )
        # Select targets which has an object
        box_targets = exists_box * target[...,21:25]

        # sqrt of width/height (sign-preserving; eps avoids sqrt(0) gradient NaN).
        box_predictions[...,2:4] = torch.sign(box_predictions[...,2:4]) * torch.sqrt(
            torch.abs(box_predictions[..., 2:4]) + 1e-6
        )
        box_targets[..., 2:4] = torch.sqrt(box_targets[..., 2:4])

        box_loss = self.mse(
            torch.flatten(box_predictions, end_dim=-2),
            torch.flatten(box_targets, end_dim=-2)
        )

        # * Object Loss (confidence of the responsible box, where an object exists)
        pred_box = (
            max_iou * predictions[..., 25:26] +
            (1-max_iou) * predictions[..., 20:21]
        )
        object_loss = self.mse(
            torch.flatten(exists_box * pred_box),
            torch.flatten(exists_box * target[..., 20:21])
        )

        # * No Object Loss
        # For the first box
        no_object_loss = self.mse(
            torch.flatten((1-max_iou) * predictions[...,20:21], start_dim=1),
            torch.flatten((1-max_iou) * target[...,20:21], start_dim=1)
        )
        # For the second box
        no_object_loss += self.mse(
            torch.flatten(max_iou * predictions[...,25:26], start_dim=1),
            torch.flatten(max_iou * target[...,20:21], start_dim=1)
        )

        # * Class prediction Loss
        class_loss = self.mse(
            torch.flatten(exists_box * predictions[...,:20], end_dim=-2),
            torch.flatten(exists_box * target[...,:20], end_dim=-2)
        )

        # * Total Loss
        loss = (
            self.l_coord * box_loss
            + object_loss
            + self.l_noobl * no_object_loss
            + class_loss
        )
        return loss
| StarcoderdataPython |
101448 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 Wuhan PS-Micro Technology Co., Itd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy, sys
import moveit_commander
class MoveItFkDemo:
    """Forward-kinematics demo: move a 6-axis arm home, to a fixed joint
    pose, and back home, then shut MoveIt down. All work happens in __init__."""
    def __init__(self):
        # Initialize the move_group API
        moveit_commander.roscpp_initialize(sys.argv)

        # Initialize the ROS node
        rospy.init_node('moveit_fk_demo', anonymous=True)

        # Get the arm group of the manipulator to be controlled via MoveGroup
        arm = moveit_commander.MoveGroupCommander('manipulator')

        # Set the allowed joint-space goal tolerance (radians)
        arm.set_goal_joint_tolerance(0.001)

        # Set the maximum allowed velocity and acceleration scaling factors
        arm.set_max_acceleration_scaling_factor(0.5)
        arm.set_max_velocity_scaling_factor(0.5)

        # Move the arm back to its named 'home' pose first
        arm.set_named_target('home')
        arm.go()
        rospy.sleep(1)

        # Target joint positions for the six axes (unit: radians)
        joint_positions = [0.391410, -0.676384, -0.376217, 0.0, 1.052834, 0.454125]
        arm.set_joint_value_target(joint_positions)

        # Execute the motion
        arm.go()
        rospy.sleep(1)

        # Return the arm to the 'home' pose
        arm.set_named_target('home')
        arm.go()
        rospy.sleep(1)

        # Shut down and exit MoveIt
        moveit_commander.roscpp_shutdown()
        moveit_commander.os._exit(0)
if __name__ == "__main__":
    try:
        # Constructing the demo runs the whole motion sequence.
        MoveItFkDemo()
    except rospy.ROSInterruptException:
        # Raised when ROS shuts down mid-motion; exit quietly.
        pass
| StarcoderdataPython |
1846470 | #! /usr/bin/env python
# License: Apache 2.0. See LICENSE file in root directory.
#
# For simple behaviors that can run syncronously, Python provides
# a simple way to implement this. Add the work of your behavior
# in the execute_cb callback
#
import rospy
import actionlib
import behavior_common.msg
import time
import random
from std_msgs.msg import Float64
from std_msgs.msg import Bool
from std_msgs.msg import Empty
# for talking
import actionlib
import actionlib.action_client
import audio_and_speech_common.msg
# for servos
#from sheldon_servos.head_servo_publishers import *
#from sheldon_servos.right_arm_servo_publishers import *
#from sheldon_servos.left_arm_servo_publishers import *
from sheldon_servos.standard_servo_positions import *
from sheldon_servos.set_servo_speed import *
from sheldon_servos.set_servo_torque import *
# Globals
def wave1():
    """Move the right arm into the starting wave pose and level the head.

    Publish order follows the original choreography; servo positions are
    in radians.
    """
    pub_right_arm_shoulder_rotate.publish(1.2)
    pub_right_arm_shoulder_lift.publish(0.0)
    pub_right_arm_elbow_rotate.publish(0.0)
    pub_right_arm_elbow_bend.publish(2.0)
    pub_right_arm_wrist_rotate.publish(0.0)
    pub_right_arm_gripper_finger.publish(0.0)
    head_home()  # look slightly up at people
    #pub_head_sidetilt.publish(0.0)
    #pub_head_tilt.publish(0.0)
    #pub_head_pan.publish(0.0)
def wave2():
    """Swing the elbow slightly outward (one half of the wave motion)."""
    print("-----> wave position 2")
    pub_right_arm_elbow_rotate.publish(-0.2)
def wave3():
    """Swing the elbow slightly inward (the other half of the wave)."""
    print("-----> wave position 3")
    pub_right_arm_elbow_rotate.publish(0.2)
def wave4():
    """Return the elbow to center, ending the wave."""
    print("-----> wave position 4")
    pub_right_arm_elbow_rotate.publish(0.0)
class BehaviorAction(object):
_feedback = behavior_common.msg.behaviorFeedback()
_result = behavior_common.msg.behaviorResult()
def __init__(self, name):
self._action_name = name
self._as = actionlib.SimpleActionServer(self._action_name, behavior_common.msg.behaviorAction, execute_cb=self.execute_cb, auto_start = False)
self._as.start()
rospy.loginfo('%s: Initializing Wave behavior service' % (self._action_name))
# enable/disable microphone when robot is moving servos.
# (Note system_enable vs. speech_enable vs. user_enable)
self.mic_system_enable_pub = rospy.Publisher('/microphone/system_enable', Bool, queue_size=1)
self.foo = True
def execute_cb(self, goal):
rospy.loginfo('%s: Executing behavior' % (self._action_name))
rospy.loginfo( "Param1: '%s'", goal.param1)
rospy.loginfo( "Param2: '%s'", goal.param2)
# ====== Behavior Implementation ======
success = True
r = rospy.Rate(1.0)
# initialization
rospy.loginfo("Waiting for speech server (press ctrl-c to cancel at anytime)")
client = actionlib.SimpleActionClient("/speech_service", \
audio_and_speech_common.msg.speechAction)
client.wait_for_server()
SetServoTorque(0.5, right_arm_joints)
SetServoSpeed(0.7, right_arm_joints)
SetSingleServoSpeed(1.8, 'right_arm_shoulder_rotate_joint')
self.foo = False
# mute the microphone
self.mic_system_enable_pub.publish(False)
# Move arm into start wave position
wave1()
time.sleep(2)
# say Hi
rospy.loginfo("Saying Hello")
goal = audio_and_speech_common.msg.speechGoal(text_to_speak="hello")
client.send_goal(goal)
#result = client.wait_for_result() # DONT wait for speech to complete
#rospy.loginfo("Speech goal returned result: %d", result)
# start waving while talking
wave2()
time.sleep(0.4)
wave3()
time.sleep(0.4)
wave2()
time.sleep(0.4)
wave4()
time.sleep(0.4)
# Move head and arms back to ready position
all_home()
#pub_right_arm_gripper_finger.publish(-2.0)
# Finish Behavior
for i in range(1, 5):
# check that preempt has not been requested by the client
if self._as.is_preempt_requested():
rospy.loginfo('%s: Behavior preempted' % self._action_name)
self._as.set_preempted()
success = False
break
rospy.loginfo('%s: Running behavior' % (self._action_name))
self._feedback.running = True
self._as.publish_feedback(self._feedback)
r.sleep()
if success:
rospy.loginfo('%s: Behavior complete' % self._action_name)
self._as.set_succeeded(self._result)
# un-mute the microphone
self.mic_system_enable_pub.publish(True)
if __name__ == '__main__':
    # Start the wave behavior action server under this node's name.
    rospy.init_node('wave_behavior')
    server = BehaviorAction(rospy.get_name())
    rospy.spin()
| StarcoderdataPython |
3338449 | import os
# JSON key constants shared between client and server payloads.
# NOTE(review): several values below look like copy/paste slips — they are
# left untouched because a counterpart may match them verbatim, but each
# should be confirmed (marked inline).

# --- ID:s ---
PROJECT_ID = 'project_id'
FOLDER_ID = 'folder_id'
CAMERA_ID = 'camera_id'
CLIP_ID = 'clip_id'
FILTER_ID = 'filter_id'
PROGRESS_ID = 'progress_id'

# --- Text ---
PROJECT_NAME = 'project_name'
CLIP_NAME = 'clip_name'

# --- OS related ---
FILE_PATH = 'file_path'

# --- Objects ---
PROJECTS = 'projects'
FOLDERS = 'folders'
CAMERAS = 'cameras'

# --- Lists of ID:s ---
CAMERA_IDS = 'camera_ids'
CLIP_IDS = 'clip_ids'
# NOTE(review): trailing/odd spaces in the next two values — confirm intended.
REMOVE_INCLUDED_CLIP_IDS = "remove_included_clips "
ADD_INCLUDED_CLIP_IDS = "add_included_clips "
REMOVE_EXCLUDED_CLIP_IDS = "remove_excluded_clips"
ADD_EXCLUDED_CLIP_IDS = "add_excluded_clips"

# --- Lists of values ---
REMOVE_BLACKLISTED_RESOLUTIONS = "remove_blacklisted_resolutions"
ADD_BLACKLISTED_RESOLUTIONS = "add_blacklisted_resolutions"

# --- Lists of text ---
ADD_CLASSES = "add_classes"
REMOVE_CLASSES = "remove_classes"

# --- Quality related ---
MIN_WIDTH = "min_width"
# NOTE(review): MIN_HEIGHT maps to "max_width" — almost certainly meant
# "min_height"; confirm against the consuming side before changing.
MIN_HEIGHT = "max_width"
# NOTE(review): leading space in the value — confirm intended.
MIN_FRAMERATE = " min_framerate"

# --- Time related ---
START_TIME = "start_time"
END_TIME = "end_time"

# --- Progress related ---
TOTAL = 'total'
CURRENT = 'current'
# --- Functions ---
def os_aware(data: dict) -> dict:
    """
    Make *data* OS aware by normalizing every file-path separator to the
    one used by the current operating system, recursing into nested dicts
    and lists.

    :param data: JSON data.
    :return: OS aware JSON data (mutated in place and returned).
    """
    for key, value in data.items():
        if isinstance(value, dict):
            data[key] = os_aware(value)
        elif isinstance(value, list):
            # Non-empty lists are handled element-wise; the first element
            # decides whether the list holds dicts or plain values.
            if value:
                if isinstance(value[0], dict):
                    data[key] = [os_aware(item) for item in value]
                else:
                    data[key] = [replace_sep(item) for item in value]
        else:
            data[key] = replace_sep(value)
    return data
def replace_sep(val):
    """
    Normalize path separators in *val* if it is a string.

    Non-string values are returned unchanged.

    :param val: Input.
    :return: Modified val.
    """
    if not isinstance(val, str):
        return val
    # The separator used by the *other* family of operating systems.
    foreign_sep = '/' if os.name == 'nt' else '\\'
    return val.replace(foreign_sep, os.path.sep)
4905934 | AUTHOR = '<NAME>'
AUTHOR_EMAIL = '<EMAIL>'
NAME = 'tiddlywebplugins.sqlalchemy3'
DESCRIPTION = 'sqlalchemy store for tiddlyweb'
VERSION = '3.1.1' # make sure you update in __init__ too

import os
from setuptools import setup, find_packages

# You should carefully review the below (install_requires especially).
setup(
        namespace_packages = ['tiddlywebplugins'],
        name = NAME,
        version = VERSION,
        description = DESCRIPTION,
        # Reads README at import time; setup.py must run from the repo root.
        long_description = open(os.path.join(os.path.dirname(__file__), 'README')).read(),
        author = AUTHOR,
        author_email = AUTHOR_EMAIL,
        url = 'http://pypi.python.org/pypi/%s' % NAME,
        packages = find_packages(exclude=['test']),
        platforms = 'Posix; MacOS X; Windows',
        install_requires = ['setuptools',
            'tiddlyweb>=1.2.0',
            'sqlalchemy',
            'pyparsing<2.0.0',
            ],
        zip_safe = False,
        license = 'BSD'
        )
| StarcoderdataPython |
11275619 | #!/usr/bin/env python
import unittest
from pycoin.ecdsa import generator_secp256k1, sign, verify, public_pair_for_secret_exponent
class ECDSATestCase(unittest.TestCase):
    """Round-trip sign/verify tests for pycoin's secp256k1 ECDSA."""

    def test_sign_verify(self):
        """Sign several values with fixed secret exponents, verify each
        signature, enforce low-s, and confirm a tampered signature fails."""
        def do_test(secret_exponent, val_list):
            public_point = public_pair_for_secret_exponent(generator_secp256k1, secret_exponent)
            for v in val_list:
                signature = sign(generator_secp256k1, secret_exponent, v)

                r = verify(generator_secp256k1, public_point, v, signature)

                # Check that the 's' value is 'low', to prevent possible transaction malleability as per
                # https://github.com/bitcoin/bips/blob/master/bip-0062.mediawiki#low-s-values-in-signatures
                assert signature[1] <= 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0
                assert r == True

                # Perturbing s must invalidate the signature.
                signature = signature[0],signature[1]+1
                r = verify(generator_secp256k1, public_point, v, signature)
                assert r == False


        val_list = [100,20000,30000000,400000000000,50000000000000000,60000000000000000000000]

        do_test(0x1111111111111111111111111111111111111111111111111111111111111111, val_list)
        do_test(0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd, val_list)
        do_test(0x47f7616ea6f9b923076625b4488115de1ef1187f760e65f89eb6f4f7ff04b012, val_list)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
393710 | # Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Tool for generating markdown documentation for a test. """
import difflib
from pathlib import Path
from typing import IO, List
from types_ import CompatTest, FidlStep, SourceStep, HLCPP, LLCPP, RUST, DART, GO
# Lines of context to show in diffs. We can be very liberal with this number
# since we already filter out the boilerplate before passing code to difflib.
DIFF_CONTEXT = 12

# Where to output the docs within the test directory
DOC_FILE = 'README.md'

# Languages to specify in markdown for syntax highlighting (maps binding
# name -> fenced-code-block language tag).
MD_LANG = {
    'fidl': 'fidl',
    HLCPP: 'cpp',
    LLCPP: 'cpp',
    RUST: 'rust',
    DART: 'dart',
    GO: 'go',
}
def binding_title(binding: str) -> str:
    """Pretty prints a binding string for use as a markdown header.

    C++ bindings are fully upper-cased; everything else is capitalized.
    """
    if binding in (HLCPP, LLCPP):
        return binding.upper()
    return binding.capitalize()
def write_instructions(out: IO, ins: List[str]):
    """Write *ins* to *out* as a markdown bullet list, then a blank line."""
    for instruction in ins:
        out.write(f'- {instruction}\n')
    out.write('\n')
def remove_boilerplate(lines: List[str]) -> List[str]:
    """Return the lines strictly between the '[START contents]' and
    '[END contents]' marker lines (markers excluded).

    Raises:
        RuntimeError: if the START marker is missing, or if START is
            present but END never follows.
    """
    filtered = []
    within_contents = False
    for line in lines:
        if '[START contents]' in line:
            within_contents = True
        elif '[END contents]' in line:
            return filtered
        elif within_contents:
            filtered.append(line)
    # BUGFIX: previously this checked `if not filtered`, so a file with a
    # START marker but no content lines and no END marker reported the
    # wrong error ("Did not find [START contents]"). Track the flag instead.
    if not within_contents:
        raise RuntimeError('Did not find [START contents] tag in test file')
    raise RuntimeError('Did not find [END contents] tag in test file')
def cat(out: IO, binding: str, path: Path):
    """Render contents of file at path to out as a fenced code block,
    using the markdown language tag registered for *binding*."""
    with open(path) as source:
        body = "".join(remove_boilerplate(source.readlines()))
    out.write(f'```{MD_LANG[binding]}\n{body}```\n')
def diff(out: IO, pre: Path, post: Path):
    '''
    Render a unified-style diff of the content sections of *pre* and *post*
    to *out*, as one fenced ```diff block per group of changes.
    '''
    pre_lines = remove_boilerplate(open(pre).readlines())
    post_lines = remove_boilerplate(open(post).readlines())
    # autojunk=False so frequent lines (e.g. braces) aren't treated as junk.
    matcher = difflib.SequenceMatcher(
        None, pre_lines, post_lines, autojunk=False)
    for opcodes in matcher.get_grouped_opcodes(DIFF_CONTEXT):
        out.write('```diff\n')
        for tag, pre_start, pre_end, post_start, post_end in opcodes:
            if tag == 'equal':
                # Context lines: emitted with two leading spaces.
                for line in pre_lines[pre_start:pre_end]:
                    out.write('  ' + line)
                continue
            # 'replace' contributes both removed and added lines.
            if tag in {'replace', 'delete'}:
                for line in pre_lines[pre_start:pre_end]:
                    out.write('- ' + line)
            if tag in {'replace', 'insert'}:
                for line in post_lines[post_start:post_end]:
                    out.write('+ ' + line)
        out.write('\n```\n')
def generate_docs(test_root: Path, test: CompatTest, out: IO) -> None:
    """Generate transition documentation for *test* as markdown on *out*.

    Layout: warning banner, title, an overview table linking every FIDL and
    per-binding step, the initial FIDL/source listings, then one diff
    section per transition step. Writes to *out*; returns nothing.
    """
    out.write(
        f'<!-- WARNING: This file is machine generated by the source compatibility tool. -->\n'
    )

    # Title
    out.write(f'# {test.title}\n')

    # Overview
    out.write('## Overview\n')
    step_nums = sorted(
        {s.step_num for t in test.bindings.values() for s in t.steps})
    num_steps = step_nums[-1] if step_nums else 0
    step_cols = [f'|[step {i}](#step-{i})' for i in step_nums]
    out.write('-|[init](#init)' + ''.join(step_cols) + '\n')
    out.write('|'.join(['---'] * (2 + num_steps)) + '\n')

    # Row of links to the FIDL changes.
    fidl_step_nums = {
        s.step_num
        for t in test.bindings.values()
        for s in t.steps
        if isinstance(s, FidlStep)
    }
    out.write('fidl|[link](#fidl-init)')
    for i in step_nums:
        out.write('|')
        if i in fidl_step_nums:
            out.write(f'[link](#fidl-{i})')
    out.write('\n')

    # One row of links per binding.
    bindings = sorted(test.bindings.items())
    for b, transition in bindings:
        out.write(f'{b}|[link](#{b}-init)')
        src_step_nums = {
            s.step_num for s in transition.steps if isinstance(s, SourceStep)
        }
        for i in step_nums:
            out.write('|')
            if i in src_step_nums:
                out.write(f'[link](#{b}-{i})')
        out.write('\n')
    out.write('\n')

    # Initial FIDL (all transitions share the same starting FIDL).
    out.write('## Initial State {#init}\n')
    out.write(f'### FIDL {{#fidl-init}}\n')
    starting_fidl = next(iter(test.bindings.values())).starting_fidl
    cat(out, 'fidl', test_root / test.fidl[starting_fidl].source)

    # Initial bindings
    prev_fidl = test.fidl[starting_fidl].source
    prev_srcs = {}
    for b, t in bindings:
        out.write(f'### {binding_title(b)} {{#{b}-init}}\n')
        cat(out, b, test_root / t.starting_src)
        prev_srcs[b] = t.starting_src

    # Transition steps: walk step numbers in order, popping each binding's
    # queue as its steps come due; prev_fidl/prev_srcs track diff baselines.
    remaining_steps = {b: list(t.steps) for b, t in bindings}
    current_step = 1
    while any(remaining_steps.values()):
        is_first_write = True
        remaining_steps = {k: v for k, v in remaining_steps.items() if v}
        for b in remaining_steps:
            step = remaining_steps[b][0]
            if step.step_num != current_step:
                continue
            remaining_steps[b].pop(0)

            # FIDL step: only the first binding to reach it emits the section.
            if isinstance(step, FidlStep) and is_first_write:
                out.write(f'## Update FIDL Library {{#step-{step.step_num}}}\n')
                write_instructions(out, test.fidl[step.fidl].instructions)
                source = test.fidl[step.fidl].source
                diff(out, test_root / prev_fidl, test_root / source)
                prev_fidl = source
                is_first_write = False
            # Binding (source) step
            elif isinstance(step, SourceStep):
                if is_first_write:
                    out.write(
                        f'## Update Source Code {{#step-{step.step_num}}}\n')
                    is_first_write = False
                out.write(f'### {binding_title(b)} {{#{b}-{step.step_num}}}\n')
                write_instructions(out, step.instructions)
                diff(out, test_root / prev_srcs[b], test_root / step.source)
                prev_srcs[b] = step.source
        current_step += 1
def write_docs(test_root: Path, test: CompatTest):
    """Render *test*'s transition docs into DOC_FILE under *test_root*."""
    with open(test_root / DOC_FILE, 'w') as f:
        generate_docs(test_root, test, f)
| StarcoderdataPython |
326385 | <gh_stars>10-100
"""
Test the manager a little bit.
"""
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from .fixtures import reset_textstore
from tiddlyweb import __version__
from tiddlyweb.config import config
from tiddlyweb.manage import handle
from tiddlyweb.store import Store
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.recipe import Recipe
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.user import User
BAG_STRING = """
{"desc": "hello", "policy": {}}
"""
RECIPE_STRING = """/bags/bag1/tiddlers
/bags/bag2/tiddlers
"""
TIDDLER_STRING = """modifier: cdent
Hello!
"""
def setup_module(module):
    """Reset the text store, stub sys.exit, and open a module-level store."""
    reset_textstore()
    module.savedin = sys.stdin  # saved so teardown_module can restore it
    sys.exit = boring_exit  # keep manage commands from killing the test run
    module.store = Store(config['server_store'][0],
            config['server_store'][1],
            environ={'tiddlyweb.config': config})
class InternalExit(Exception):
    """Raised in place of SystemExit by the stubbed sys.exit."""
    pass
def boring_exit(value):
    """sys.exit replacement: raise a catchable exception instead of exiting."""
    raise InternalExit()
def teardown_module(module):
    """Restore the real sys.stdin saved in setup_module."""
    sys.stdin = module.savedin
def test_adduser():
    """adduser creates a user whose password round-trips through the store."""
    handle(['', u'adduser', u'cdent', u'crunk'])

    the_user = User('cdent')
    the_user = store.get(the_user)
    assert the_user.check_password('<PASSWORD>')
def test_adduser_with_roles():
    """adduser with trailing arguments assigns those roles to the user."""
    handle(['', u'adduser', u'cdent', u'crunk', u'cow', u'monkey'])

    the_user = User('cdent')
    the_user = store.get(the_user)
    assert the_user.check_password('<PASSWORD>')
    assert 'cow' in the_user.list_roles()
    assert 'monkey' in the_user.list_roles()
def test_addrole():
    """addrole keeps the user's previously assigned roles intact.

    NOTE(review): this adds role 'pig' but only asserts the pre-existing
    'cow' role — presumably checking role persistence; confirm whether
    'pig' should be asserted too.
    """
    handle(['', u'addrole', u'cdent', u'pig'])

    the_user = User('cdent')
    the_user = store.get(the_user)
    assert 'cow' in the_user.list_roles()
def test_userpass():
    """userpass changes an existing user's password."""
    handle(['', u'userpass', u'cdent', u'<PASSWORD>'])

    the_user = User('cdent')
    the_user = store.get(the_user)
    assert the_user.check_password('<PASSWORD>')
def test_bag():
    """bag reads JSON from stdin and stores the described bag."""
    set_stdin(BAG_STRING)
    handle(['', u'bag', u'bag1'])

    the_bag = Bag('bag1')
    the_bag = store.get(the_bag)
    assert the_bag.name == 'bag1'
    assert the_bag.desc == 'hello'
def test_recipe():
    """recipe reads the recipe text from stdin and stores it."""
    set_stdin(RECIPE_STRING)
    handle(['', u'recipe', u'recipe1'])

    the_recipe = Recipe('recipe1')
    the_recipe = store.get(the_recipe)
    assert the_recipe.name == 'recipe1'
    assert u'bag1' in the_recipe.get_recipe()[0]
    assert u'bag2' in the_recipe.get_recipe()[1]
def test_tiddler():
    """tiddler reads tiddler text (headers + body) from stdin and stores it."""
    set_stdin(TIDDLER_STRING)
    handle(['', u'tiddler', u'bag1', u'tiddler1'])

    the_tiddler = Tiddler('tiddler1', 'bag1')
    the_tiddler = store.get(the_tiddler)
    assert the_tiddler.title == 'tiddler1'
    assert the_tiddler.bag == u'bag1'
    assert the_tiddler.modifier == 'cdent'
def test_info(capsys):
    """info prints the current store and the tiddlyweb version (captured
    via pytest's capsys fixture)."""
    handle(['', 'info'])
    results, err = capsys.readouterr()
    assert 'current store is' in results
    assert __version__ in results
def test_server(capsys):
    """server uses the configured host, or the host/port given as args.

    Monkey-patches start_server so no real server is launched; the stub
    just prints the host it would bind.
    """
    import tiddlyweb.web.serve
    def start_server(config):
        print('host is %s' % config['server_host']['host'])
    tiddlyweb.web.serve.start_server = start_server

    handle(['', 'server'])
    results, err = capsys.readouterr()
    assert 'host is our_test_domain' in results

    handle(['', 'server', '192.168.1.1', '8001'])
    results, err = capsys.readouterr()
    assert 'host is 192.168.1.1' in results
    # Restore the config mutated by the host/port form of the command.
    config['server_host']['host'] = 'our_test_domain'
def test_lusers(capsys):
    """lusers lists existing users together with their roles."""
    handle(['', 'lusers'])
    results, err = capsys.readouterr()
    # cdent user with role monkey was created above
    assert 'cdent' in results
    assert 'monkey' in results
def test_lbags(capsys):
    """lbags lists the bags created earlier in this module."""
    handle(['', u'lbags'])
    results, err = capsys.readouterr()
    assert 'Name: bag1' in results
def test_lrecipes(capsys):
    """lrecipes lists the recipes created earlier in this module."""
    handle(['', u'lrecipes'])
    results, err = capsys.readouterr()
    assert 'recipe1 ' in results
def test_ltiddlers(capsys):
    """ltiddlers lists tiddlers for all bags, or for a single named bag."""
    handle(['', u'ltiddlers'])
    results, err = capsys.readouterr()
    assert 'bag1' in results
    assert '\ttiddler1' in results

    handle(['', 'ltiddlers', 'bag1'])
    results, err = capsys.readouterr()
    assert 'bag1' in results
    assert '\ttiddler1' in results
def set_stdin(content):
    """Replace sys.stdin with an in-memory buffer holding *content*."""
    sys.stdin = StringIO(content)
| StarcoderdataPython |
8196622 | <gh_stars>1-10
#<NAME> 2018
from Tkinter import *
from math import *
# Root Tk window for the animation (Python 2 Tkinter).
master = Tk()
master.title('Multiplication Animation')
def placeCoordinates():
    """Fill the global `corrdinates` list with 360 (x, y) points on a
    circle of radius scale.get(), centered at (500, 430).

    NOTE(review): x uses sin and y uses cos, so angle 0 is at the top of
    the circle; also each slot's (DoubleVar, DoubleVar) tuple is replaced
    by a plain (float, float) tuple — confirm intended.
    """
    x=0.0
    y=0.0
    for z in range(360):
        x = 500 + scale.get() * sin((z*pi)/180)  # Converting Degrees to Radians
        y = 430 + scale.get() * cos((z*pi)/180)  # Converting Degrees to Radians
        corrdinates[z] = (x, y)  # Adding Coordinate to Tuple List
def draw():
    """Draw one frame: connect node z to node (z * factor) mod 360 for all
    360 circle nodes, cycling through the rainbow palette."""
    for x in range(360):
        value = (x * degree.get()) % 360
        w.create_line(corrdinates[x][0], corrdinates[x][1], corrdinates[int(value)][0], corrdinates[int(value)][1], fill = colors[x % 69])  # Drawing Lines to the Wanted Node
def animate():
    """Advance the multiplication factor and schedule the next frame."""
    degree.set(degree.get() + 0.01)  # slowly increase the multiplier
    master.after(0, draw)
    master.after(0, updateCounter)
    # NOTE(review): w.delete("all") is *called* here (its None result is
    # scheduled), so the canvas clears immediately instead of after 10 ms --
    # confirm whether a lambda was intended.
    master.after(10, w.delete("all"))
    master.after(10, animate)
def updateCounter():
    """Render the current multiplication factor at the bottom of the canvas."""
    w.create_text(500,950, text=("Factor =%s" % str(degree.get())), fill='#EFEFEF', font=("arial", 60))
colors = ["#660000", "#990000", "#CC0000", "#FF0000", "#CC3333", "#FF6666", "#FF9999", "#FFCCCC", "#663300", "#993300",
"#CC3300", "#FF3300", "#FF6600", "#FF6633", "#FF9966", "#FFCC99", "#996633", "#CC9900", "#FFCC00", "#FFFF00",
"#FFFF33", "#FFFF66", "#FFFF99", "#FFFFCC", "#003300", "#006600", "#009900", "#00CC00", "#00FF00", "#66FF66",
"#CCFFCC", "#003333", "#336666", "#009999", "#00CCCC", "#66CCCC", "#66FFCC", "#99FFCC", "#003399", "#0033FF",
"#0066FF", "#00CCFF", "#00FFFF", "#99FFFF", "#CCFFFF", "#000066", "#000099", "#0000CC", "#0000FF", "#3366FF",
"#3399FF", "#66CCFF", "#99CCFF", "#330066", "#660099", "#663399", "#9900CC", "#9933FF", "#9966FF", "#9999FF",
"#CCCCFF", "#660066", "#990066", "#CC0099", "#FF0099", "#FF00FF", "#FF66FF", "#FF99FF", "#FFCCFF"] #Rainbow Gradient: Copy Paste Values, Could have used loop to Generate
w = Canvas(master, width=1000, height=1000)
w.pack()
w.configure(background='black')
scale = IntVar()
scale.set(420)
degree = DoubleVar()
degree.set(0) #Start Value
x,y = [],[]
for i in range(360):
xcorrdinate = DoubleVar()
x.append(xcorrdinate)
ycorrdinate = DoubleVar()
y.append(ycorrdinate)
corrdinates = list(zip(x, y)) #Conflating both Lists to a List of Touple
placeCoordinates()
master.after(0, animate)
master.mainloop()
| StarcoderdataPython |
3301503 | # Some components are reused in each app. Put here for easier code readability
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
def make_navbar(active=0):
    """Build the shared top navigation bar.

    :param active: index (0-2) of the page whose nav item gets the
        "active" CSS class.
    """
    css_classes = ['', '', '']
    css_classes[active] = "active"
    links = [
        ("Matrices", "/page-1", "page-1-nav"),
        ("Network Visualization", "page-2", "page-2-nav"),
        ("Network Statistics", "page-3", "page-3-nav"),
    ]
    items = [
        dbc.NavItem(dbc.NavLink(label, href=href), id=nav_id, className=cls)
        for (label, href, nav_id), cls in zip(links, css_classes)
    ]
    return dbc.NavbarSimple(
        children=items,
        brand="ARETE BayesTraits",
        brand_href="/",
        color="primary",
        dark=True,
    )
| StarcoderdataPython |
8085560 | <gh_stars>1-10
# coding: utf-8
"""
Honeywell Home
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class LocationConfigurationFaceRecognition(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually (edits here are limited to comments;
    code will be overwritten on regeneration).
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # OpenAPI type of each attribute (consumed by to_dict()).
    openapi_types = {
        'enabled': 'bool',
        'max_persons': 'float',
        'max_etas': 'float',
        'max_eta_persons': 'float',
        'schedules': 'list[LocationConfigurationFaceRecognitionSchedules]'
    }
    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'enabled': 'enabled',
        'max_persons': 'maxPersons',
        'max_etas': 'maxEtas',
        'max_eta_persons': 'maxEtaPersons',
        'schedules': 'schedules'
    }
    def __init__(self, enabled=None, max_persons=None, max_etas=None, max_eta_persons=None, schedules=None):  # noqa: E501
        """LocationConfigurationFaceRecognition - a model defined in OpenAPI"""  # noqa: E501
        self._enabled = None
        self._max_persons = None
        self._max_etas = None
        self._max_eta_persons = None
        self._schedules = None
        self.discriminator = None  # no polymorphic subtypes for this model
        # Only assign attributes that were explicitly provided.
        if enabled is not None:
            self.enabled = enabled
        if max_persons is not None:
            self.max_persons = max_persons
        if max_etas is not None:
            self.max_etas = max_etas
        if max_eta_persons is not None:
            self.max_eta_persons = max_eta_persons
        if schedules is not None:
            self.schedules = schedules
    @property
    def enabled(self):
        """Gets the enabled of this LocationConfigurationFaceRecognition.  # noqa: E501

        :return: The enabled of this LocationConfigurationFaceRecognition.  # noqa: E501
        :rtype: bool
        """
        return self._enabled
    @enabled.setter
    def enabled(self, enabled):
        """Sets the enabled of this LocationConfigurationFaceRecognition.

        :param enabled: The enabled of this LocationConfigurationFaceRecognition.  # noqa: E501
        :type: bool
        """
        self._enabled = enabled
    @property
    def max_persons(self):
        """Gets the max_persons of this LocationConfigurationFaceRecognition.  # noqa: E501

        :return: The max_persons of this LocationConfigurationFaceRecognition.  # noqa: E501
        :rtype: float
        """
        return self._max_persons
    @max_persons.setter
    def max_persons(self, max_persons):
        """Sets the max_persons of this LocationConfigurationFaceRecognition.

        :param max_persons: The max_persons of this LocationConfigurationFaceRecognition.  # noqa: E501
        :type: float
        """
        self._max_persons = max_persons
    @property
    def max_etas(self):
        """Gets the max_etas of this LocationConfigurationFaceRecognition.  # noqa: E501

        :return: The max_etas of this LocationConfigurationFaceRecognition.  # noqa: E501
        :rtype: float
        """
        return self._max_etas
    @max_etas.setter
    def max_etas(self, max_etas):
        """Sets the max_etas of this LocationConfigurationFaceRecognition.

        :param max_etas: The max_etas of this LocationConfigurationFaceRecognition.  # noqa: E501
        :type: float
        """
        self._max_etas = max_etas
    @property
    def max_eta_persons(self):
        """Gets the max_eta_persons of this LocationConfigurationFaceRecognition.  # noqa: E501

        :return: The max_eta_persons of this LocationConfigurationFaceRecognition.  # noqa: E501
        :rtype: float
        """
        return self._max_eta_persons
    @max_eta_persons.setter
    def max_eta_persons(self, max_eta_persons):
        """Sets the max_eta_persons of this LocationConfigurationFaceRecognition.

        :param max_eta_persons: The max_eta_persons of this LocationConfigurationFaceRecognition.  # noqa: E501
        :type: float
        """
        self._max_eta_persons = max_eta_persons
    @property
    def schedules(self):
        """Gets the schedules of this LocationConfigurationFaceRecognition.  # noqa: E501

        :return: The schedules of this LocationConfigurationFaceRecognition.  # noqa: E501
        :rtype: list[LocationConfigurationFaceRecognitionSchedules]
        """
        return self._schedules
    @schedules.setter
    def schedules(self, schedules):
        """Sets the schedules of this LocationConfigurationFaceRecognition.

        :param schedules: The schedules of this LocationConfigurationFaceRecognition.  # noqa: E501
        :type: list[LocationConfigurationFaceRecognitionSchedules]
        """
        self._schedules = schedules
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, LocationConfigurationFaceRecognition):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| StarcoderdataPython |
9626669 | '''helpers docstring'''
import collections
def onset(f):
    """Return the 1-set of expression *f* as a list of ints (DC not allowed).

    Each satisfying assignment from f.satisfy_all() is turned into a binary
    string -- variables in reverse-sorted order, most significant first --
    and parsed as a base-2 integer.
    """
    ret = []
    for term in list(f.satisfy_all()):
        ordered = collections.OrderedDict(reversed(sorted(term.items())))
        # Join the 0/1 literals directly instead of concatenating in a loop.
        bits = ''.join(str(literal) for literal in ordered.values())
        ret.append(int(bits, 2))
    return ret
def bitstring2expr(bitstrings, variable_list):
    """Convert a list of bitstrings into a boolean expression string.

    Each bitstring is read LSB-first: '1' yields variable_list[i], '0'
    yields '~' + variable_list[i], and '-' (don't care) is skipped.
    Literals are AND-joined per bitstring; terms are OR-joined.

    NOTE(review): the variable index advances only on non-'-' characters,
    so after a don't-care the following bits map to the *next* entry of
    variable_list rather than the positional one -- confirm this matches
    the intended cube semantics.
    """
    # (Removed the unused local `string` from the original.)
    ret_list = []
    for bitstring in bitstrings:
        tmp_list = []
        var = 0
        for character in reversed(bitstring):
            if character != '-':
                if character == '1':
                    tmp_list.append(variable_list[var])
                else:
                    tmp_list.append('~' + variable_list[var])
                var = var + 1
        ret_list.append(' & '.join(tmp_list))
    return ' | '.join(ret_list)
def variables(f):
    """Return all variables of boolean expression *f* as sorted strings."""
    return [str(i) for i in sorted(f.inputs)]
| StarcoderdataPython |
9727381 | <reponame>soheltarir/grpc-django<gh_stars>10-100
from django.core.exceptions import ObjectDoesNotExist
from grpc_django.interfaces import rpc
from grpc_django.views import RetrieveGRPCView, ServerStreamGRPCView
from tests.grpc_codegen.test_pb2 import User
# In-memory user fixtures shared by the RPC views below.
USERS = [{
    "id": 1,
    "name": "<NAME>",
    "username": "bruce.wayne"
}, {
    "id": 2,
    "name": "<NAME>",
    "username": "clary.fairchild"
}]
class UserSerializer:
    """Minimal serializer stub: exposes the wrapped object unchanged."""
    def __init__(self, obj):
        self.obj = obj
    @property
    def data(self):
        # The "serialized" form is simply the original object.
        return self.obj
class GetUser(RetrieveGRPCView):
    """Unary RPC view returning a single user looked up by id."""
    response_proto = User
    serializer_class = UserSerializer

    def get_queryset(self):
        """All known users (static in-memory fixture)."""
        return USERS

    def get_object(self):
        """Return the user whose id matches the request's lookup field.

        Raises ObjectDoesNotExist when no user matches.
        """
        wanted = getattr(self.request, self.lookup_kwarg)
        for candidate in self.get_queryset():
            if candidate["id"] == wanted:
                return candidate
        raise ObjectDoesNotExist("User matching query does not exists.")
class ListUsers(ServerStreamGRPCView):
    """Server-streaming RPC view: streams every user in the USERS fixture."""
    response_proto = User
    serializer_class = UserSerializer
    def get_queryset(self):
        # Static fixture; a real implementation would query the database.
        return USERS
# RPC routing table consumed by grpc_django: maps .proto method names
# to their view classes.
rpcs = [
    rpc("GetUser", GetUser),
    rpc("ListUsers", ListUsers)
]
| StarcoderdataPython |
396580 | <filename>part7/44.py
import json
def make_new_dictionary(products_list):
    """Index products by name.

    :param products_list: iterable of dicts with "name", "price", "quantity"
    :return: {name: [price, quantity]}
    """
    return {item["name"]: [item["price"], item["quantity"]]
            for item in products_list}
def main():
    """Prompt for a product name and report its price and quantity.

    Product data is loaded from ../data/44.json; unknown names re-prompt.
    """
    messages = {
        "product": "\nWhat is the product name? ",
        "sorry": "\nSorry, that product was not found in our inventory.",
        "name": "\nName: {}",
        "price": "Price: {}",
        "quantity": "Quantity on hand: {}",
    }
    # Context manager replaces the manual open/close pair.
    with open("../data/44.json", "rt", encoding="utf-8") as fin:
        product_json = fin.read()
    product_dictionary = json.loads(product_json)
    new_dictionary = make_new_dictionary(product_dictionary["products"])
    while True:
        product_name = input(messages["product"])
        if product_name in new_dictionary:
            price, quantity = new_dictionary[product_name]
            print(messages["name"].format(product_name))
            print(messages["price"].format(price))
            # BUG FIX: the original printed the "Price:" template here,
            # so the quantity line read "Price: <quantity>".
            print(messages["quantity"].format(quantity))
            break
        print(messages["sorry"])
main()  # run the interactive lookup when the script is executed
| StarcoderdataPython |
3553058 | <filename>prisprob.py
from Prisoner import Prisoner
from Box import Box
import random
def main():
    """Run 10000 trials of the 100-prisoners problem with the
    cycle-following strategy and print the observed success rate."""
    # 100 prisoners, numbered 0..99.
    list_of_prisoners = [Prisoner(number) for number in range(100)]
    # Random permutation deciding which number each box contains.
    random_val_array = list(range(100))
    random.shuffle(random_val_array)
    list_of_boxes = [Box(number, random_val_array[number])
                     for number in range(100)]
    false_ones = 0
    correct_ones = 0
    # (The original reused `n` for both the trial loop and the prisoner
    # loop; renamed to avoid the shadowing.)
    for _trial in range(10000):
        # A trial succeeds only if *every* prisoner's cycle is short enough.
        if all(check_if_in_loop(prisoner, list_of_boxes)
               for prisoner in list_of_prisoners):
            correct_ones = correct_ones + 1
        else:
            false_ones = false_ones + 1
        # Reshuffle box contents for the next trial; enumerate() replaces
        # the O(n^2) list.index() lookup of the original.
        random.shuffle(random_val_array)
        for index, curbox in enumerate(list_of_boxes):
            curbox.set_val_in_box(random_val_array[index])
    print(correct_ones / (correct_ones + false_ones))
def check_if_in_loop(prisoner, list_of_boxes):
    """Follow the box chain starting at the prisoner's own box and report
    whether their number turns up in fewer than 50 steps."""
    steps = 0
    box = list_of_boxes[prisoner.id]
    while box.val_in_box != prisoner.id:
        steps += 1
        box = list_of_boxes[box.val_in_box]
    return steps < 50
main()  # start the simulation when the module is executed
| StarcoderdataPython |
6493281 | <gh_stars>1-10
from .SLearner import SLearner
from .TLearner import TLearner
from .XLearner import XLearner
| StarcoderdataPython |
1638960 | <reponame>SergeyShurkhovetckii/Best-Current_Python_telegram_bot<filename>bot.py
import config #Конфинурация для Telegram Bot
import requests # Модуль для обработки URL
from bs4 import BeautifulSoup as BS # Модуль для работы с HTML
import time # Модуль для остановки программы
import telebot
import emoji #Смайлики
from telebot import types
import re
# --- Scraping: exchange-rate pages on bankiros.ru ---
Main = "https://kaliningrad.bankiros.ru/currency"
MAin_CB = "https://bankiros.ru/currency/cbrf"
Main_Moex ="https://bankiros.ru/currency/moex/usdrub-tod"
Main_Moex_euro = "https://bankiros.ru/currency/moex/eurrub-tod"
# Headers sent along with each URL (desktop-browser user agent).
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'}
#######
# Fetch each page: exchange offices, central bank, MOEX USD and EUR.
current_alls = requests.get(Main ,headers=headers)
current_CB = requests.get(MAin_CB,headers=headers)
current_moex_USD = requests.get(Main_Moex,headers=headers)
current_moex_EURO = requests.get(Main_Moex_euro,headers=headers)
####################
soup_current_all = BS(current_alls.content,'html.parser')
soup_current_CB = BS(current_CB.content,'html.parser')
soup_current_moex_usd = BS(current_moex_USD.content,'html.parser')
soup_current_moex_euro = BS(current_moex_EURO.content,'html.parser')
# Final parsing: pick out the rate <span>/<div> elements from each page.
conver_soup_dollars = soup_current_all.find_all("span",{"class":"conv-val triger-usd"})
conver_soup_euro = soup_current_all.find_all("span",{"class":"conv-val triger-eur"})
conver_soup_pl = soup_current_all.find_all("span",{"class":"conv-val triger-pln"})
conver_soup_time = soup_current_all.find_all("div",{"class":"actual-currency"})
conver_soup_moex_usd = soup_current_moex_usd.find_all("span",{"class":"xxx-font-size-30 xxx-text-bold"})
conver_soup_moex_usd_time = soup_current_moex_usd.find_all("span",{"class":"xxx-trading-preview__date xxx-font-size-14 xxx-text-color-darck-gray"})
conver_soup_moex_euro = soup_current_moex_euro.find_all("span",{"class":"xxx-font-size-30 xxx-text-bold"})
conver_soup_moex_euro_time = soup_current_moex_euro.find_all("span",{"class":"xxx-trading-preview__date xxx-font-size-14 xxx-text-color-darck-gray"})
# bank_name = soup_current_all.find("td",{"class":"currency-value"})
# for i in bank_name.find_all('a'):
# print(i.text)
# Dollar rates: buy / sell / central bank / MOEX + MOEX timestamp.
USD_BUY = conver_soup_dollars[0].text
USD_SELL = conver_soup_dollars[1].text
USD_CB = conver_soup_dollars[2].text
USD_TR = conver_soup_moex_usd[0].text
USD_TR_time = conver_soup_moex_usd_time[0].text
# Euro rates.
EURO_BUY = conver_soup_euro[0].text
EURO_SELL = conver_soup_euro[1].text
EURO_CB = conver_soup_euro[2].text
EURO_TR = conver_soup_moex_euro[0].text
EURO_TR_time = conver_soup_moex_euro_time[0].text
# Polish zloty rates.
PL_BUY = conver_soup_pl[0].text
PL_SELL = conver_soup_pl[1].text
# Misc: timestamp of the exchange-office quotes.
actual_time = conver_soup_time[0].text
#########################################
# Bot setup.
bot = telebot.TeleBot(config.token)
@bot.message_handler(commands=['start'])
def get_user_info(message):
    """Handle /start: greet the user and show the 'begin' inline button."""
    # Step 1: inline keyboard with a single "start" button.
    markup_inline =types.InlineKeyboardMarkup(row_width=1)
    btn_inline_1 = types.InlineKeyboardButton(text=" Начать ",callback_data = 'current')
    markup_inline.add(btn_inline_1)
    bot.send_message(message.chat.id, "Привет👋🏻" + message.from_user.first_name + " я бот \n \n Моя задача показывать лучший курс валюты в Калининграде. \n \n Курс валюты по Центральному банку \n \n Курс валюты Московской биржы \n \n \n Для повторного запуска используете комманду /start или напишите в чат /start " ,reply_markup = markup_inline)
@bot.callback_query_handler(func=lambda call:True)
def answer(call):
    """Route inline-keyboard callbacks: show submenus or send rate text."""
    # Step 1 -> step 2: top-level menu with the three sections.
    if call.data== 'current':
        markup_inline_step_2 =types.InlineKeyboardMarkup(row_width=3)
        btn_inline_6_step_2 = types.InlineKeyboardButton(text="Обмена",callback_data = 'BB')
        btn_inline_4_step_2 = types.InlineKeyboardButton(text="Курс ЦБ",callback_data = 'cb')
        btn_inline_5_step_2 = types.InlineKeyboardButton(text="Курс Биржы",callback_data = 'tr')
        markup_inline_step_2.add(btn_inline_4_step_2,btn_inline_5_step_2,btn_inline_6_step_2)
        msg = bot.send_message(call.message.chat.id,"✅Пожалуйста выберете раздел",reply_markup = markup_inline_step_2)
    # Step 2: exchange-office submenu (currency picker).
    if call.data== 'BB':
        markup_inline_step_21 =types.InlineKeyboardMarkup(row_width=2)
        btn_inline_1_step_21 = types.InlineKeyboardButton(text="🇺🇸 Доллар ",callback_data = 'dollars')
        btn_inline_2_step_21 = types.InlineKeyboardButton(text="🇪🇺 Евро",callback_data = 'euro')
        btn_inline_3_step_21 = types.InlineKeyboardButton(text="🇵🇱 PL",callback_data = 'pln')
        markup_inline_step_21.add(btn_inline_1_step_21,btn_inline_2_step_21,btn_inline_3_step_21)
        bot.send_message(call.message.chat.id," \n \n ✅Узнать самый выгодный курс в пунтках обмена",reply_markup = markup_inline_step_21)
    # Step 3: best exchange-office rates (dollar).
    elif call.data =='dollars':
        bot.send_message(call.message.chat.id,"🇺🇸 Покупка|Продажа \n \n☑️ {0} | {1} \n \n \n Время обновления МСК {2} ".format(USD_BUY,USD_SELL,actual_time ))
    # Step 3: best exchange-office rates (euro).
    elif call.data =='euro':
        bot.send_message(call.message.chat.id,"🇪🇺 Покупка|Продажа \n \n ☑️ {0} | {1} \n \n \n Время обновления МСК {2} ".format(EURO_BUY,EURO_SELL,actual_time ))
    # Step 3: best exchange-office rates (zloty).
    elif call.data =='pln':
        bot.send_message(call.message.chat.id,"🇵🇱 Покупка|Продажа \n \n ☑️ {0} | {1} \n \n \n Время обновления МСК {2} ".format(PL_BUY,PL_SELL,actual_time ))
    # Step 4: central-bank submenu.
    if call.data =='cb':
        markup_inline_step_3 =types.InlineKeyboardMarkup(row_width=2)
        btn_inline_1_step_3 = types.InlineKeyboardButton(text="🇺🇸 Доллар ",callback_data = 'dollars_cb')
        btn_inline_2_step_3 = types.InlineKeyboardButton(text="🇪🇺 Евро",callback_data = 'euro_cb')
        markup_inline_step_3.add(btn_inline_1_step_3,btn_inline_2_step_3)
        bot.send_message(call.message.chat.id," \n \n ✅ Узнать курс по Центральному Банку ",reply_markup = markup_inline_step_3)
    # Step 4.1: central-bank dollar rate.
    elif call.data =='dollars_cb':
        bot.send_message(call.message.chat.id,'\n \n'"🇺🇸 {} ".format(USD_CB))
    # Step 4.2: central-bank euro rate.
    elif call.data == 'euro_cb':
        bot.send_message(call.message.chat.id,'\n \n'"🇪🇺 {} ".format(EURO_CB))
    # Step 5: Moscow Exchange submenu.
    if call.data =='tr':
        markup_inline_step_4 =types.InlineKeyboardMarkup(row_width=2)
        btn_inline_1_step_4 = types.InlineKeyboardButton(text="🇺🇸 Доллар ",callback_data = 'dollars_tr')
        btn_inline_2_step_4 = types.InlineKeyboardButton(text="🇪🇺 Евро",callback_data = 'euro_tr')
        markup_inline_step_4.add(btn_inline_1_step_4,btn_inline_2_step_4)
        bot.send_message(call.message.chat.id,"\n \n ✅ Узнать курс Московской Биржы ",reply_markup = markup_inline_step_4)
    # Step 5.1: MOEX dollar rate with timestamp.
    elif call.data =='dollars_tr':
        bot.send_message(call.message.chat.id,'\n \n'"🇺🇸 {0} \n \n Время обновления МСК {1} ".format(USD_TR,USD_TR_time))
    # Step 5.2: MOEX euro rate with timestamp.
    elif call.data == 'euro_tr':
        bot.send_message(call.message.chat.id,'\n \n'"🇪🇺 {0} \n \n Время обновления МСК {1} ".format(EURO_TR,EURO_TR_time))
@bot.message_handler(commands=['help'])
def get_user_help(message):
    """Handle /help: credit the author and the data source."""
    bot.send_message(message.chat.id, "Привет👋🏻" + message.from_user.first_name + " мой создатель @S19S93 , вся информация была взята https://kaliningrad.bankiros.ru/ ")
# Start long-polling; none_stop keeps the bot alive through API errors.
bot.polling(none_stop=True)
8056010 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import math
# from random import uniform, randint
import pygame
# from enum import Enum
import time
import rospy
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
# Global constants: pixel size of map cells, plus timing/window parameters.
CELL_WIDTH = 25 # cell width in pixels
CELL_HEIGHT = 25 # cell height in pixels
BORDER_WIDTH = 1 # border width in pixels
REFLESH = 0.2 # seconds each visualization frame stays on screen
WINDOW = 30 # window size, in cells
class Color(object):
    """Color constants (RGB tuples) used when drawing the grid.

    NOTE(review): `Visualization.draw` accesses e.g. `Color.GRID.value`,
    which only works if this class is an `enum.Enum` (that import is
    commented out above); as plain tuples these attributes have no
    `.value` -- confirm whether this should subclass Enum.
    """
    GRID = (190, 235, 243)   # walkable cell
    OBJECT = (65, 20, 243)   # current-position marker
    END = (255, 0, 0)        # goal cell
    BLOCK = (0, 0, 0)        # obstacle cell

    @staticmethod
    def random_color():
        """Return a random (r, g, b) tuple, each channel in [0, 255]."""
        # Local import: the module-level `from random import ...` is
        # commented out, so without this the call raised NameError.
        from random import randint
        r = randint(0, 255)
        g = randint(0, 255)
        b = randint(0, 255)
        return r, g, b
class Map(object):
    """A rectangular map measured in pixels."""

    def __init__(self, mapsize):
        # (width, height) of the whole map, in pixels.
        self.mapsize = mapsize

    def generate_cell(self, cell_width, cell_height):
        """Yield the top-left pixel coordinate of every grid cell.

        :param cell_width: cell width in pixels
        :param cell_height: cell height in pixels
        :return: generator of (x, y) pixel coordinates, column-major
        """
        cols = self.mapsize[0] // cell_width
        rows = self.mapsize[1] // cell_height
        for col in range(cols):
            for row in range(rows):
                yield (col * cell_width, row * cell_height)
def transform(pos):
    """Convert a (col, row) grid coordinate into its top-left pixel coordinate."""
    return pos[0] * CELL_WIDTH, pos[1] * CELL_HEIGHT
def floor_tuple(scale, pos=tuple()):
    """Discretize a continuous point onto the grid.

    :param scale: down-scaling factor
    :param pos: continuous (x, y, ...) coordinate; only x and y are used
    :return: (floor(x/scale), floor(y/scale))
    """
    return tuple(math.floor(float(c) / scale) for c in pos[:2])
class Blocklist(object):
    """
    Discretizes raw point-cloud data into a list of unique grid cells
    (the block list used by A*).
    """
    def __init__(self, scale, point_cloud_data):
        """
        :param scale: down-scaling factor applied to each point
        :param point_cloud_data: raw point-cloud points (x, y, ...)
        """
        self.scale = scale
        self.pcd = point_cloud_data
        self.blocklist = list()

    def pcd_discretization(self):
        """Return the unique discretized cells, in first-seen order.

        A set gives O(1) duplicate detection instead of the original
        O(n) list-membership test per point.
        """
        seen = set(self.blocklist)
        for point in self.pcd:
            block = floor_tuple(self.scale, point)
            if block not in seen:
                seen.add(block)
                self.blocklist.append(block)
        return self.blocklist
# def create_pcd():
# w = list()
# for i in range(0, 50):
# x = tuple((uniform(-30, 30), uniform(-30, 30), randint(-25, -25)))
# if x not in w:
# w.append(x)
# return w
class Visualization(object):
    """
    Visualize the discretized point cloud and current position on a
    pygame grid (custom viewer; stock tooling did not fit the project).
    """
    def __init__(self, blocklist, pos_now, pos_end, mapsize):
        # Shift everything so all four quadrants fit into the window.
        self.pos_now, self.blocklist, self.mapsize = self.change_xy(pos_now, blocklist, mapsize)
        self.pos_end = pos_end

    @staticmethod
    def draw(mymap, screen, bl_pix, pix_now):
        """Render every grid cell plus the current-position marker.

        NOTE(review): Color.X.value only works if Color is an enum.Enum;
        as written Color holds plain tuples -- confirm.
        """
        for (x, y) in mymap.generate_cell(CELL_WIDTH, CELL_HEIGHT):
            if (x, y) in bl_pix:
                # Obstacle cell, leaving a BORDER_WIDTH-pixel frame.
                pygame.draw.rect(screen, Color.BLOCK.value,
                                 ((x+BORDER_WIDTH, y+BORDER_WIDTH),
                                  (CELL_WIDTH-2*BORDER_WIDTH, CELL_HEIGHT-2*BORDER_WIDTH))
                                 )
            else:
                # Walkable cell, leaving a BORDER_WIDTH-pixel frame.
                pygame.draw.rect(screen, Color.GRID.value,
                                 ((x+BORDER_WIDTH, y+BORDER_WIDTH),
                                  (CELL_WIDTH-2*BORDER_WIDTH, CELL_HEIGHT-2*BORDER_WIDTH))
                                 )
        pygame.draw.circle(screen, Color.OBJECT.value,
                           (pix_now[0]+CELL_WIDTH//2, pix_now[1]+CELL_HEIGHT//2), CELL_WIDTH//2 - 1)
        pygame.display.flip()

    @staticmethod
    def change_xy(pos_now, blocklist, mapsize=50):
        """Translate from a four-quadrant frame (origin at the centre)
        into the first-quadrant frame used for drawing.

        BUG FIX: the original built pos_now as (tmp + x, tmp + x),
        discarding the y coordinate; the second component now correctly
        uses pos_now[1].
        """
        mapsize = mapsize + 2
        tmp = math.floor(mapsize/2)-1
        pos_now = tuple((tmp+pos_now[0], tmp+pos_now[1]))
        blocklist = list(map(lambda block: tuple((tmp+block[0], tmp+block[1])), blocklist))
        return pos_now, blocklist, mapsize

    def visual(self):
        """Open/refresh the pygame window for REFLESH seconds."""
        pygame.init()
        # Convert the map size to pixels (CELL_WIDTH x CELL_HEIGHT per cell).
        mymap = Map((self.mapsize*CELL_WIDTH, self.mapsize*CELL_HEIGHT))
        screen = pygame.display.set_mode((WINDOW*CELL_WIDTH, WINDOW*CELL_HEIGHT))
        t_end = time.time() + REFLESH
        while time.time() < t_end:
            pygame.display.set_caption('点云数据与目标位置演示:')
            bl_pix = list(map(transform, self.blocklist))  # grid -> pixels
            pix_now = (self.pos_now[0]*CELL_WIDTH, self.pos_now[1]*CELL_HEIGHT)
            self.draw(mymap, screen, bl_pix, pix_now)
def main(msg):
    """Point-cloud subscriber callback: discretize the cloud and show it.

    NOTE(review): the `while True` loop only returns on a pygame QUIT
    event, so the callback never yields control back to ROS otherwise --
    confirm this is intended for a subscriber callback.
    """
    scale = 2
    while True:
        # point_cloud_data = create_pcd()
        point_cloud_data = pc2.read_points_list(msg,field_names=("x", "y"),skip_nans=True)
        # print(point_cloud_data)
        create_block = Blocklist(scale, point_cloud_data)
        blocklist = create_block.pcd_discretization()
        # print(blocklist)
        pos_now = tuple((0, 0))
        pos_end = tuple((39, 39))
        vis = Visualization(blocklist, pos_now, pos_end, mapsize=30)
        vis.visual()
        for event in pygame.event.get():
            pygame.display.flip()
            if event.type == pygame.QUIT:
                pygame.quit()
                return
if __name__ == "__main__":
    rospy.init_node("fsaf")
    rospy.Subscriber("/PointClouds",PointCloud2,main,queue_size=1)
    # NOTE(review): there is no rospy.spin() here, so the process may exit
    # immediately after registering the subscriber -- confirm.
| StarcoderdataPython |
6668257 | #!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
from fabric.api import *
import yaml, json, sys
import argparse
import os
# CLI arguments configuring DLab billing (AWS credentials, bucket, tags).
parser = argparse.ArgumentParser()
parser.add_argument('--cloud_provider', type=str,
                    help='Where DLab should be deployed. Available options: aws')
parser.add_argument('--infrastructure_tag', type=str, help='unique name for DLab environment')
parser.add_argument('--access_key_id', default='', type=str, help='AWS Access Key ID')
parser.add_argument('--secret_access_key', default='', type=str, help='AWS Secret Access Key')
parser.add_argument('--tag_resource_id', type=str, default='user:tag', help='The name of user tag')
parser.add_argument('--account_id', type=str, help='The ID of ASW linked account')
parser.add_argument('--billing_bucket', type=str, help='The name of bucket')
parser.add_argument('--report_path', type=str, default='', help='The path to report folder')
parser.add_argument('--mongo_password', type=str, help='The password for <PASSWORD> DB')
parser.add_argument('--dlab_dir', type=str, help='The path to dlab dir')
args = parser.parse_args()
def yml_billing(path):
try:
with open(path, 'r') as config_yml_r:
config_orig = config_yml_r.read()
if args.cloud_provider == 'aws':
config_orig = config_orig.replace('<BILLING_BUCKET_NAME>', args.billing_bucket)
config_orig = config_orig.replace('<REPORT_PATH>', args.report_path)
config_orig = config_orig.replace('<ACCOUNT_ID>', args.account_id)
config_orig = config_orig.replace('<ACCESS_KEY_ID>', args.access_key_id)
config_orig = config_orig.replace('<SECRET_ACCESS_KEY>', args.secret_access_key)
config_orig = config_orig.replace('<MONGODB_PASSWORD>', args.mongo_password)
config_orig = config_orig.replace('<CONF_TAG_RESOURCE_ID>', args.tag_resource_id)
config_orig = config_orig.replace('<CONF_SERVICE_BASE_NAME>', args.infrastructure_tag)
f = open(path, 'w')
f.write(config_orig)
f.close()
except:
print "Could not write the target file " + path
sys.exit(1)
def yml_self_service(path):
try:
with open(path, 'r') as config_yml_r:
config_orig = config_yml_r.read()
config_orig = config_orig.replace('billingSchedulerEnabled: false', 'billingSchedulerEnabled: true')
f = open(path, 'w')
f.write(config_orig)
f.close()
except:
print "Could not write the target file " + path
sys.exit(1)
##############
# Run script #
##############
if __name__ == "__main__":
    print "Configure billing"
    # Check cloud provider
    # Access to the bucket without credentials?
    # Patch both config templates; any failure exits non-zero.
    try:
        yml_billing(args.dlab_dir + 'conf/billing.yml')
        yml_self_service(args.dlab_dir + 'conf/self-service.yml')
    except:
        sys.exit(1)
    sys.exit(0)
| StarcoderdataPython |
8160518 | """Script to make a master catalog of 2D material thicknesses
Usage:
- Set in_file (line ) to file location of the npz catalog for desired material/substrate.
- Set out_file (line ) for how to save the master catalog for desired material/substrate.
- Set layers_id (line ) to the number of layers you can fit to."""
import numpy as np
from read_npz import npz2dict
# Input/output catalog locations -- edit these per material/substrate.
in_file = '.\\Monolayer Search\\Graphene_on_SiO2_catalog.npz'
out_file = '.\\Monolayer Search\\Graphene_on_SiO2_master_catalog.npz'
in_file_dict = npz2dict(in_file)
# Group catalog keys by flake name (everything before the second space).
flake_group_dict = {}
for key in in_file_dict:
    flake_name = key[:(np.array([c for c in key])==' ').nonzero()[0][1]] ## s is a string, returns everything before the second space
    if not flake_name in flake_group_dict:
        flake_group_dict[flake_name] = []
    flake_group_dict[flake_name].append(key)
## Dictionaries to categorize the data (per-layer training lists)
weight_dict = {}
blue_mean_dict = {}
green_mean_dict = {}
red_mean_dict = {}
cov_dict = {}
layers_id = 5 ## The number of layers to test for
for tt in range(layers_id): ## Each layer contains a list, which will be populated with the training data for that layer
    weight_dict[tt] = []
    blue_mean_dict[tt] = []
    green_mean_dict[tt] = []
    red_mean_dict[tt] = []
    cov_dict[tt] = []
missing_track = np.zeros(layers_id) ## Failsafe for layers that did not get trained
# For each flake, pull the per-layer Gaussian parameters; flakes with no
# data for a layer are tallied in missing_track.
for ff in flake_group_dict:
    for tt in range(layers_id):
        try:
            weight_dict[tt].append(in_file_dict[flake_group_dict[ff][0]][(in_file_dict[flake_group_dict[ff][5]]==tt).nonzero()[0][0]])
            blue_mean_dict[tt].append(in_file_dict[flake_group_dict[ff][1]][(in_file_dict[flake_group_dict[ff][5]]==tt).nonzero()[0][0]])
            green_mean_dict[tt].append(in_file_dict[flake_group_dict[ff][2]][(in_file_dict[flake_group_dict[ff][5]]==tt).nonzero()[0][0]])
            red_mean_dict[tt].append(in_file_dict[flake_group_dict[ff][3]][(in_file_dict[flake_group_dict[ff][5]]==tt).nonzero()[0][0]])
            cov_dict[tt].append(in_file_dict[flake_group_dict[ff][4]][(in_file_dict[flake_group_dict[ff][5]]==tt).nonzero()[0][0]])
        except IndexError:
            missing_track[tt] += 1
            print(f'No layer {tt} information for {ff}.')
            continue
# Drop any layer for which *no* flake provided data.
if np.any(missing_track == len(flake_group_dict)):
    layer_rej = (missing_track == len(flake_group_dict)).nonzero()[0]
    for key in layer_rej:
        weight_dict.pop(key)
        blue_mean_dict.pop(key)
        green_mean_dict.pop(key)
        red_mean_dict.pop(key)
        cov_dict.pop(key)
# Average per-flake parameters into the master catalog entries.
master_weights = {}
master_blue_mean = {}
master_green_mean = {}
master_red_mean = {}
master_cov = {}
for key in weight_dict:
    master_weights[f'weights-{key}'] = np.mean(weight_dict[key])
    master_blue_mean[f'blue mean-{key}'] = np.mean(blue_mean_dict[key])
    master_green_mean[f'green mean-{key}'] = np.mean(green_mean_dict[key])
    master_red_mean[f'red mean-{key}'] = np.mean(red_mean_dict[key])
    master_cov[f'covariance-{key}'] = np.mean(cov_dict[key], axis=0)
# Persist and echo the master catalog.
with open(out_file, 'wb') as f:
    np.savez(f, **master_weights, **master_blue_mean, **master_green_mean,
             **master_red_mean, **master_cov)
print(master_weights)
print(master_blue_mean)
print(master_green_mean)
print(master_red_mean)
print(master_cov)
print(f'\"{out_file}\" updated.')
| StarcoderdataPython |
1649041 | from django.conf.urls import url, include
from material.frontend import urls as frontend_urls
from . import views, tinder, resume_upload, resume_retrieval, take_picture
# URL routes for the app; `name` values are used for reverse() lookups.
# NOTE(review): the two ^review patterns share name='review_page'
# (with/without an idx capture) -- confirm reverse() resolves as intended.
urlpatterns = [
    url(r'^review/(?P<idx>[0-9]+)/', tinder.review_page, name='review_page'),
    url(r'^review', tinder.review_page, name='review_page'),
    url(r'^thumbnails/(?P<hiree_id>[0-9]+)/', tinder.hiree_thumbnail_page, name='thumbnails'),
    url(r'^browse', tinder.browse, name='browse'),
    url(r'^upload', resume_upload.upload_resume, name='upload_resume'),
    url(r'^retrieve', resume_retrieval.index, name='retrieve'),
    url(r'^picture', take_picture.index, name='picture'),
    url(r'^home', views.index, name='home'),
    url(r'^about', views.about, name='about'),
    url(r'^_imagepaths', resume_retrieval.face_list, name='face_list'),
    url(r'^$', views.index, name='index')
]
9691195 | <filename>src/bitcaster/utils/tests/factories.py
import datetime
import os
import random
from contextlib import ContextDecorator
from random import choice
import factory
import pytz
from django.contrib.auth.models import Group, Permission
from factory.base import FactoryMetaClass
from factory.fuzzy import FuzzyDateTime, FuzzyText
from faker import Faker
from pytz import UTC
from rest_framework.test import APIClient
import bitcaster
from bitcaster import models
from bitcaster.agents import ImapAgent
from bitcaster.dispatchers import Email
from bitcaster.framework.db.fields import ORG_ROLES
from bitcaster.models import DispatcherMetaData
from bitcaster.models.audit import AuditEvent
from bitcaster.models.token import generate_api_token
from bitcaster.security import APP_ROLES
from bitcaster.utils.reflect import fqn
# Character-class constants mirroring the stdlib ``string`` module,
# re-declared locally and used as the default alphabet for text().
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
ascii_lowercase = lowercase
ascii_uppercase = uppercase
ascii_letters = ascii_lowercase + ascii_uppercase
# Single shared Faker instance available to the factories below.
faker = Faker()
def text(length, choices=ascii_letters):
    """Build a random string of exactly *length* characters.

    :param length: number of characters to generate
    :param choices: pool of characters to draw from (defaults to ASCII letters)
    :return: the generated string
    """
    picked = []
    for _ in range(length):
        picked.append(choice(choices))
    return ''.join(picked)
def get_group(name=None, permissions=None):
    """Create a ``Group`` (random 5-char name unless given) holding *permissions*.

    ``permissions`` is an iterable of '<app_label>.<codename>' strings; each
    is resolved to a ``django.contrib.auth`` Permission and attached.

    :raises ValueError: if a permission name is not '<app_label>.<codename>'.
    :raises Permission.DoesNotExist: if a named permission does not exist.
    """
    group = GroupFactory(name=(name or text(5)))
    permission_names = permissions or []
    for permission_name in permission_names:
        try:
            app_label, codename = permission_name.split('.')
        except ValueError:
            raise ValueError('Invalid permission name `{0}`'.format(permission_name))
        try:
            permission = Permission.objects.get(content_type__app_label=app_label, codename=codename)
        except Permission.DoesNotExist:
            # NOTE(review): DoesNotExist is raised with two positional args, so
            # the placeholder is never interpolated — probably meant .format().
            raise Permission.DoesNotExist('Permission `{0}` does not exists', permission_name)
        group.permissions.add(permission)
    return group
class user_grant_permissions(ContextDecorator):  # noqa
    """Context manager / decorator that temporarily grants *permissions* to *user*.

    On enter it drops Django's per-user permission caches, creates a throwaway
    group carrying the requested permissions and enrols the user; on exit the
    user is removed from the group and the group is deleted.  ``start()`` /
    ``stop()`` mirror ``mock.patch`` so the grant can be applied imperatively.
    """
    # Cached permission attributes that must be cleared so the new group's
    # permissions are re-evaluated on the next access.
    caches = ['_group_perm_cache', '_user_perm_cache', '_dsspermissionchecker',
              '_officepermissionchecker', '_perm_cache', '_dss_acl_cache']
    def __init__(self, user, permissions=None):
        self.user = user
        self.permissions = permissions
        self.group = None
    def __enter__(self):
        # Invalidate any stale permission caches on the user instance.
        for cache in self.caches:
            if hasattr(self.user, cache):
                delattr(self.user, cache)
        self.group = get_group(permissions=self.permissions or [])
        self.user.groups.add(self.group)
    def __exit__(self, e_typ, e_val, trcbak):
        if self.group:
            self.user.groups.remove(self.group)
            self.group.delete()
        # Re-raise any exception that occurred inside the with-block.
        if e_typ:
            raise e_typ(e_val).with_traceback(trcbak)
    def start(self):
        """Activate a patch, returning any created mock."""
        result = self.__enter__()
        return result
    def stop(self):
        """Stop an active patch."""
        return self.__exit__(None, None, None)
def client_factory(user, token=None, force=False):
    """Return a DRF ``APIClient`` authenticated as *user*.

    With ``force=True`` authentication is forced without credentials;
    otherwise a regular session login is attempted.
    NOTE(review): the ``token`` parameter is accepted but never used.
    """
    client = APIClient()
    if force:
        client.force_authenticate(user=user)
    else:
        # NOTE(review): '<PASSWORD>' is a dataset redaction artifact — the
        # original literal password was scrubbed from this source.
        assert client.login(email=user.email, password='<PASSWORD>')
    client.handler._force_user = user
    return client
def api_client_factory(app):
    """Return an ``APIClient`` carrying *app*'s API auth token header."""
    token = models.ApiAuthToken.objects.get(application=app)
    client = APIClient()
    client.credentials(HTTP_AUTHORIZATION='Token ' + token.token)
    return client
# Global map {django model -> factory class}, filled automatically by the
# metaclass below every time a new factory subclass is defined.
factories_registry = {}
class AutoRegisterFactoryMetaClass(FactoryMetaClass):
    """Factory metaclass that records each new factory in ``factories_registry``."""
    def __new__(mcs, class_name, bases, attrs):
        new_class = super().__new__(mcs, class_name, bases, attrs)
        factories_registry[new_class._meta.model] = new_class
        return new_class
class AutoRegisterModelFactory(factory.DjangoModelFactory, metaclass=AutoRegisterFactoryMetaClass):
    """Base class for every model factory in this module (auto-registered)."""
    pass
class GroupFactory(AutoRegisterModelFactory):
    """Factory for ``django.contrib.auth`` groups."""
    class Meta:
        model = Group
class UserFactory(AutoRegisterModelFactory):
    """Factory for bitcaster users.

    Extra kwargs handled by ``_get_or_create``: ``organization`` (created if
    missing, with the user as owner), ``permissions`` (granted permanently via
    ``user_grant_permissions(...).start()``), and ``addresses`` (a
    {label: address} mapping).
    """
    class Meta:
        model = bitcaster.models.user.User
        django_get_or_create = ('email',)
    name = factory.Faker('name')
    country = 'IT'
    language = 'en'
    timezone = 'UTC'
    # NOTE(review): '<EMAIL>' is a dataset redaction artifact — the original
    # was presumably a %-format string like 'user%03d@example.com'; as-is the
    # '%' interpolation raises TypeError. Same for '<PASSWORD>' below.
    email = factory.Sequence(lambda n: '<EMAIL>' % n)
    password = '<PASSWORD>'
    is_active = True
    @classmethod
    def _get_or_create(cls, model_class, *args, **kwargs):
        organization = kwargs.pop('organization', None)
        permissions = kwargs.pop('permissions', None)
        addresses = kwargs.pop('addresses', None)
        raw_password = kwargs.pop('password', cls.password)
        extras = kwargs.pop('extras', {})
        extras['index_number'] = '__%s__' % kwargs['email']
        user = super()._get_or_create(model_class, extras=extras, *args, **kwargs)
        if raw_password:
            user.set_password(raw_password)
        if permissions:
            # start() without stop(): the grant persists for the test's lifetime.
            user_grant_permissions(user, permissions).start()
        if addresses:
            for handler, address in addresses.items():
                user.addresses.create(label=handler,
                                      address=address)
        if not organization:
            organization = OrganizationFactory(owner=user)
        # organization.add_member(user)
        return user
class MemberFactory(UserFactory):
    """User factory that also enrols the user in a given ``organization``."""
    role = ORG_ROLES.MEMBER
    @classmethod
    def _get_or_create(cls, model_class, *args, **kwargs):
        # 'organization' and 'role' are consumed here; the rest builds the user.
        organization = kwargs.pop('organization')
        role = kwargs.pop('role')
        user = super()._get_or_create(model_class, *args, **kwargs)
        organization.add_member(user, role=role)
        return user
class AdminFactory(UserFactory):
    """Superuser/staff variant of UserFactory; also creates an API token."""
    class Meta:
        model = models.User
        django_get_or_create = ('email',)
    is_superuser = True
    is_staff = True
    @factory.post_generation
    def tokens(self, create, extracted, **kwargs):
        """Attach an ApiAuthToken to the created admin (create strategy only)."""
        if not create:
            # Simple build, do nothing.
            return
        ApiTokenFactory(user=self)
class OrganizationFactory(AutoRegisterModelFactory):
    """Factory for organizations; the owner is enrolled with the OWNER role."""
    name = factory.Sequence(lambda n: 'Organization %03d' % n)
    owner = factory.SubFactory(UserFactory)
    class Meta:
        model = models.Organization
        django_get_or_create = ('name',)
    @classmethod
    def _after_postgeneration(cls, instance, create, results=None):
        super()._after_postgeneration(instance, create, results)
        # Ensure the owner is always a member of the organization it owns.
        instance.add_member(instance.owner, role=ORG_ROLES.OWNER)
class ApplicationFactory(AutoRegisterModelFactory):
    """Factory for applications, each attached to a (sub-)organization."""
    class Meta:
        model = models.Application
        django_get_or_create = ('name',)
    name = factory.Sequence(lambda n: 'Application %03d' % n)
    organization = factory.SubFactory(OrganizationFactory)
    # @factory.post_generation
    # def members(self, create, extracted, **kwargs):
    #     if not create:
    #         # Simple build, do nothing.
    #         return
    #     u = UserFactory()
    #     m = OrganizationMemberFactory(organization=self.organization, user=u)
    #     m = ApplicationMemberFactory(application=self,
    #                                  org_member=m,
    #                                  user=u)
    #
class OrganizationGroupFactory(AutoRegisterModelFactory):
    """Factory for organization-scoped groups."""
    organization = factory.SubFactory(OrganizationFactory)
    name = factory.Faker('name')
    class Meta:
        model = bitcaster.models.OrganizationGroup
        django_get_or_create = ('organization', 'name')
class OrganizationMemberFactory(AutoRegisterModelFactory):
    """Factory for the user<->organization membership row."""
    organization = factory.SubFactory(OrganizationFactory)
    user = factory.SubFactory(UserFactory)
    class Meta:
        model = bitcaster.models.OrganizationMember
        django_get_or_create = ('organization', 'user')
class ApplicationMemberFactory(AutoRegisterModelFactory):
    """Factory for the org-member<->application membership row (USER role)."""
    application = factory.SubFactory(ApplicationFactory)
    org_member = factory.SubFactory(OrganizationMemberFactory)
    role = APP_ROLES.USER
    class Meta:
        model = bitcaster.models.ApplicationUser
        django_get_or_create = ('application', 'org_member')
class TeamFactory(AutoRegisterModelFactory):
    """Factory for application teams; seeds one membership on creation."""
    class Meta:
        model = bitcaster.models.ApplicationTeam
        django_get_or_create = ('name',)
    application = factory.SubFactory(ApplicationFactory)
    name = factory.Faker('name')
    @factory.post_generation
    def memberships(self, create, extracted, **kwargs):
        """Add one freshly created application member to the team."""
        if not create:
            # Simple build, do nothing.
            return
        o = OrganizationMemberFactory(organization=self.application.organization)
        m = ApplicationMemberFactory(application=self.application,
                                     org_member=o)
        # NOTE(review): inside a post_generation hook named 'memberships',
        # self.memberships still resolves to the model's related manager.
        self.memberships.add(m)
class InvitationFactory(AutoRegisterModelFactory):
    """Factory for invitations; optional 'groups' kwarg sets the m2m."""
    class Meta:
        model = bitcaster.models.Invitation
        django_get_or_create = ('target',)
    organization = factory.SubFactory(OrganizationFactory)
    @classmethod
    def _get_or_create(cls, model_class, *args, **kwargs):
        groups = kwargs.pop('groups', None)
        invitation = super()._get_or_create(model_class, *args, **kwargs)
        if groups:
            invitation.groups.set(groups)
        return invitation
#
# class ApplicationTeamFactory(AutoRegisterModelFactory):
#     class Meta:
#         model = bitcaster.models.Team
#         django_get_or_create = ('name',)
#
#     application = factory.SubFactory(ApplicationFactory)
#     team = factory.SubFactory(TeamFactory)
#     role = ROLES.MEMBER
class ApplicationTriggerKeyFactory(AutoRegisterModelFactory):
    """Factory for trigger keys; optional 'events' kwarg links events."""
    application = factory.SubFactory(ApplicationFactory)
    token = factory.LazyAttribute(lambda s: generate_api_token())
    class Meta:
        model = models.ApplicationTriggerKey
        django_get_or_create = ('token',)
    @classmethod
    def _get_or_create(cls, model_class, *args, **kwargs):
        events = kwargs.pop('events', None)
        key = super()._get_or_create(model_class, *args, **kwargs)
        if events:
            for e in events:
                key.events.add(e)
        return key
class ApiTokenFactory(AutoRegisterModelFactory):
    """Factory for API auth tokens bound to a user and an application."""
    application = factory.SubFactory(ApplicationFactory)
    user = factory.SubFactory(UserFactory)
    token = factory.LazyAttribute(lambda s: generate_api_token())
    class Meta:
        model = models.ApiAuthToken
        django_get_or_create = ('token',)
class MonitorFactory(AutoRegisterModelFactory):
    """Factory for IMAP monitors; default config comes from TEST_MONITOR_* env vars."""
    class Meta:
        model = models.Monitor
        django_get_or_create = ('name',)
    name = factory.Sequence(lambda n: 'Monitor %03d' % n)
    application = factory.SubFactory(ApplicationFactory)
    handler = factory.LazyAttribute(lambda a: fqn(ImapAgent))
    enabled = True
    @classmethod
    def _get_or_create(cls, model_class, *args, **kwargs):
        if 'config' not in kwargs:
            # NOTE(review): the '<PASSWORD>' tokens below are dataset redaction
            # artifacts (the original was presumably os.environ.get(...)); as
            # scrubbed, this line is not valid Python and needs restoring.
            kwargs['config'] = {'event': EventFactory(application=kwargs['application']).pk,
                                'username': os.environ.get('TEST_MONITOR_USER', ''),
                                'password': <PASSWORD>.get('TEST_<PASSWORD>', ''),
                                'server': os.environ.get('TEST_MONITOR_SERVER', ''),
                                'port': os.environ.get('TEST_MONITOR_PORT', ''),
                                'folder': os.environ.get('TEST_MONITOR_FOLDER', ''),
                                'tls': os.environ.get('TEST_MONITOR_TLS', ''),
                                'body_regex': '',
                                'subject_regex': 'gerardo',
                                'sender_regex': '',
                                'to_regex': ''
                                }
        channel = super()._get_or_create(model_class, *args, **kwargs)
        return channel
class ChannelFactory(AutoRegisterModelFactory):
    """Factory for email channels backed by Django's locmem mail backend."""
    class Meta:
        model = models.Channel
        django_get_or_create = ('name',)
    name = factory.Sequence(lambda n: 'Channel %03d' % n)
    organization = factory.SubFactory(OrganizationFactory)
    # application = factory.SubFactory(ApplicationFactory)
    handler = factory.LazyAttribute(lambda a: fqn(Email))
    enabled = True
    deprecated = False
    @classmethod
    def _get_or_create(cls, model_class, *args, **kwargs):
        if 'config' not in kwargs:
            # Make sure the Email dispatcher is registered and enabled.
            DispatcherMetaData.objects.update_or_create(fqn=fqn(Email),
                                                        defaults=dict(enabled=True)
                                                        )
            kwargs['config'] = {'server': 'server',
                                'backend': 'django.core.mail.backends.locmem.EmailBackend',
                                'port': 9000,
                                'timeout': 30,
                                'username': 'username',
                                'password': 'password',
                                'sender': '<EMAIL>'}
        channel = super()._get_or_create(model_class, *args, **kwargs)
        return channel
class EventFactory(AutoRegisterModelFactory):
    """Factory for application events."""
    class Meta:
        model = models.Event
        django_get_or_create = ('application', 'name')
    name = factory.Sequence(lambda n: 'Event %03d' % n)
    application = factory.SubFactory(ApplicationFactory)
class MessageFactory(AutoRegisterModelFactory):
    """Factory for message templates ({{param1}}/{{param2}} placeholders)."""
    class Meta:
        model = models.Message
        django_get_or_create = ('event', 'channel',)
    event = factory.SubFactory(EventFactory)
    channel = factory.SubFactory(ChannelFactory)
    enabled = True
    language = factory.Iterator(['it', 'en', 'es', 'fr'])
    subject = factory.Sequence(lambda n: 'Subject %03d' % n)
    body = """Message Body
    param1:{{param1}}
    param2:{{param2}}
    """
    @classmethod
    def _get_or_create(cls, model_class, *args, **kwargs):
        message = super()._get_or_create(model_class, *args, **kwargs)
        # Run model validation so a broken template fails fast in tests.
        message.clean()
        return message
class SubscriptionFactory(AutoRegisterModelFactory):
    """Factory for subscriptions; also creates a verified address assignment.

    Extra kwarg 'address' (default '<EMAIL>' — a dataset redaction artifact)
    becomes the subscriber's address on the subscription's channel.
    """
    class Meta:
        model = models.Subscription
        django_get_or_create = ('subscriber', 'event', 'channel')
    subscriber = factory.SubFactory(UserFactory)
    trigger_by = factory.SubFactory(AdminFactory)
    channel = factory.SubFactory(ChannelFactory)
    event = factory.SubFactory(EventFactory)
    config = {}
    @classmethod
    def _get_or_create(cls, model_class, *args, **kwargs):
        address = kwargs.pop('address', '<EMAIL>')
        sub = super()._get_or_create(model_class, *args, **kwargs)
        a = AddressAssignmentFactory(channel=sub.channel,
                                     user=sub.subscriber,
                                     verified=True,
                                     address=AddressFactory(user=sub.subscriber,
                                                            address=address))
        sub.assignment = a
        sub.save()
        return sub
class AddressFactory(AutoRegisterModelFactory):
    """Factory for user addresses."""
    class Meta:
        model = models.Address
        django_get_or_create = ('user', 'label')
    user = factory.SubFactory(UserFactory)
    label = factory.Sequence(lambda n: 'Label %03d' % n)
    address = factory.Sequence(lambda n: 'Address %03d' % n)
class AddressAssignmentFactory(AutoRegisterModelFactory):
    """Factory binding a user's address to a channel (verified by default)."""
    class Meta:
        model = models.AddressAssignment
        django_get_or_create = ('user', 'channel')
    user = factory.SubFactory(UserFactory)
    address = factory.SubFactory(AddressFactory)
    channel = factory.SubFactory(ChannelFactory)
    verified = True
class NotificationFactory(AutoRegisterModelFactory):
    """Factory for delivered notifications with a fuzzy 2019+ timestamp."""
    class Meta:
        model = models.Notification
        django_get_or_create = ('id',)
    timestamp = FuzzyDateTime(datetime.datetime(2019, 1, 1, tzinfo=UTC))
    application = factory.SubFactory(ApplicationFactory)
    event = factory.SubFactory(EventFactory)
    subscription = factory.SubFactory(SubscriptionFactory)
    address = factory.Sequence(lambda n: 'address-%03d' % n)
    channel = factory.SubFactory(ChannelFactory)
    status = True
class OccurenceFactory(AutoRegisterModelFactory):
    """Factory for event occurrences (model name keeps the original spelling)."""
    timestamp = FuzzyDateTime(datetime.datetime(2019, 1, 1, tzinfo=UTC))
    # organization = factory.SubFactory(OrganizationFactory)
    application = factory.SubFactory(ApplicationFactory)
    event = factory.SubFactory(EventFactory)
    # origin = models.GenericIPAddressField(blank=True, null=True)
    token = FuzzyText(length=32)
    user = factory.SubFactory(UserFactory)
    # submissions = models.IntegerField(default=0,
    #                                   help_text='number of subscriptions')
    # successes = models.IntegerField(default=0)
    # failures = models.IntegerField(default=0)
    class Meta:
        model = models.Occurence
class AuditLogEntryFactory(AutoRegisterModelFactory):
    """Factory for audit log entries with event-family-dependent target labels."""
    class Meta:
        model = models.AuditLogEntry
        django_get_or_create = ('id',)
    timestamp = FuzzyDateTime(datetime.datetime(2019, 1, 1, tzinfo=pytz.UTC))
    # organization = factory.SubFactory(OrganizationFactory)
    actor = factory.LazyAttribute(lambda a: models.User.objects.order_by('?').first())
    @classmethod
    def _get_or_create(cls, model_class, *args, **kwargs):
        """Build/update an entry, synthesizing a plausible target_label per event.

        Requires 'organization' and 'actor' in kwargs; 'event' defaults to a
        random AuditEvent value.
        """
        manager = cls._get_manager(model_class)
        kwargs.setdefault('event', random.choice(list(AuditEvent)).value)
        org = kwargs['organization']
        actor = kwargs['actor']
        event = kwargs['event']
        # Events appear to be grouped in families of 100 — TODO confirm
        # against the AuditEvent numbering scheme.
        family = (event % 100)
        if event == AuditEvent.MEMBER_SUBSCRIBE_EVENT:
            e = EventFactory(application=models.Application.objects.first())
            kwargs['target_label'] = str(e)
        elif family == 1:
            AddressFactory(user=actor)
            kwargs['target_label'] = str(actor.addresses.first())
        elif family == 4:
            AddressAssignmentFactory(user=actor)
            kwargs['target_label'] = str(actor.assignments.first())
        # elif family == 4:
        #     AddressAssignmentFactory(user=actor)
        #     kwargs['target_label'] = str(actor.assignments.first())
        elif family == 5:
            kwargs['target_label'] = str(org.members.order_by('?').first())
        # Split kwargs into lookup keys vs. defaults for update_or_create.
        key_fields = {}
        for field in cls._meta.django_get_or_create:
            key_fields[field] = kwargs.pop(field)
        key_fields['defaults'] = kwargs
        instance, _created = manager.update_or_create(*args, **key_fields)
        return instance
| StarcoderdataPython |
6519650 | """Analyse persistent homology statistics wrt. their expressivity."""
import argparse
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import euclidean_distances
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('INPUT')
    parser.add_argument(
        '-c',
        '--column',
        default='n_features',
        type=str,
        help='Column to use for feature calculation'
    )
    args = parser.parse_args()
    # FIX: the deprecated aliases ``np.int``/``np.float`` were removed in
    # NumPy 1.24 (deprecated since 1.20); they were plain aliases of the
    # builtin types, so use ``int``/``float`` directly.
    df = pd.read_csv(
        args.INPUT,
        index_col='file',
        dtype={
            'n_features': int,
            'total_persistence': float,
            'infinity_norm': float
        }
    )
    X = []
    n_features = 0
    # Figure out how long the feature vectors have to be.
    for name, df_ in df.groupby('name'):
        n_features = max(
            n_features,
            len(df_.sort_values(by='dimension')[args.column].values)
        )
    for name, df_ in df.groupby('name'):
        # This can be seen as an equivalent to the Betti number
        # calculation.
        feature_vector = df_.sort_values(by='dimension')[args.column].values
        # Right-pad with zeros so every graph gets an equal-length vector.
        feature_vector = np.pad(
            feature_vector,
            [(0, n_features - len(feature_vector))],
            mode='constant'
        )
        X.append(feature_vector)
    X = np.asarray(X)
    D = euclidean_distances(X)
    n = len(X)
    # Number of graph pairs with equal feature vectors, not accounting
    # for the diagonal because every graph is equal to itself.
    n_equal_pairs = (np.triu(D == 0).sum()) - n
    fraction_equal_pairs = n_equal_pairs / (n * (n - 1) // 2)
    print(
        f'{n} graphs, {n_equal_pairs} / {n * (n - 1) // 2:d} pairs '
        f'({100 * fraction_equal_pairs:.2f}%)'
    )
| StarcoderdataPython |
269099 | <filename>packages/augur-core/tests/reporting/test_rep_oracle.py
from eth_tester.exceptions import TransactionFailed
from utils import longToHexString, nullAddress, stringToBytes
from pytest import raises
import codecs
import functools
from old_eth_utils import sha3
def test_rep_oracle(contractsFixture, augur, cash, market, universe):
    """Exercise the REP/Cash Uniswap-based price oracle end to end.

    Checks that the reported price starts at the genesis value, converges
    toward the pool-reserve-implied price over the oracle PERIOD, and tracks
    buys/sells of REP against the exchange.
    """
    if contractsFixture.paraAugur:
        return
    reputationTokenAddress = universe.getReputationToken()
    reputationToken = contractsFixture.applySignature('TestNetReputationToken', reputationTokenAddress)
    repOracle = contractsFixture.contracts["RepOracle"]
    repExchange = contractsFixture.applySignature("UniswapV2Pair", repOracle.getExchange(reputationTokenAddress))
    account = contractsFixture.accounts[0]
    # Initially the price will just be the initialization value
    initialPrice = repOracle.genesisInitialRepPriceinAttoCash()
    assert roughlyEqual(repOracle.poke(reputationTokenAddress), initialPrice)
    token0IsCash = cash.address < reputationTokenAddress
    # Add liquidity to suggest the price is 1 REP = 20 Cash
    cashAmount = 20 * 10**18
    repAmount = 1 * 10**18
    addLiquidity(repExchange, cash, reputationToken, cashAmount, repAmount, account)
    # The reserves have been modified, however little time has passed so the price will not have diverged much
    # NOTE(review): poke is called twice here — presumably the first call
    # records the new cumulative price state; confirm against RepOracle.
    repOracle.poke(reputationTokenAddress)
    assert roughlyEqual(repOracle.poke(reputationTokenAddress), initialPrice)
    # If we "mine" a block and advance the time 1/2 the period value of the oracle we should see the new value significantly closer to the price dictated by reserves. Specifically about half of the difference
    period = repOracle.PERIOD()
    mineBlock(contractsFixture, period / 2)
    expectedNewRepPrice = initialPrice + ((cashAmount - initialPrice) / 2)
    assert roughlyEqual(repOracle.poke(reputationTokenAddress), expectedNewRepPrice)
    # Just calling poke again will be a no op
    assert roughlyEqual(repOracle.poke(reputationTokenAddress), expectedNewRepPrice)
    # If we "mine" a block after period time then the new value will simply be the price
    mineBlock(contractsFixture, period)
    assert roughlyEqual(repOracle.poke(reputationTokenAddress), cashAmount)
    # Buy REP and manipulate blockNumber to affect cummulative amounts
    cashAmount = 10**18 # Trade 1 Dai for ~.05 REP
    repAmount = 4.7 * 10**16
    buyRep(repExchange, cash, cashAmount, repAmount, token0IsCash, account)
    mineBlock(contractsFixture, period)
    expectedNewRepPrice = 22 * 10**18 # Cash reserves of ~ 21 Dai and REP reserves of ~.95 REP means a price of 22 Dai / REP
    assert roughlyEqual(repOracle.poke(reputationTokenAddress), expectedNewRepPrice, 2 * 10**17)
    # Now Sell REP
    repAmount = 1 * 10**17 # Trade .1 REP for 1.8 DAI
    cashAmount = 1.8 * 10**18
    sellRep(repExchange, reputationToken, repAmount, cashAmount, token0IsCash, account)
    mineBlock(contractsFixture, period)
    expectedNewRepPrice = 18.2 * 10**18 # Cash reserves of ~ 19.2 Dai and REP reserves of ~1.05 REP means a price of ~18.2 Dai / REP
    assert roughlyEqual(repOracle.poke(reputationTokenAddress), expectedNewRepPrice, 2 * 10**17)
def addLiquidity(exchange, cash, reputationToken, cashAmount, repAmount, address):
    """Faucet both tokens, transfer them to the pair and mint LP to *address*."""
    cash.faucet(cashAmount)
    reputationToken.faucet(repAmount)
    cash.transfer(exchange.address, cashAmount)
    reputationToken.transfer(exchange.address, repAmount)
    exchange.mint(address)
def buyRep(exchange, cash, cashAmount, repAmount, token0IsCash, address):
    """Swap *cashAmount* of Cash for *repAmount* of REP on the pair."""
    cash.faucet(cashAmount)
    cash.transfer(exchange.address, cashAmount)
    # amount0Out/amount1Out depend on the pair's token ordering.
    exchange.swap(0 if token0IsCash else repAmount, repAmount if token0IsCash else 0, address, "")
def sellRep(exchange, reputationToken, repAmount, cashAmount, token0IsCash, address):
    """Swap *repAmount* of REP for *cashAmount* of Cash on the pair."""
    reputationToken.faucet(repAmount)
    reputationToken.transfer(exchange.address, repAmount)
    exchange.swap(cashAmount if token0IsCash else 0, 0 if token0IsCash else cashAmount, address, "")
def mineBlock(contractsFixture, timePassed):
    """Advance the test chain clock by *timePassed* seconds."""
    timestamp = contractsFixture.eth_tester.backend.chain.header.timestamp
    contractsFixture.eth_tester.time_travel(int(timestamp + timePassed))
def roughlyEqual(amount1, amount2, tolerance=5 * 10**16):
    """Return True when the two amounts differ by strictly less than *tolerance*."""
    delta = amount1 - amount2
    if delta < 0:
        delta = -delta
    return delta < tolerance
def is_bytes(value):
    """Return True for raw byte containers (``bytes`` or ``bytearray``)."""
    byte_types = (bytes, bytearray)
    return isinstance(value, byte_types)
def combine(f, g):
    """Compose two callables: the result maps ``x`` to ``f(g(x))``."""
    def composed(x):
        return f(g(x))
    return composed
def compose(*functions):
    """Compose *functions* right-to-left; with no arguments, return identity."""
    identity = lambda x: x
    return functools.reduce(lambda outer, inner: lambda x: outer(inner(x)),
                            functions, identity)
# ensure we have the *correct* sha3 installed (keccak)
assert codecs.encode(sha3(b''), 'hex') == b'c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470' # noqa
def _sub_hash(value, label):
    """One namehash step: keccak(parent_node || keccak(label))."""
    return sha3(value + sha3(label))
def namehash(name):
    """Compute the ENS namehash of *name* (str or bytes; empty name -> 32 zero bytes).

    Labels are split on '.'; because ``compose`` applies right-to-left, the
    rightmost label (the TLD) is folded into the node first, per EIP-137.
    """
    node = b'\x00' * 32
    if name:
        if is_bytes(name):
            encoded_name = name
        else:
            encoded_name = codecs.encode(name, 'utf8')
        labels = encoded_name.split(b'.')
        return compose(*(
            functools.partial(_sub_hash, label=label)
            for label
            in labels
        ))(node)
    return node
6507197 | <reponame>esynr3z/CorSaiR
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Demonstration of Register API
"""
import copy
from corsair import BitField, Register, config
# create
csr_cnt = Register('CNT', 'Counter for some events', 0x10)
# access to the attributes
csr_cnt.address = 0
print("%s.address = 0x%x" % (csr_cnt.name, csr_cnt.address))
# add single bitfield
csr_cnt.add_bitfields(BitField('EVA', 'Some event A counter',
                               lsb=0, width=4, reset=0x000, access='rw', hardware='oie'))
# add several bitfields
csr_cnt.add_bitfields([
    BitField('EVB', 'Some event B counter',
             lsb=8, width=4, reset=0x000, access='rw', hardware='oie'),
    BitField('EVC', 'Some event C counter',
             lsb=16, width=4, reset=0x000, access='rw', hardware='oie')
])
# you can use chaining to create register and add bit fields in one action
csr_cnt = Register('CNT', 'Counter for some events', 0x10).add_bitfields([
    BitField('EVA', 'Some event A counter',
             lsb=0, width=4, reset=0x000, access='rw', hardware='oie'),
    BitField('EVB', 'Some event B counter',
             lsb=8, width=4, reset=0x000, access='rw', hardware='oie'),
    BitField('EVC', 'Some event C counter',
             lsb=16, width=4, reset=0x000, access='rw', hardware='oie')
])
# print name of the bitfields
print(csr_cnt.bitfield_names)
# check equality based on all the attributes (including bitields)
csr_lena = Register('LENA', 'Length of some pulse A', 0x0)
csr_lena.add_bitfields(BitField('VAL', 'CSR value', width=32, access='rw'))
assert csr_lena != csr_cnt
csr_cnt_clone = copy.deepcopy(csr_cnt)
assert csr_cnt == csr_cnt_clone
# print as string
print(repr(csr_cnt))
print("%s" % csr_cnt)
# conversions
print(csr_cnt.as_str())
print(csr_cnt.as_dict())
# count bitfields
print("number of bitfields: %d" % len(csr_cnt))
# iterate through bitfields
for i, bf in enumerate(csr_cnt):
    print("%d: %s" % (i, bf.name))
# access to the bitfield by name
print("EVB description: %s" % csr_cnt['EVB'].description)
# access to the bitfield by index (they are sorted in an ascending order of msb values)
print("EVB description: %s" % csr_cnt[1].description)
# validate the register and all it's bitfields
csr_cnt.validate()
# global configuration is important
globcfg = config.default_globcfg()
globcfg['data_width'] = 16
config.set_globcfg(globcfg)
# a 32-bit wide field no longer fits into the 16-bit data bus -> assertion
try:
    rega = Register('REGA', 'Register A', 0x10)
    rega.add_bitfields(BitField('BFA', 'Bitfield A', lsb=0, width=32))
except AssertionError as e:
    print(e)
# add extra properties
reg = Register(the_answer=42)
assert reg.etc['the_answer'] == 42
8054798 | <gh_stars>1-10
from setuptools import setup, find_packages
import conv1d_text_vae
long_description = '''
conv1d-text-vae
============
The Conv1D-Text-VAE is a special convolutional VAE (variational autoencoder)
for the text generation and the text semantic hashing. This package with the
sklearn-like interface, and it uses the Keras package for neural modeling and
the Fasttext models for "online" word vectorizing.
Getting Started
---------------
Installing
~~~~~~~~~~
To install this project on your local machine, you should run the
following commands in Terminal:
.. code::
git clone https://github.com/nsu-ai/conv1d-text-vae.git
cd conv1d-text-vae
sudo python setup.py install
You can also run the tests:
.. code::
python setup.py test
'''
setup(
name='conv1d-text-vae',
version=conv1d_text_vae.__version__,
packages=find_packages(exclude=['tests', 'demo']),
include_package_data=True,
description='A convolutional variational autoencoder for text generation and semantic hashing '
'with the simple sklearn-like interface',
long_description=long_description,
url='https://github.com/nsu-ai/conv1d-text-vae',
author='<NAME>',
author_email='<EMAIL>',
license='Apache License Version 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Text Processing',
'Topic :: Text Processing :: Linguistic',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords=['vae', 'conv1d', 'nlp', 'keras', 'scikit-learn', 'fasttext'],
install_requires=['annoy>=1.13', 'gensim>=3.5.0', 'h5py>=2.8.0', 'Keras>=2.2.0', 'numpy>=1.14.5', 'scipy>=1.1.0',
'nltk>=3.2.5', 'scikit-learn>=0.19.1', 'requests>=2.19.1'],
test_suite='tests'
)
| StarcoderdataPython |
from .manager import Manager
from .entity import Entity
from .motion import Motion
from .group import Group
import numpy as np
import random
class EntityGroup(Group):
    """An entity group is a group of entities. Entity specific features are added."""
    @staticmethod
    def getCollided(group1, group2):
        """Determine the collisions between 2 groups.

        A cheap bounding-circle test on 'born' (the entity's bounding radius —
        TODO confirm naming) filters pairs before the precise cross() check.
        """
        collisions = []
        for e1 in group1:
            for e2 in group2:
                if (e1.position - e2.position).norm < e1.born + e2.born:
                    if e1.cross(e2):
                        collisions.append((e1, e2))
        return collisions
    @staticmethod
    def killOnCollision(group1, group2):
        """Kill every pair of crossing entities. Assumes both groups are alive."""
        for e1 in group1:
            for e2 in group2:
                if (e1.position - e2.position).norm < e1.born + e2.born:
                    if e1.cross(e2):
                        e1.die()
                        e2.die()
    @classmethod
    def randomOfType(cls, etype, n=0, **kwargs):
        """Create a group of n random entities of type 'etype'."""
        entities = [etype.random() for i in range(n)]
        return cls(*entities, **kwargs)
    @classmethod
    def randomOfTypes(cls, *types, n=0, **kwargs):
        """Create a group of n random entities whose type mixes all given types."""
        class etype(*types):
            pass
        # FIX: 'n' was accepted but never forwarded, so the group was always
        # empty; pass it through to randomOfType.
        return cls.randomOfType(etype, n=n, **kwargs)
    @classmethod
    def random(cls, n=10, **kwargs):
        """Create n random entities."""
        entities = [Entity.random() for i in range(n)]
        return cls(*entities, **kwargs)
    @classmethod
    def randomWithSizeSparse(cls, n, size, sparse, **kwargs):
        """Create a random group using the size and sparse parameters."""
        # NOTE(review): super().random skips EntityGroup.random and resolves on
        # Group — confirm Group actually defines random().
        g = super().random(n, **kwargs)
        g.enlarge(size)
        g.spread(sparse)
        return g
    def __init__(self, *entities, alive=False, active=False, activate=False):
        """Create an entity group; 'activate' immediately activates all members."""
        super().__init__(*entities)
        self.active = active
        self.alive = alive
        if activate:
            self.activate()
    # Binding the entities to the elements
    entities = property(Group.getElements, Group.setElements, Group.delElements)
    def randomEntity(self):
        """Return a random entity of the group (recursing into nested groups)."""
        chosen = []
        for entity in self.entities:
            if isinstance(entity, EntityGroup):
                chosen.append(entity.randomEntity())
            else:
                chosen.append(entity)
        return random.choice(chosen)
    def spawn(self):
        """Spawn each entity."""
        self.alive = True
        for entity in self:
            entity.spawn()
    def updateActivation(self):
        """Mark the group active if any of its entities is active."""
        self.active = False
        for entity in self:
            if entity.active:
                self.active = True
    def activate(self):
        """Reactivate all entities."""
        self.active = True
        for entity in self:
            entity.activate()
    def deactivate(self):
        """Deactivate all entities."""
        self.active = False
        for entity in self:
            entity.deactivate()
    def reactKeyDown(self, key):
        """Make each active entity react to the key down event."""
        for entity in self:
            if entity.active:
                entity.reactKeyDown(key)
    def reactMouseMotion(self, position):
        """Make each active entity react to a mouse motion event."""
        for entity in self:
            if entity.active:
                entity.reactMouseMotion(position)
    def reactMouseButtonDown(self, button, position):
        """Make all active entities react to a mouse button down event."""
        for entity in self:
            if entity.active:
                entity.reactMouseButtonDown(button, position)
    def respawn(self):
        """Respawn all dead entities."""
        for entity in self:
            entity.respawn()
    def clean(self):
        """Delete all dead entities (recursing into nested groups)."""
        i = 0
        while i < len(self):
            if self[i].alive:
                if isinstance(self[i], EntityGroup):
                    self[i].clean()
                i += 1
            else:
                del self[i]
    def show(self, context):
        """Show all entities."""
        for entity in self:
            entity.show(context)
    def showBorn(self, context):
        """Show the bounding circle of every entity."""
        for entity in self:
            entity.showBorn(context)
    def __str__(self, name=None):
        """Return the str of the types of the entities."""
        if name is None:
            name = type(self).__name__
        return super().__str__(name)
    def update(self, dt):
        """Update all entities."""
        for entity in self:
            entity.update(dt)
    def setFriction(self, friction):
        """Set the friction of the entities to a given friction."""
        for entity in self:
            entity.setFriction(friction)
    def enlarge(self, n):
        """Enlarge the anatomies of the entities."""
        for entity in self:
            entity.enlarge(n)
    def spread(self, n):
        """Spread the bodies of the entities."""
        for entity in self:
            entity.spread(n)
    def control(self, controller):
        """Return the entity addressed by *controller* (a sequence of indices,
        followed recursively through nested groups)."""
        # print(self[:])
        if len(controller) > 1:
            return self[controller[0]].control(controller[1:])
        else:
            return self[controller[0]]
class AliveEntityGroup:
"""Group of entities that handle themselves."""
@classmethod
def random(cls, n=5, np=3, nm=2, nv=2, dv=2):
"""Create a random entity group using the optional number of entities 'n'."""
entities = [Entity.random(n=np, nm=nm, nv=nv, d=dv) for i in range(n)]
entities = dict(zip(range(len(entities)), entities))
return cls(entities)
def __init__(self, entities):
"""Create a body group using the dictionary of entities."""
self.entities = entities
self.updateAlives()
self.updateMaxBorn()
def updateAlives(self):
"""Update the ids of alive entities."""
self.alives = dict([(id, entity) for (id, entity)
in self.entities.items() if entity.alive])
# Recurrent data that must be updated.
# It is better to proceed that way for efficiency
@property
def deads(self):
"""Return the ids of dead entities."""
return {k: v for k, v in self.entities.items() if k not in self.alives}
def spawnEach(self):
"""Spawn each entity."""
for entity in self.entities.values():
entity.spawn()
self.alives = self.entities.keys()
def update(self, dt):
"""Update the group."""
self.updateEach(dt)
collisions = self.getCollisionsWithCircles()
if len(collisions) > 0:
collided = self.getCollided(collisions)
if len(collided) != 0:
self.killEach(collided)
self.updateAlives()
def updateEach(self, dt):
"""Update each entity alive."""
for entity in self.alives.values():
entity.update(dt)
def showEach(self, context):
"""Show each entity alive."""
for entity in self.alives.values():
entity.show(context)
def respawnDeads(self):
"""Respawn each dead entity."""
for entity in self.deads.values():
entity.respawn()
def getCollisions(self):
"""Return the list of couples of collisions detected between alive entities."""
collisions = []
keys = list(self.alives.keys())
n = len(keys)
for i in range(n):
for j in range(i + 1, n):
id1 = keys[i]
id2 = keys[j]
e1 = self.alives[id1]
e2 = self.alives[id2]
if e1.cross(e2):
collisions.append((id1, id2))
return collisions
def getCollided(self, collisions):
"""Return the ids of collided entities."""
ids = list(set(np.reshape(collisions, 2 * len(collisions))))
return dict([(id, self.entities[id]) for id in ids])
def killEach(self, collided):
"""Kill entities with their ids."""
for entity in collided.values():
entity.die()
def spread(self, n=10):
"""Spread randomly the entities."""
for entity in self.entities.values():
entity.motion = n * Motion.random()
def followEach(self, point):
    """Make every alive entity follow the given point."""
    for follower in self.alives.values():
        follower.follow(point)
def getMaxBorn(self):
    """Return the cached maximum born radius (see updateMaxBorn)."""
    return self._max_born
def updateMaxBorn(self):
    """Cache the largest born radius among the alive entities."""
    self._max_born = max(ent.born for ent in self.alives.values())
def getCollisionsWithCircles(self):
    """Return id pairs of alive entities whose bounding circles overlap
    and whose exact cross() test also succeeds."""
    ids = list(self.alives.keys())
    pairs = []
    for pos, id_a in enumerate(ids):
        for id_b in ids[pos + 1:]:
            ent_a = self.alives[id_a]
            ent_b = self.alives[id_b]
            # Cheap bounding-circle rejection before the exact test.
            if (ent_a.position - ent_b.position).norm < ent_a.born + ent_b.born:
                if ent_a.cross(ent_b):
                    pairs.append((id_a, id_b))
    return pairs
@property
def alive(self):
    """True while at least one entity is still alive."""
    return len(self.alives) > 0
@property
def dead(self):
    """True once every entity is dead."""
    return not self.alives
class GroupManager(Manager):
    """Manager that owns an EntityGroup and drives its update/show cycle."""

    @classmethod
    def random(cls, **kwargs):
        """Build a manager around a randomly generated entity group."""
        return cls(EntityGroup.random(**kwargs))

    def __init__(self, group, **kwargs):
        """Store the group and forward the remaining options to Manager."""
        super().__init__(**kwargs)
        self.group = group

    def update(self):
        """Resolve collisions, drop the victims, then advance the survivors."""
        crossing = self.group.getCollisions()
        victims = self.group.getCollided(crossing)
        self.group.killEach(victims)
        self.group.updateAlives()
        self.group.updateEach(self.dt)

    def show(self):
        """Render the group in the manager's context."""
        self.group.showEach(self.context)
class GroupTester(GroupManager):
    """Interactive tester: entities spread out and chase the context point."""

    def __init__(self, *args, **kwargs):
        # NOTE(review): **kwargs is accepted but deliberately not forwarded
        # to GroupManager — confirm this is intentional.
        super().__init__(*args)
        self.group.spread(100)
        self.following = True

    def update(self):
        """Update without collisions checks."""
        # self.group.updateEach(self.dt)
        self.updateWithCollisions()

    def updateWithCollisions(self):
        """Update the group."""
        # NOTE(review): the original indentation was lost in extraction; the
        # nesting below is a plausible reconstruction — verify upstream.
        self.group.followEach(self.context.point())
        collisions = self.group.getCollisionsWithCircles()
        if len(collisions) > 0:
            self.context.console.append(collisions)
            collided = self.group.getCollided(collisions)
            if len(collided) != 0:
                self.group.killEach(collided)
                self.group.updateAlives()
        self.group.updateEach(self.dt)
if __name__ == "__main__":
    # Ad-hoc manual test: build two random groups, enlarge one and print
    # their concatenation.  The commented lines are alternative demos
    # kept for reference.
    # bm = SpaceShipTester.random(following=True, dt=0.1)
    # bm()
    # gt = GroupTester.random(n=50)
    # print(gt.group.alives)
    # gt()
    b1 = EntityGroup.random()
    b2 = EntityGroup.random()
    b1.enlarge(100)
    print(b1 + b2)
| StarcoderdataPython |
8130463 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Tutorial 9: Basic Shading
"""
from __future__ import print_function
from OpenGL.GL import *
from OpenGL.GL.ARB import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from OpenGL.GLUT.special import *
from OpenGL.GL.shaders import *
from glew_wish import *
from csgl import *
from PIL.Image import open as pil_open
import texture as textureutils
import common
import glfw
import sys
import os
import controls
import objloader
import vboindexer
# Global window
window = None  # GLFW window handle, set by opengl_init().
null = c_void_p(0)  # Reusable NULL pointer for buffer-offset arguments.
def opengl_init():
    """Initialise GLFW, create the main window and initialise GLEW.

    Returns True on success, False otherwise.  On success the created
    window is stored in the module-level ``window`` global and its GL
    context is made current.
    """
    global window
    # Initialize the library
    if not glfw.init():
        print("Failed to initialize GLFW\n",file=sys.stderr)
        return False

    # Fix: window hints must be set BEFORE glfw.create_window — the original
    # code created the window first, so the hints had no effect.
    glfw.window_hint(glfw.SAMPLES, 4)
    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
    glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)

    # Open Window and create its OpenGL context
    window = glfw.create_window(1024, 768, "Tutorial 09", None, None) #(in the accompanying source code this variable will be global)
    if not window:
        print("Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n",file=sys.stderr)
        glfw.terminate()
        return False

    # Initialize GLEW
    glfw.make_context_current(window)
    glewExperimental = True

    # GLEW is a framework for testing extension availability.  Please see
    # tutorial notes for more information including why we can remove this code.
    if glewInit() != GLEW_OK:
        # Fix: the original wrote ``file=stderropen.sys`` which raised a
        # NameError whenever this failure path was taken.
        print("Failed to initialize GLEW\n",file=sys.stderr)
        return False
    return True
def c_type_fill(data, data_type):
    """Copy a rectangular 2D Python sequence into a new ctypes 2D array.

    `data_type` is the ctypes element type (e.g. GLfloat); the array shape
    is taken from len(data) x len(data[0]).
    """
    rows = len(data)
    cols = len(data[0])
    array_type = rows * (cols * data_type)
    out = array_type()
    for r, row in enumerate(data):
        for c, value in enumerate(row):
            out[r][c] = value
    return out
def c_type_fill_1D(data, data_type):
    """Copy a 1D Python sequence into a new ctypes array of `data_type`."""
    out = (len(data) * data_type)()
    for index, value in enumerate(data):
        out[index] = value
    return out
def main():
    """Tutorial 9 entry point: render a textured, lit Suzanne mesh.

    Sets up GL state, loads shaders/mesh/texture, builds indexed VBOs and
    runs the render loop until ESC is pressed or the window is closed.
    """
    # Initialize GLFW and open a window
    if not opengl_init():
        return

    # Enable key events
    glfw.set_input_mode(window,glfw.STICKY_KEYS,GL_TRUE)
    glfw.set_cursor_pos(window, 1024/2, 768/2)

    # Set opengl clear color to something other than red (color used by the fragment shader)
    glClearColor(0.0,0.0,0.4,0.0)

    # Enable depth test
    glEnable(GL_DEPTH_TEST)
    # Accept fragment if it closer to the camera than the former one
    glDepthFunc(GL_LESS)
    # Cull triangles which normal is not towards the camera
    glEnable(GL_CULL_FACE)

    vertex_array_id = glGenVertexArrays(1)
    glBindVertexArray( vertex_array_id )

    # Create and compile our GLSL program from the shaders
    program_id = common.LoadShaders( ".\\shaders\\Tutorial9\\StandardShading.vertexshader",
        ".\\shaders\\Tutorial9\\StandardShading.fragmentshader" )

    # Get a handle for our "MVP" uniform
    matrix_id = glGetUniformLocation(program_id, "MVP")
    view_matrix_id = glGetUniformLocation(program_id, "V")
    model_matrix_id = glGetUniformLocation(program_id, "M")

    # Load the texture
    texture = textureutils.load_image(".\\content\\uvmap_suzanne.bmp")
    # Get a handle for our "myTextureSampler" uniform
    texture_id = glGetUniformLocation(program_id, "myTextureSampler")

    # Read our OBJ file
    vertices,faces,uvs,normals,colors = objloader.load(".\\content\\suzanne.obj")
    vertex_data,uv_data,normal_data = objloader.process_obj( vertices,faces,uvs,normals,colors)

    # Our OBJ loader uses Python lists, convert to ctype arrays before sending to OpenGL
    vertex_data = objloader.generate_2d_ctypes(vertex_data)
    uv_data = objloader.generate_2d_ctypes(uv_data)
    normal_data = objloader.generate_2d_ctypes(normal_data)

    indexed_vertices, indexed_uvs, indexed_normals, indices = vboindexer.indexVBO(vertex_data,uv_data,normal_data)

    indexed_vertices = c_type_fill(indexed_vertices,GLfloat)
    indexed_uvs = c_type_fill(indexed_uvs,GLfloat)
    indexed_normals = c_type_fill(indexed_normals,GLfloat)
    indices = c_type_fill_1D(indices,GLushort)

    # Load OBJ in to a VBO
    vertex_buffer = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer)
    glBufferData(GL_ARRAY_BUFFER, len(indexed_vertices) * 4 * 3, indexed_vertices, GL_STATIC_DRAW)

    uv_buffer = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, uv_buffer)
    glBufferData(GL_ARRAY_BUFFER, len(indexed_uvs) * 4 * 2, indexed_uvs, GL_STATIC_DRAW)

    normal_buffer = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, normal_buffer)
    glBufferData(GL_ARRAY_BUFFER, len(indexed_normals) * 4 * 3, indexed_normals, GL_STATIC_DRAW)

    # Generate a buffer for the indices as well
    elementbuffer = glGenBuffers(1)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer)
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, len(indices) * 2, indices , GL_STATIC_DRAW)

    # vsync and glfw do not play nice.  when vsync is enabled mouse movement is jittery.
    common.disable_vsyc()

    # Get a handle for our "LightPosition" uniform
    glUseProgram(program_id)
    light_id = glGetUniformLocation(program_id, "LightPosition_worldspace")

    last_time = glfw.get_time()
    frames = 0

    while glfw.get_key(window,glfw.KEY_ESCAPE) != glfw.PRESS and not glfw.window_should_close(window):
        glClear(GL_COLOR_BUFFER_BIT| GL_DEPTH_BUFFER_BIT)

        # Update the FPS counter in the window title once per second.
        current_time = glfw.get_time()
        if current_time - last_time >= 1.0:
            glfw.set_window_title(window,"Tutorial 9.  FPS: %d"%(frames))
            frames = 0
            last_time = current_time

        glUseProgram(program_id)

        controls.computeMatricesFromInputs(window)
        ProjectionMatrix = controls.getProjectionMatrix()
        ViewMatrix = controls.getViewMatrix()
        ModelMatrix = mat4.identity()
        mvp = ProjectionMatrix * ViewMatrix * ModelMatrix

        # Send our transformation to the currently bound shader,
        # in the "MVP" uniform
        glUniformMatrix4fv(matrix_id, 1, GL_FALSE,mvp.data)
        glUniformMatrix4fv(model_matrix_id, 1, GL_FALSE, ModelMatrix.data)
        glUniformMatrix4fv(view_matrix_id, 1, GL_FALSE, ViewMatrix.data)

        lightPos = vec3(4,4,4)
        glUniform3f(light_id, lightPos.x, lightPos.y, lightPos.z)

        # Bind our texture in Texture Unit 0
        glActiveTexture(GL_TEXTURE0)
        glBindTexture(GL_TEXTURE_2D, texture)
        # Set our "myTextureSampler" sampler to user Texture Unit 0
        glUniform1i(texture_id, 0)

        # 1rst attribute buffer : vertices
        glEnableVertexAttribArray(0)
        glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer)
        glVertexAttribPointer(
            0,                  # attribute 0. No particular reason for 0, but must match the layout in the shader.
            3,                  # len(vertex_data)
            GL_FLOAT,           # type
            GL_FALSE,           # normalized?
            0,                  # stride
            null                # array buffer offset (c_type == void*)
            )

        # 2nd attribute buffer : colors
        glEnableVertexAttribArray(1)
        glBindBuffer(GL_ARRAY_BUFFER, uv_buffer)
        glVertexAttribPointer(
            1,                  # attribute 1. No particular reason for 1, but must match the layout in the shader.
            2,                  # len(vertex_data)
            GL_FLOAT,           # type
            GL_FALSE,           # normalized?
            0,                  # stride
            null                # array buffer offset (c_type == void*)
            )

        # 3rd attribute buffer : normals
        glEnableVertexAttribArray(2)
        glBindBuffer(GL_ARRAY_BUFFER, normal_buffer)
        glVertexAttribPointer(
            2,                  # attribute
            3,                  # size
            GL_FLOAT,           # type
            GL_FALSE,           # normalized?
            0,                  # stride
            null                # array buffer offset (c_type == void*)
            )

        # Draw the triangles, vertex data now contains individual vertices
        # so use array length
        # glDrawArrays(GL_TRIANGLES, 0, len(vertex_data))

        # Index buffer
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer)

        # Draw the triangles !
        glDrawElements(
            GL_TRIANGLES,       # mode
            len(indices),       # count
            GL_UNSIGNED_SHORT,  # type
            null                # element array buffer offset
            )

        # Not strictly necessary because we only have
        glDisableVertexAttribArray(0)
        glDisableVertexAttribArray(1)
        glDisableVertexAttribArray(2)

        # Swap front and back buffers
        glfw.swap_buffers(window)

        # Poll for and process events
        glfw.poll_events()

        frames += 1

    # !Note braces around vertex_buffer and uv_buffer.
    # glDeleteBuffers expects a list of buffers to delete
    glDeleteBuffers(1, [vertex_buffer])
    glDeleteBuffers(1, [uv_buffer])
    glDeleteBuffers(1, [normal_buffer])
    glDeleteProgram(program_id)
    # Fix: delete the texture OBJECT, not the sampler uniform location —
    # the original passed texture_id (from glGetUniformLocation).
    glDeleteTextures([texture])
    glDeleteVertexArrays(1, [vertex_array_id])

    glfw.terminate()
if __name__ == "__main__":
    # Script entry point.
    main()
| StarcoderdataPython |
12864002 | import time
from datetime import datetime
import pytest
from pnp.plugins.pull import StopPollingError
from pnp.plugins.pull.simple import CustomPolling
from . import make_runner, start_runner
@pytest.mark.asyncio
async def test_poll():
    """CustomPolling with a 1s interval emits payloads repeatedly."""
    events = []

    def callback(plugin, payload):
        # Collect every emitted payload for later inspection.
        events.append(payload)

    def poll():
        return datetime.now()

    dut = CustomPolling(name='pytest', interval="1s", scheduled_callable=poll)
    assert not dut.is_cron
    assert dut._poll_interval == 1
    runner = await make_runner(dut, callback)
    async with start_runner(runner):
        # NOTE(review): time.sleep blocks the event loop; presumably the
        # runner polls on a separate thread — confirm.
        time.sleep(3)
    # ~3 seconds at 1 Hz should produce at least two payloads.
    assert len(events) >= 2
@pytest.mark.asyncio
async def test_poll_for_aborting():
    """Raising StopPollingError from the poll callable stops emission."""
    events = []

    def callback(plugin, payload):
        events.append(payload)

    def poll():
        raise StopPollingError()

    runner = await make_runner(CustomPolling(name='pytest', interval="1s", scheduled_callable=poll), callback)
    async with start_runner(runner):
        time.sleep(1)
    # The very first poll aborts the schedule, so no payload is delivered.
    assert len(events) == 0
def test_poll_with_cron_expression():
    """A cron-style interval string is parsed into a CronExpression."""
    from cronex import CronExpression

    def poll():
        pass

    dut = CustomPolling(name='pytest', interval="*/1 * * * *", scheduled_callable=poll)
    assert dut.is_cron
    assert isinstance(dut._cron_interval, CronExpression)
    assert dut._cron_interval.string_tab == ['*/1', '*', '*', '*', '*']
| StarcoderdataPython |
67754 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class WorkItemTypeColor(Model):
    """WorkItemTypeColor.

    Auto-generated msrest model (see file header: DO NOT EDIT by hand;
    regenerate instead).

    :param primary_color:
    :type primary_color: str
    :param secondary_color:
    :type secondary_color: str
    :param work_item_type_name:
    :type work_item_type_name: str
    """

    # Maps Python attribute names to wire-format (camelCase) JSON keys.
    _attribute_map = {
        'primary_color': {'key': 'primaryColor', 'type': 'str'},
        'secondary_color': {'key': 'secondaryColor', 'type': 'str'},
        'work_item_type_name': {'key': 'workItemTypeName', 'type': 'str'}
    }

    def __init__(self, primary_color=None, secondary_color=None, work_item_type_name=None):
        super(WorkItemTypeColor, self).__init__()
        self.primary_color = primary_color
        self.secondary_color = secondary_color
        self.work_item_type_name = work_item_type_name
| StarcoderdataPython |
1699094 | import numpy as np
import rospy
import time
import sys
import pymap3d as pm
import numba as nb
from lib_ta_py.controller_2D_TA import Controller
from pkg_ta.msg import Control
from pkg_ta.msg import State_EKF_2D
from sensor_msgs.msg import NavSatFix
from sensor_msgs.msg import Imu
yawc_compass = - np.pi/2  # Yaw offset between the IMU/compass frame and the vehicle frame (rad).
# Reference point
lat0, lon0, h0 = -6.8712, 107.5738, 768  # Geodetic origin (lat, lon, height) of the local ENU frame.
@nb.njit()
def wrap_angle(angle):
    """Wrap an angle in radians into the interval [-pi, pi)."""
    return (angle + np.pi) % (2 * np.pi) - np.pi


# Warm up the numba JIT so the first real call in the control loop is fast.
_ = wrap_angle(1)
_ = wrap_angle(0.1)
@nb.njit()
def to_euler(x, y, z, w):
    """Dari Coursera: Return as xyz (roll pitch yaw) Euler angles.

    Converts quaternion components (x, y, z, w) to Euler angles;
    presumably assumes a unit quaternion — TODO confirm.
    """
    roll = np.arctan2(2 * (w * x + y * z), 1 - 2 * (x**2 + y**2))
    pitch = np.arcsin(2 * (w * y - z * x))
    yaw = np.arctan2(2 * (w * z + x * y), 1 - 2 * (y**2 + z**2))
    return np.array([roll, pitch, yaw])


# Compile the to_euler (numba JIT warm-up with a representative input).
_ = to_euler(1.5352300785980803e-15, -1.3393747145983517e-15, -0.7692164172827881, 0.638988343698562)
freq = 10  # Hz — control loop rate.

# Waypoint file selection; commented lines are alternative recorded paths.
# waypoints_np = np.load('waypoints/waypoints/31_agus_wp_lurus.npy')
waypoints_np = np.load('waypoints/waypoints/08_09_wp_lurus.npy')
# waypoints_np = np.load('waypoints/waypoints/09_09_wp_S.npy')
# waypoints_np = np.load('waypoints/waypoints/09_09_wp_belok.npy')
# waypoints_np = np.load('waypoints/waypoints/09_09_wp_belok_besar.npy')

# In the Arduino, CW is positive and CCW is negative
# On the other hand, in the controller algoritm, CCW is positive and CW is negative
max_steer = 35.; min_steer = -28.  # For the path following control algoritm ~ degree
max_steer_arduino = 28.; min_steer_arduino = -35.  # For the Arduino ~ degree
max_brake = 2.9; max_throttle = 0.25; min_throttle = 0.0; min_throttle_move = 0.08
min_vel_move = 0.5  # m/s

# Longitudinal PID gains.
kp = 0.11; ki = 0.3; kd = 0.015
ff_long = np.array([0.0, 0.0])  # no feed-forward

# Lateral control gains.
ks = 1.0; kv = 1.0; kff_lat = 1.7; dead_band_limit = 0.025
kv_lat = 1.0; kv_yaw = 4.0; kv_throttle = 2.5  # Speed / Throttle Additional Control
# kv_lat = 3.0; kv_yaw = 4.0; kv_throttle = 2.5 # Speed / Throttle Additional Control
kp_lat = 15. * np.pi / 180.
ki_lat = 0.5 * np.pi / 180.
kd_lat = 30. * np.pi / 180.
lat_max_int = 3. * np.pi / 180.

# Actuator saturation limits as [lower, upper].
sat_long = np.array([-np.abs(max_brake), np.abs(max_throttle)])
sat_lat = np.array([-np.abs(min_steer), np.abs(max_steer)])
sat_lat = sat_lat * np.pi / 180.

# Latest navigation estimate; mutated by the ROS callbacks in main().
state = {'x': 0., 'y': 0., 'yaw': 0., 'v': 0.}

# Flags set True once the first message of each kind has arrived.
RUN = False
RUN_compass = False
RUN_gnss = False
RUN_speed = False
def main():
    """ROS node: subscribe to navigation topics and publish control signals.

    Blocks until at least one GNSS fix, one compass reading and one speed
    estimate have arrived, then runs the path-following control loop at
    `freq` Hz until ROS shuts down.
    """
    global RUN, RUN_compass, RUN_gnss, RUN_speed
    # Create the controller object
    controller = Controller(kp, ki, kd, ff_long, sat_long,
                            ks, kv, kff_lat, dead_band_limit, sat_lat,
                            waypoints_np, min_vel_move, max_throttle, min_throttle_move,
                            kv_yaw, kv_lat, kv_throttle,
                            kp_lat, ki_lat, kd_lat, lat_max_int)

    def callback_gnss(msg_gnss):
        # Convert the geodetic fix to local ENU coordinates.
        global state
        global RUN_gnss
        gnss_pos = np.array(pm.geodetic2enu(msg_gnss.latitude,
                                            msg_gnss.longitude,
                                            msg_gnss.altitude,
                                            lat0, lon0, h0))
        state['x'] = gnss_pos[0]
        state['y'] = gnss_pos[1]
        RUN_gnss = True

    def callback_compass(msg_compass):
        # Extract yaw from the IMU quaternion and remove the mounting offset.
        global state
        global RUN_compass
        q = msg_compass.orientation
        euler = to_euler(q.x, q.y, q.z, q.w)
        # imu_yaw = euler[-1]
        state['yaw'] = wrap_angle(euler[-1] - yawc_compass)
        RUN_compass = True

    def callback_speed(msg_nav):
        # Ground speed magnitude from the 2D EKF velocity estimate.
        global state
        global RUN_speed
        state['v'] = np.sqrt(msg_nav.vx**2 + msg_nav.vy**2)
        RUN_speed = True

    rospy.init_node('control')
    rospy.Subscriber('/fix', NavSatFix, callback_gnss)
    rospy.Subscriber('/imu', Imu, callback_compass)
    rospy.Subscriber('/state_2d_new', State_EKF_2D, callback_speed)
    pub = rospy.Publisher('/control_signal', Control, queue_size=1)
    rate = rospy.Rate(freq)  # Hz

    # Busy-wait until every sensor stream has delivered its first message.
    print("Menunggu data navigasi masuk pertama kali ...")
    RUN = False
    while not RUN:
        RUN = RUN_compass and RUN_gnss and RUN_speed
        time.sleep(0.02)  # 20 ms
        pass
    print("Data Navigasi sudah masuk !")
    print("Program sudah berjalan !")

    msg = Control()
    msg.header.frame_id = 'path_following_control'
    msg.header.seq = 0
    msg.header.stamp = rospy.Time.now()
    last_time = msg.header.stamp.to_sec() - 1./freq

    while not rospy.is_shutdown():
        # Calculate the actual sampling time
        msg.header.stamp = rospy.Time.now()
        delta_t = msg.header.stamp.to_sec() - last_time
        last_time = msg.header.stamp.to_sec()

        # Calculate the control signal
        long, lat = controller.calculate_control_signal(delta_t, state['x'],
                                                        state['y'], state['v'],
                                                        state['yaw'])
        # Get the error profile
        err = controller.get_error()
        # Get the reference
        ref = controller.get_instantaneous_setpoint()

        # Send the message.  The steering sign is flipped because the
        # Arduino uses CW-positive while the controller is CCW-positive.
        msg.header.seq += 1
        msg.action_steer = max(min(-lat*180/np.pi, max_steer_arduino), min_steer_arduino)  # lat ~ radian
        msg.action_throttle = max(min(long, max_throttle), min_throttle)
        #msg.action_brake = max(min(-long, max_brake), 0.)
        msg.action_brake = 0.
        msg.error_speed = err[0]
        msg.error_lateral = err[1]
        msg.error_yaw = err[2]
        msg.actual_x = state['x']
        msg.actual_y = state['y']
        msg.actual_yaw = state['yaw']
        msg.actual_speed = state['v']
        msg.wp_idx = controller.get_closest_index()
        msg.ref_x = ref[0]
        msg.ref_y = ref[1]
        msg.ref_yaw = ref[2]
        msg.ref_speed = ref[3]
        msg.ref_curvature = ref[4]
        msg.deg_ref_yaw = msg.ref_yaw * 180. / np.pi
        msg.deg_actual_yaw = msg.actual_yaw * 180. / np.pi
        msg.deg_error_yaw = msg.error_yaw * 180. / np.pi

        pub.publish(msg)
        rate.sleep()
if __name__ == '__main__':
    # Standard ROS node entry point; swallow the shutdown interrupt.
    try:
        main()
    except rospy.ROSInterruptException:
        pass
| StarcoderdataPython |
11269139 | from __future__ import absolute_import
from .models import *
from .session import *
from .exceptions import *
from .chat import *

# Public API: concatenation of the sub-module export lists.
# Fix: the original wrapped the concatenation in an extra pair of brackets,
# producing a one-element list-of-lists, which makes
# ``from <package> import *`` fail (attribute names must be strings).
__all__ = (models.__all__ +
           session.__all__ +
           exceptions.__all__ +
           chat.__all__)

__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.9.3'
| StarcoderdataPython |
3351142 | <gh_stars>1-10
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the standardisation with mean and standard deviation.
"""
import logging
from typing import Optional, Tuple
import numpy as np
from art.config import ART_NUMPY_DTYPE
from art.preprocessing.preprocessing import Preprocessor
logger = logging.getLogger(__name__)
class StandardisationMeanStd(Preprocessor):
    """
    Standardise inputs by subtracting a mean and dividing by a standard
    deviation.
    """

    params = ["mean", "std", "apply_fit", "apply_predict"]

    def __init__(
        self, mean: float = 0.0, std: float = 1.0, apply_fit: bool = True, apply_predict: bool = True,
    ):
        """
        Create an instance of StandardisationMeanStd.

        :param mean: Mean to subtract from the inputs.
        :param std: Standard deviation to divide the inputs by.
        """
        super().__init__(is_fitted=True, apply_fit=apply_fit, apply_predict=apply_predict)
        self.mean = mean
        self.std = std
        self._check_params()

    def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None,) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Standardise the samples in `x`; labels `y` pass through unchanged.

        :param x: Input samples to standardise of shapes `NCHW`, `NHWC`, `NCFHW` or `NFHWC`.
        :param y: Label data, will not be affected by this preprocessing.
        :return: Standardised input samples and unmodified labels.
        """
        # Unsigned integer dtypes cannot hold the negative values produced
        # by mean subtraction, so reject them up front.
        if x.dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
            raise TypeError(
                "The data type of input data `x` is {} and cannot represent negative values. Consider "
                "changing the data type of the input data `x` to a type that supports negative values e.g. "
                "np.float32.".format(x.dtype)
            )

        offset = np.asarray(self.mean, dtype=ART_NUMPY_DTYPE)
        scale = np.asarray(self.std, dtype=ART_NUMPY_DTYPE)
        standardised = ((x - offset) / scale).astype(ART_NUMPY_DTYPE)
        return standardised, y

    def estimate_gradient(self, x: np.ndarray, gradient: np.ndarray) -> np.ndarray:
        # Backward pass of (x - mean) / std: the gradient is scaled by 1/std.
        scale = np.asarray(self.std, dtype=ART_NUMPY_DTYPE)
        return gradient / scale

    def _check_params(self) -> None:
        pass

    def __repr__(self):
        return "StandardisationMeanStd(mean={}, std={}, apply_fit={}, apply_predict={})".format(
            self.mean, self.std, self.apply_fit, self.apply_predict
        )
| StarcoderdataPython |
6612254 | <gh_stars>1-10
import spectrum_functions
import unittest
import numpy as np
class SpectrumFunctionsTest(unittest.TestCase):
    """Unit tests for spectrum_functions: input validation (check_spectrum),
    range slicing (slice_range), normalisation (normalize) and full-width
    measurement (find_fw)."""

    def test2DSpecX(self):
        '''an error is raised if check_spectrum is given a 2D x input'''
        x = np.arange(10).reshape((2, 5))
        y = np.arange(10)
        with self.assertRaisesRegex(ValueError,
                                    'x is not a 1 dimensional array'):
            spectrum_functions.check_spectrum(x, y)

    def test2DSpecY(self):
        '''an error is raised if check_spectrum is given a 2D y input'''
        x = np.arange(10)
        y = np.arange(10).reshape((2, 5))
        with self.assertRaisesRegex(ValueError,
                                    'y is not a 1 dimensional array'):
            spectrum_functions.check_spectrum(x, y)

    def testXYlenDifferent(self):
        '''an error is raised if x and y are not the same length as each
        other'''
        x = np.arange(10)
        y = np.arange(9)
        with self.assertRaisesRegex(ValueError,
                                    'x and y are not the same length'):
            spectrum_functions.check_spectrum(x, y)

    def testSliceRange(self):
        '''slice_range works properly in normal use case'''
        x = np.arange(10)
        y = np.arange(10)
        start_stop = [5, 8]
        self.assertTrue(np.array_equal(
            np.array([5, 6, 7, 8]),
            spectrum_functions.slice_range(x, start_stop, y)
            ))

    def testSliceRangeYCal(self):
        '''slice_range works properly with y non-integer values'''
        x = np.arange(10)
        y = np.arange(10)/10.
        start_stop = [0.3, 0.7]
        self.assertTrue(np.array_equal(
            np.array([3, 4, 5, 6, 7]),
            spectrum_functions.slice_range(x, start_stop, y)
            ))

    def testSliceRangeStopOutside(self):
        '''slice_range works properly when stop is greater than max(y)'''
        x = np.arange(10)
        y = np.arange(10)
        start_stop = [5, 12]
        self.assertTrue(np.array_equal(
            np.array([5, 6, 7, 8, 9]),
            spectrum_functions.slice_range(x, start_stop, y)
            ))

    def testSliceRangeStartOutside(self):
        '''slice_range works properly when stop is greater than max(y)'''
        x = np.arange(10)
        y = np.arange(10)
        start_stop = [-2, 5]
        self.assertTrue(np.array_equal(
            np.array([0, 1, 2, 3, 4, 5]),
            spectrum_functions.slice_range(x, start_stop, y)
            ))

    def testSliceRangeYNegative(self):
        '''slice_range works properly when y contains negative values'''
        x = np.arange(10)
        y = np.arange(10) - 5
        start_stop = [-7, 0]
        self.assertTrue(np.array_equal(
            np.array([0, 1, 2, 3, 4, 5]),
            spectrum_functions.slice_range(x, start_stop, y)
            ))

    def testSliceRangeStartStopDims(self):
        '''raise error in slice_range if start_stop are not a 2 element list or
        tuple or array'''
        x = np.arange(10)
        y = np.arange(10) - 5
        start_stop = [-7, 0, 5]
        with self.assertRaisesRegex(ValueError,
                                    'start_stop is not a 2 element list'):
            spectrum_functions.slice_range(x, start_stop, y)

    def testSliceRangeYDecreasing(self):
        '''slice_range works when y is monotonically decreasing'''
        x = np.arange(10)
        y = np.flip(np.arange(10))
        start_stop = [5, 2]
        self.assertTrue(np.array_equal(
            np.array([4, 5, 6, 7]),
            spectrum_functions.slice_range(x, start_stop, y)
            ))

    def testNormalize1Index(self):
        '''Normalize works as expected when a single index is given'''
        x = np.arange(10)
        ind = 2
        np.testing.assert_allclose(
            x/2.,
            spectrum_functions.normalize(x, ind)
            )

    def testNormalizeFloatIndex(self):
        '''Normalize throws an error when given a float index'''
        x = np.arange(10)
        ind = 2.4
        with self.assertRaises(ValueError):
            spectrum_functions.normalize(x, ind)

    def testNormalize1IndexTuple(self):
        '''Normalize throws an error if a single index inside a sequence
        is given'''
        x = np.arange(10)
        ind = [3]
        with self.assertRaises(ValueError):
            spectrum_functions.normalize(x, ind)

    def testNormalize2Indices(self):
        '''Normalize works as expected when two indices are given'''
        # With two indices the spectrum is normalised by the sum over the
        # slice, here 2+3+4 = 9.
        x = np.arange(10)
        ind = (2, 5)
        np.testing.assert_allclose(
            x/9.,
            spectrum_functions.normalize(x, ind)
            )

    def testNormalizeMoreIndices(self):
        '''Normalize raises an error if more than two indices are passed as
        input'''
        x = np.arange(10)
        ind = (2, 5, 3)
        with self.assertRaises(ValueError):
            spectrum_functions.normalize(x, ind)

    def testFindFWHMInt(self):
        '''
        find_fw finds the right fw given a simple function
        '''
        # NOTE: x is unused; find_fw operates on y with an explicit centre
        # index, window and fractional height.
        y = np.array([1, 1, 2, 4, 2, 1, 1])
        x = np.arange(7)
        fwhm = 2.
        self.assertEqual(fwhm, spectrum_functions.find_fw(y, 1, 3, 0.5))

    def testFindFWHMDecimal(self):
        '''
        find_fw finds the right fw given a simple function, answer is a fraction of the dispersion
        '''
        y = np.array([1, 1, 2, 5, 2, 1, 1])
        x = np.arange(7)
        fwhm = 5/3.
        np.testing.assert_almost_equal(spectrum_functions.find_fw(y, 1, 3, 0.5), fwhm)

    def testFindFWAsymmetrical(self):
        '''
        find_fw finds the right fw given an asymmetrical function
        '''
        y = np.array([1, 1, 3, 5, 2, 1, 1])
        x = np.arange(7)
        fwhm = 2.5 / 3 + 1 + 0.25
        self.assertEqual(fwhm, spectrum_functions.find_fw(y, 1, 3, 0.5))

    def testFindFWAsymmetricalRight(self):
        '''
        find_fw finds the right fw given an asymmetrical function, higher on the right side
        '''
        y = np.array([1, 1, 2, 5, 3, 1, 1])
        x = np.arange(7)
        fwhm = 2.5 / 3 + 1 + 0.25
        self.assertEqual(fwhm, spectrum_functions.find_fw(y, 1, 3, 0.5))
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| StarcoderdataPython |
1778527 | <gh_stars>0
import re
from math import inf
from heapq import heappush, heappop
from typing import List, Any
def create_cave(depth: int, tx: int, ty: int) -> List[List[int]]:
    """
    Creates the cave according to the cave generation rules.

    Since the cave is essentially infinite a constant size padding is applied
    around the target coordinates to make the pathfinding feasible. Note that
    there needs to be a padding because the optimal path can overshoot the
    target. The padding size for this input was found simply by starting with
    a very large value and progressively decreasing it until a value small
    enough was found which produces the correct pathfinding result but is
    still relatively quick to compute.

    Each cell holds the erosion level: (geologic index + depth) % 20183.
    """
    PADDING = 50
    width, height = tx + PADDING, ty + PADDING
    cave = [[0] * width for _ in range(height)]
    for y in range(height):
        for x in range(width):
            # Geologic index per the puzzle rules; mouth and target are 0.
            if (x, y) in ((0, 0), (tx, ty)):
                geo = 0
            elif y == 0:
                geo = x * 16807
            elif x == 0:
                geo = y * 48271
            else:
                geo = cave[y - 1][x] * cave[y][x - 1]
            cave[y][x] = (geo + depth) % 20183
    return cave
def risk_level(cave: List[List[int]], tx: int, ty: int) -> int:
    """
    Computes risk level for the smallest rectangle which contains both the
    mouth of the cave and the target.

    Risk per cell is its erosion level modulo 3.
    """
    return sum(cave[y][x] % 3
               for y in range(ty + 1)
               for x in range(tx + 1))
def allowed(tool: str, region: str):
    """
    Returns whether tool is allowed at region.

    Regions: '.' rocky (torch/climbing gear), '=' wet (climbing gear/neither),
    anything else is narrow (torch/neither).
    """
    valid_tools = {'.': ('t', 'c'), '=': ('c', 'n')}
    return tool in valid_tools.get(region, ('t', 'n'))
def dijkstra(grid: List[List[Any]], tx: int, ty: int) -> int:
    """
    Dijkstra's algorithm applied to the problem of finding the shortest
    path to the target via optimal tool selection.

    The algorithm works exactly like the regular Dijkstra's algorithm with a
    priority queue but it needs to keep track of the equipped tool in addition
    to the distance to each node as the distance to each node depends on the
    tools used to reach that node.

    Note: mutates `grid` in place (erosion levels -> terrain characters).
    """
    # Convert erosion levels to terrain types for easier visualization
    types = ('.', '=', '|')
    for y in range(len(grid)):
        for x in range(len(grid[0])):
            grid[y][x] = types[grid[y][x] % 3]

    q = []  # Queue of (distance, x, y, tool)
    heappush(q, (0, 0, 0, 't'))  # Start at the mouth of the cave
    distances = {(0, 0, 't'): 0}  # Distance to each node with specified tool
    neighbours = ((0, -1), (1, 0), (0, 1), (-1, 0))  # Neighbour vectors

    # Apply Dijkstra and find the shortest route to the target
    while q:
        distance, x, y, tool = heappop(q)
        for nx, ny in ((x + nx, y + ny) for nx, ny in neighbours):
            if nx >= 0 and ny >= 0 and nx < len(grid[0]) and ny < len(grid):
                for new_tool in ('t', 'c', 'n'):
                    # Tool needs to be valid both here and next door
                    # NOTE(review): this validates the *current* tool at both
                    # cells rather than `new_tool` at the destination; it
                    # matches the asserted answer below but looks inconsistent
                    # with the usual tool-switching rule — confirm.
                    if allowed(tool, grid[ny][nx]) and \
                       allowed(tool, grid[y][x]):
                        # Current tool can be kept - no time penalty
                        if new_tool == tool:
                            neighbour_dist = 1
                        # Current tool needs to be swapped - time penalty
                        else:
                            neighbour_dist = 8
                        neighbour_dist += distance
                        # Distances are keyed as (row, col, tool).
                        triple = (ny, nx, new_tool)
                        if neighbour_dist < distances.get(triple, inf):
                            distances[triple] = neighbour_dist
                            heappush(q, (neighbour_dist, nx, ny, new_tool))

    # The target must be reached holding the torch.
    return distances.get((ty, tx, 't'))
if __name__ == '__main__':
    # Puzzle input (depth and target coordinates) parsed from the literal.
    data = 'depth: 4845 target: 6,770'
    pattern = r'depth: (\d+) target: (\d+),(\d+)'
    match = re.match(pattern, data)
    depth, tx, ty = (int(v) for v in match.groups())
    cave = create_cave(depth, tx, ty)
    # Known-good answers for this input (parts 1 and 2).
    assert risk_level(cave, tx, ty) == 5400
    assert dijkstra(cave, tx, ty) == 1048
| StarcoderdataPython |
6525596 | from account import Account
import sqlite3
accounts = {}  # In-memory cache: str(index) -> {"name", "password", "credit"}.
data_path = "accounts.db"  # SQLite database file.
active_account = None  # Account object of the currently logged-in user.

con = sqlite3.connect(data_path)  # Shared connection, opened at import time.
c = con.cursor()

undr = 50  # Width of the decorative underscore separators.
def check_for_accounts():
    """Ensure the accounts table exists and load every row into `accounts`.

    Populates the module-level `accounts` dict keyed by the stringified
    row index ("0", "1", ...).
    """
    global accounts
    try:
        c.execute(
            """CREATE TABLE IF NOT EXISTS accounts(
                name TEXT,
                password TEXT,
                credit INTEGER
            );"""
        )
        con.commit()

        c.execute("SELECT * FROM accounts")
        accs = c.fetchall()
        con.commit()

        for i, account in enumerate(accs):
            accounts[str(i)] = {
                "name": account[0],
                "password": account[1],
                "credit": account[2],
            }
    except sqlite3.Error as exc:
        # Fix: the original bare `except:` swallowed every exception and
        # printed a generic "ERROR"; catch database errors specifically
        # and report what actually went wrong.
        print(f"ERROR: {exc}")
def login():
    """Prompt for credentials and set `active_account` on success.

    Re-prompts (recursively) on a failed attempt and returns to main()
    on success or when no accounts exist yet.
    """
    global active_account
    if len(accounts) <= 0:
        print("_" * undr)
        print("NO ACCOUNTS FOUND, PLEASE CREATE ONE.\n", "_" * undr)
        main()
        return

    acc = Account("", "", 0)
    acc.name = input("Enter your name\n>").strip()
    acc.password = input("Enter your password\n>").strip()

    users = [accounts[str(x)] for x in range(0, len(accounts))]
    for i, user in enumerate(users):
        # Fix: the stored credentials are compared against the typed ones;
        # the original comparison line was corrupted to a `<PASSWORD>`
        # placeholder (invalid syntax).
        if user["name"] == acc.name and user["password"] == acc.password:
            acc.credit = user["credit"]
            acc.index = i
            active_account = acc
            print("You have succesfully logged in!")
            main()
            return

    print("_" * undr)
    print("Wrong password or username, try again\n", "_" * undr)
    login()
def signup():
    """Interactively create a new account and persist it to the database.

    Validates that the name is non-empty and unique, that the password is
    confirmed and non-empty, and that the opening deposit is numeric.
    """
    global accounts
    acc = Account("", "", 0)
    while True:
        name = input("Please enter your name\n>").strip()
        if name == "":
            print("_" * undr)
            print("Name can't be empty...\n", "_" * undr)
            continue
        for i in range(0, len(accounts)):
            if name == accounts[str(i)]["name"]:
                print("_" * undr)
                print(f"ACCOUNT WITH NAME: '{name}' ALREADY EXISTS\n", "_" * undr)
                main()
                return
        acc.name = name
        break
    while True:
        password = input("Please enter your password\n>").strip()
        password_confirm = input("Please confirm your password again\n>").strip()
        if password == password_confirm:
            if password == "":
                print("_" * undr)
                print("ERR PASSWORD EMPTY\n", "_" * undr)
                continue
            acc.password = password
            break
        else:
            print("_" * undr)
            print("ERR PASSWORD INCORRECT\n", "_" * undr)
    while True:
        credit = input("How much money do you want to deposit?\n>")
        if credit.isnumeric():
            acc.credit = int(credit)
            break
        else:
            print("_" * undr)
            print("error credit is a number idiot.\n", "_" * undr)
    # Keys are always the contiguous indices "0".."n-1", so the next free key
    # is str(len(accounts)). The old else-branch used len(accounts) + 1, which
    # left a gap in the keys and broke every range()-based lookup; it was also
    # unreachable in practice. The anonymised "<PASSWORD>" placeholder is
    # restored to acc.password.
    accounts[str(len(accounts))] = {
        "name": acc.name,
        "password": acc.password,
        "credit": acc.credit,
    }
    save_data_to_file()
    main()
def save_data_to_file():
    """Persist the in-memory ``accounts`` dict, replacing all stored rows."""
    # Wipe the table and compact the database file before re-inserting.
    c.execute("DELETE FROM accounts;")
    con.commit()
    c.execute("VACUUM;")
    con.commit()
    # Re-insert the current snapshot; named placeholders map straight from
    # each record dict.
    for idx in range(len(accounts)):
        record = accounts[str(idx)]
        c.execute(
            "INSERT INTO accounts VALUES (:name, :password, :credit)",
            {
                "name": record["name"],
                "password": record["password"],
                "credit": record["credit"],
            },
        )
    con.commit()
def get_operation():
    # Logged-out menu dispatcher; keeps re-prompting (via tail recursion)
    # until login/signup/exit is chosen.
    res = input(">").lower().strip()
    if res == "login" or res == "log":
        login()
        return
    elif res == "signup" or res == "sign":
        signup()
        return
    elif res == "users":
        users = [accounts[str(x)] for x in range(0, len(accounts))]
        names = [x["name"] for x in users]
        print(names)
    elif res == "help":
        print(
            """ Commands:
    >login
    >signup
    >users shows users saved
    >help shows this
    >exit exits the ATM Machine
    """
        )
    elif res == "exit":
        return
    else:
        print("_" * undr)
        print("err, please see help for commands\n", "_" * undr)
    # Non-returning commands (users/help/unknown) fall through to re-prompt.
    get_operation()
def reorder_accounts(temp, id):
    # Re-index the accounts dict after a deletion so its keys stay the
    # contiguous strings "0".."n-1": every key above the removed index is
    # shifted down by one position.
    # NOTE(review): `id` alternates between int and str inside the loop;
    # this only works because int(id) is re-taken on every comparison.
    global accounts
    for x in range(1, len(temp) + 1):
        if x > int(id):
            temp[str(id)] = temp.pop(str(x))
            id = str(x)
    accounts = temp
    save_data_to_file()
def remove_active_account():
    """Delete the logged-in user's account and return to the main menu."""
    global active_account
    doomed_index = active_account.index
    remaining = accounts
    remaining.pop(str(doomed_index))
    # Shift the higher-indexed entries down and persist the result.
    reorder_accounts(remaining, doomed_index)
    active_account = None
    main()
def get_operation_loggedin():
    """Menu loop for a logged-in user: deposit / withdraw / logout / delete.

    Deposits and withdrawals update both the in-memory record and the
    database. Fixed: the anonymised "<PASSWORD>" placeholders left here were
    invalid Python; the stored record must carry active_account.password.
    """
    global active_account
    res = input(">").strip().lower()
    if res == "deposit":
        try:
            amount = abs(int(input("How much do you want to deposit?\n>>>")))
            x = str(int(active_account.credit) + amount)
            active_account.credit = x
            accounts[str(active_account.index)] = {
                "name": active_account.name,
                "password": active_account.password,
                "credit": active_account.credit,
            }
            save_data_to_file()
            print(f"You now have {active_account.credit}$.")
        # Narrowed from a bare except: int() on bad input is the expected failure.
        except ValueError:
            print("_" * undr)
            print("err, please try again\n", "_" * undr)
    elif res == "withdraw":
        try:
            amount = abs(int(input("How much do you want to withdraw?\n>>>")))
            x = str(int(active_account.credit) - amount)
            active_account.credit = x
            accounts[str(active_account.index)] = {
                "name": active_account.name,
                "password": active_account.password,
                "credit": active_account.credit,
            }
            save_data_to_file()
            print(f"You now have {active_account.credit}$.")
        except ValueError:
            print("_" * undr)
            print("err, please try again\n", "_" * undr)
    elif res == "logout":
        active_account = None
    elif res == "del acc":
        ans = input("ARE YOU SURE? (Y/N)\n>").strip().lower()
        if ans == "y":
            remove_active_account()
            return
    elif res == "exit":
        return
    else:
        print("_" * undr)
        print("unkown command\n", "_" * undr)
    main()
def main():
    # Dispatcher: show the logged-out menu until a user logs in, then the
    # per-account menu.
    if active_account == None:
        check_for_accounts()
        print(
            f"[{len(accounts)} Accounts Found], Would you like to login or signup? (login/signup/help)"
        )
        get_operation()
        return
    else:
        print(
            f"You have {active_account.credit}$, What would like to do? (deposit, withdraw, logout,del acc)"
        )
        get_operation_loggedin()
        return
main()
con.close()  # close the database connection once the menu loop exits
| StarcoderdataPython |
388719 | <reponame>louwenjjr/nplinker<filename>prototype/nplinker/nplinker.py
# Copyright 2021 The NPLinker Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
import copy
from .metabolomics import Spectrum
from .metabolomics import MolecularFamily
from .genomics import GCF, BGC
from .config import Config, Args
from .loader import DatasetLoader
from .pickler import save_pickled_data
from .scoring.methods import MetcalfScoring, RosettaScoring, TestScoring
from .scoring.methods import LinkCollection
from .logconfig import LogConfig
logger = LogConfig.getLogger(__file__)
class NPLinker(object):
    """Top-level interface to an NPLinker dataset and its scoring methods."""

    # allowable types for objects to be passed to scoring methods
    OBJ_CLASSES = [Spectrum, MolecularFamily, GCF, BGC]
    # default set of enabled scoring methods
    # TODO: ideally these shouldn't be hardcoded like this
    SCORING_METHODS = {
        MetcalfScoring.NAME: MetcalfScoring,
        TestScoring.NAME: TestScoring,
        RosettaScoring.NAME: RosettaScoring,
    }
def __init__(self, userconfig=None):
"""Initialise an NPLinker instance.
NPLinker instances can be configured in multiple ways, in ascending order of priority:
1. A global user-level default configuration file in TOML format, found in the directory XDG_CONFIG_HOME/nplinker/nplinker.toml
2. A local TOML configuration file
3. Command-line arguments / supplying a manually constructed parameter dictionary
The global user-level configuration file will be created automatically the first time
an NPLinker instance is initialised if it doesn't already exist. The default file contains
sensible default values for each setting and is intended to be copied and edited to
produce dataset-specific configuration files, which will then override any parameters shared
with the user-level file. To load such a file, simply set the "userconfig" parameter to a
string containing the filename.
It's also possible to selectively override configuration file parameters by
supplying command-line arguments (if running nplinker.py as a script), or by passing
a dict with a structure corresponding to the configuration file format to this method.
Some examples may make the various possible combinations a bit clearer::
# simplest option: load a local configuration file
> npl = NPLinker('myconfig.toml')
# the same thing but running as a script
> python -m nplinker.nplinker --config "myconfig.toml"
# use the defaults from the user-level config while modifying the root path
# to load the dataset from (this is the minimum you would need to change in the
# default config file)
> npl = NPLinker({'dataset': {'root': '/path/to/dataset'}})
# the same thing running NPLinker as a script
> python nplinker.py --dataset.root /path/to/dataset
Args:
userconfig: supplies user-defined configuration data. May take one of 3 types:
- str: treat as filename of a local configuration file to load
(overriding the defaults)
- dict: contents will be used to override values in the dict generated
from loading the configuration file(s)
"""
# if userconfig is a string => create a dict with 'config' key and string as filename
# if userconfig is a dict => pass it to Config() directly
if isinstance(userconfig, str):
userconfig = {'config': userconfig}
elif not isinstance(userconfig, dict):
raise Exception('Invalid type for userconfig (should be None/str/dict, found "{}")'.format(type(userconfig)))
self._config = Config(userconfig)
# configure logging based on the supplied config params
LogConfig.setLogLevelStr(self._config.config['loglevel'])
logfile = self._config.config['logfile']
if len(logfile) > 0:
logfile_dest = logging.FileHandler(logfile)
# if we want to log to stdout plus logfile, add the new destination
if self._config.config.get('log_to_stdout', True): # default to True
LogConfig.addLogDestination(logfile_dest)
else:
# otherwise overwrite the default stdout destination
LogConfig.setLogDestination(logfile_dest)
# the DatasetLoader takes care of figuring out the locations of all the relevant files/folders
# and will show error/warning if any missing (depends if optional or not)
self._loader = DatasetLoader(self._config.config)
self._spectra = []
self._bgcs = []
self._gcfs = []
self._strains = None
self._metadata = {}
self._molfams = []
self._mibig_bgc_dict = {}
self._bgc_lookup = {}
self._spec_lookup = {}
self._scoring_methods = {}
config_methods = self._config.config.get('scoring_methods', [])
for name, method in NPLinker.SCORING_METHODS.items():
if len(config_methods) == 0 or name in config_methods:
self._scoring_methods[name] = method
logger.debug('Enabled scoring method: {}'.format(name))
self._scoring_methods_setup_complete = {name: False for name in self._scoring_methods.keys()}
self._datalinks = None
self._repro_data = {}
repro_file = self._config.config['repro_file']
if len(repro_file) > 0:
self.save_repro_data(repro_file)
def _collect_repro_data(self):
"""Creates a dict containing data to aid reproducible runs of nplinker.
This method creates a dict containing various bits of information about
the current execution of nplinker. This data will typically be saved to
a file in order to aid reproducibility using :func:`save_repro_data`.
TODO describe contents
Returns:
A dict containing the information described above
"""
self._repro_data = {}
# TODO best way to embed latest git commit hash? probably with a packaging script...
# TODO versions of all Python dependencies used (just rely on Pipfile.lock here?)
# insert command line arguments
self._repro_data['args'] = {}
for i, arg in enumerate(sys.argv):
self._repro_data['args'][i] = arg
# TODO anything else to include here?
return self._repro_data
    def save_repro_data(self, filename):
        # Gather the current run's reproducibility info and pickle it to disk.
        self._collect_repro_data()
        with open(filename, 'wb') as repro_file:
            # TODO is pickle the best format to use?
            save_pickled_data(self._repro_data, repro_file)

        logger.info('Saving reproducibility data to {}'.format(filename))
    # --- read-only views over configuration and loader state ---

    @property
    def config(self):
        """Returns a copy of the data parsed from the configuration file as a dict

        Returns:
            dict: configuration file parameters as a nested dict
        """
        return copy.deepcopy(self._config.config)

    @property
    def root_dir(self):
        """Returns path to the current dataset root directory

        Returns:
            str: the path to the dataset root directory currently in use
        """
        return self._loader._root

    @property
    def dataset_id(self):
        """Returns dataset "ID".

        For local datasets this will just be the last component of the directory path,
        e.g. /path/to/my_dataset would produce an ID of "my_dataset".

        For datasets loaded from the Paired Omics platform the ID will be the platform
        project ID, e.g. "MSV000079284"

        Returns:
            str: the dataset ID
        """
        return self._loader.dataset_id

    @property
    def data_dir(self):
        """Returns path to nplinker/data directory (files packaged with the app itself)"""
        return self._loader.datadir

    @property
    def gnps_params(self):
        """Returns a dict containing data from GNPS params.xml (if available).

        Returns:
            dict: GNPS parameters, or an empty dict if none exist in the dataset
        """
        return self._loader.gnps_params

    @property
    def dataset_description(self):
        """Returns dataset description.

        If nplinker finds a 'description.txt' file in the root directory of the
        dataset, the content will be parsed and made available through this property.

        Returns:
            str: the content of description.txt or '<no description>'
        """
        return self._loader.description_text

    @property
    def bigscape_cutoff(self):
        """Returns the current BiGSCAPE clustering cutoff value"""
        return self._loader._bigscape_cutoff
    def load_data(self, new_bigscape_cutoff=None, met_only=False):
        """Loads the basic components of a dataset.

        This method is responsible for loading the various pieces of the supplied dataset into
        memory and doing any initial parsing/object manipulation required. After it completes,
        applications can access the lists of GCFs, Spectra, MolecularFamilies and strains
        using the corresponding properties of the NPLinker class.

        Args:
            new_bigscape_cutoff: if given, only the genomics side is reloaded
                with this BiGSCAPE clustering cutoff applied
            met_only: if True, load only the metabolomics data

        Returns:
            bool: True if successful, False otherwise
        """
        # typical case where load_data is being called with no params
        if new_bigscape_cutoff is None:
            logger.debug('load_data (normal case, full load, met_only={})'.format(met_only))
            self._loader.validate()
            if not self._loader.load(met_only=met_only):
                return False
        else:
            logger.debug('load_data with new cutoff = {}'.format(new_bigscape_cutoff))
            # 1. change the cutoff (which by itself doesn't do anything)
            self._loader._bigscape_cutoff = new_bigscape_cutoff
            # 2. reload the strain mappings (MiBIG filtering may have removed strains
            # that were originally present, need to restore them all so the filtering
            # won't break when it runs again in next stage)
            self._loader._load_strain_mappings()
            # 3. reload the genomics data with the new cutoff applied
            self._loader._load_genomics()

        # copy the loader's results into this instance's attributes
        self._spectra = self._loader.spectra
        self._molfams = self._loader.molfams
        self._bgcs = self._loader.bgcs
        self._gcfs = self._loader.gcfs
        self._mibig_bgc_dict = self._loader.mibig_bgc_dict
        self._strains = self._loader.strains
        self._product_types = self._loader.product_types

        logger.debug('Generating lookup tables: genomics')
        self._bgc_lookup = {}
        for i, bgc in enumerate(self._bgcs):
            self._bgc_lookup[bgc.name] = i

        self._gcf_lookup = {}
        for i, gcf in enumerate(self._gcfs):
            self._gcf_lookup[gcf.gcf_id] = i

        # don't need to do these two if cutoff changed (indicating genomics data
        # was reloaded but not metabolomics)
        if new_bigscape_cutoff is None:
            logger.debug('Generating lookup tables: metabolomics')
            self._spec_lookup = {}
            for i, spec in enumerate(self._spectra):
                self._spec_lookup[spec.spectrum_id] = i

            self._molfam_lookup = {}
            for i, molfam in enumerate(self._molfams):
                self._molfam_lookup[molfam.id] = i

        logger.debug('load_data: completed')
        return True
def get_links(self, input_objects, scoring_methods, and_mode=True):
"""Find links for a set of input objects (BGCs/GCFs/Spectra/MolFams)
The input objects can be any mix of the following NPLinker types:
- BGC
- GCF
- Spectrum
- MolecularFamily
TODO longer description here
Args:
input_objects: objects to be passed to the scoring method(s).
This may be either a flat list of a uniform type (one of the 4
types above), or a list of such lists
scoring_methods: a list of one or more scoring methods to use
and_mode (bool): determines how results from multiple methods are combined.
This is ignored if a single method is supplied. If multiple methods
are used and ``and_mode`` is True, the results will only contain
links found by ALL methods. If False, results will contain links
found by ANY method.
Returns:
An instance of ``nplinker.scoring.methods.LinkCollection``
"""
if isinstance(input_objects, list) and len(input_objects) == 0:
raise Exception('input_objects length must be > 0')
if isinstance(scoring_methods, list) and len(scoring_methods) == 0:
raise Exception('scoring_methods length must be > 0')
# for convenience convert a single scoring object into a single entry list
if not isinstance(scoring_methods, list):
scoring_methods = [scoring_methods]
# check if input_objects is a list of lists. if so there should be one
# entry for each supplied method for it to be a valid parameter
if isinstance(input_objects[0], list):
if len(input_objects) != len(scoring_methods):
raise Exception('Number of input_objects lists must match number of scoring_methods (found: {}, expected: {})'.format(len(input_objects), len(scoring_methods)))
# TODO check scoring_methods only contains ScoringMethod-derived instances
# want everything to be in lists of lists
if not isinstance(input_objects, list) or (isinstance(input_objects, list) and not isinstance(input_objects[0], list)):
input_objects = [input_objects]
logger.debug('get_links: {} object sets, {} methods'.format(len(input_objects), len(scoring_methods)))
# copy the object set if required to make up the numbers
if len(input_objects) != len(scoring_methods):
if len(scoring_methods) < len(input_objects):
raise Exception('Number of scoring methods must be >= number of input object sets')
elif (len(scoring_methods) > len(input_objects)) and len(input_objects) != 1:
raise Exception('Mismatch between number of scoring methods and input objects ({} vs {})'.format(len(scoring_methods), len(input_objects)))
elif len(scoring_methods) > len(input_objects):
# this is a special case for convenience: pass in 1 set of objects and multiple methods,
# result is that set is used for all methods
logger.debug('Duplicating input object set')
while len(input_objects) < len(scoring_methods):
input_objects.append(input_objects[0])
logger.debug('Duplicating input object set')
link_collection = LinkCollection(and_mode)
for i, method in enumerate(scoring_methods):
# do any one-off initialisation required by this method
if not self._scoring_methods_setup_complete[method.name]:
logger.debug('Doing one-time setup for {}'.format(method.name))
self._scoring_methods[method.name].setup(self)
self._scoring_methods_setup_complete[method.name] = True
# should construct a dict of {object_with_link: <link_data>} entries
objects_for_method = input_objects[i]
logger.debug('Calling scoring method {} on {} objects'.format(method.name, len(objects_for_method)))
link_collection = method.get_links(objects_for_method, link_collection)
if not self._datalinks:
logger.debug('Creating internal datalinks object')
self._datalinks = self.scoring_method(MetcalfScoring.NAME).datalinks
logger.debug('Created internal datalinks object')
if len(link_collection) == 0:
logger.debug('No links found or remaining after merging all method results!')
# populate shared strain info
logger.debug('Calculating shared strain information...')
# TODO more efficient version?
for source, link_data in link_collection.links.items():
if isinstance(source, BGC):
logger.debug('Cannot determine shared strains for BGC input!')
break
targets = list(filter(lambda x: not isinstance(x, BGC), link_data.keys()))
if len(targets) > 0:
shared_strains = self._datalinks.common_strains([source], targets, True)
for objpair in shared_strains.keys():
shared_strains[objpair] = [self._strains.lookup_index(x) for x in shared_strains[objpair]]
if isinstance(source, GCF):
for target, link in link_data.items():
if (target, source) in shared_strains:
link.shared_strains = shared_strains[(target, source)]
else:
for target, link in link_data.items():
if (source, target) in shared_strains:
link.shared_strains = shared_strains[(source, target)]
logger.debug('Finished calculating shared strain information')
logger.debug('Final size of link collection is {}'.format(len(link_collection)))
return link_collection
    def get_common_strains(self, objects_a, objects_b, filter_no_shared=True):
        """Retrieve strains shared by arbitrary pairs of objects.

        Two lists of objects are required as input. Typically one list will be
        MolecularFamily or Spectrum objects and the other GCF (which list is which
        doesn't matter).

        The return value is a dict mapping pairs of objects to lists of Strain objects
        shared by that pair. This list may be empty if ``filter_no_shared`` is False,
        indicating no shared strains were found.

        If ``filter_no_shared`` is True, every entry in the dict with no shared strains
        will be removed before it is returned, so the only entries will be those for
        which shared strains exist.

        Args:
            objects_a (list): a list of Spectrum/MolecularFamily/GCF objects
            objects_b (list): a list of Spectrum/MolecularFamily/GCF objects
            filter_no_shared (bool): if True, remove result entries for which no shared strains exist

        Returns:
            A dict mapping pairs of objects (obj1, obj2) to lists of Strain objects.
            NOTE: The ordering of the pairs is *fixed* to be (metabolomic, genomic). In
            other words, if objects_a = [GCF1, GC2, ...] and objects_b = [Spectrum1,
            Spectrum2, ...], the object pairs will be (Spectrum1, GCF1), (Spectrum2,
            GCF2), and so on. The same applies if objects_a and objects_b are swapped,
            the metabolomic objects (Spectrum or MolecularFamily) will be the obj1
            entry in each pair.
        """
        # lazily create the DataLinks helper from the Metcalf scoring method
        if not self._datalinks:
            self._datalinks = self.scoring_method(MetcalfScoring.NAME).datalinks

        # this is a dict with structure:
        # (Spectrum/MolecularFamily, GCF) => list of strain indices
        common_strains = self._datalinks.common_strains(objects_a, objects_b, filter_no_shared)

        # replace the lists of strain indices with actual strain objects
        for objpair in common_strains.keys():
            common_strains[objpair] = [self._strains.lookup_index(x) for x in common_strains[objpair]]

        return common_strains
def has_bgc(self, name):
"""Returns True if BGC ``name`` exists in the dataset"""
return name in self._bgc_lookup
def lookup_bgc(self, name):
"""If BGC ``name`` exists, return it. Otherwise return None"""
if name not in self._bgc_lookup:
return None
return self._bgcs[self._bgc_lookup[name]]
def lookup_gcf(self, gcf_id):
"""If GCF ``gcf_id`` exists, return it. Otherwise return None"""
if gcf_id not in self._gcf_lookup:
return None
return self._gcfs[self._gcf_lookup[gcf_id]]
def lookup_spectrum(self, name):
"""If Spectrum ``name`` exists, return it. Otherwise return None"""
if name not in self._spec_lookup:
return None
return self._spectra[self._spec_lookup[name]]
    # --- read-only views over the loaded dataset ---

    @property
    def strains(self):
        """Returns a list of all the strains in the dataset"""
        return self._strains

    @property
    def bgcs(self):
        """Returns a list of all the BGCs in the dataset"""
        return self._bgcs

    @property
    def gcfs(self):
        """Returns a list of all the GCFs in the dataset"""
        return self._gcfs

    @property
    def spectra(self):
        """Returns a list of all the Spectra in the dataset"""
        return self._spectra

    @property
    def molfams(self):
        """Returns a list of all the MolecularFamilies in the dataset"""
        return self._molfams

    @property
    def metadata(self):
        """Returns the metadata dict (initialised empty in __init__)."""
        return self._metadata

    @property
    def mibig_bgc_dict(self):
        """Returns the MiBIG BGC dict populated by the dataset loader."""
        return self._mibig_bgc_dict

    @property
    def product_types(self):
        """Returns a list of the available BiGSCAPE product types in current dataset"""
        return self._product_types

    @property
    def repro_data(self):
        """Returns the dict containing reproducibility data"""
        return self._repro_data

    @property
    def scoring_methods(self):
        """Returns a list of available scoring method names"""
        return list(self._scoring_methods.keys())
def scoring_method(self, name):
"""Return an instance of a scoring method.
Args:
name (str): the name of the method (see :func:`scoring_methods`)
Returns:
An instance of the named scoring method class, or None if the name is invalid
"""
if name not in self._scoring_methods_setup_complete:
return None
if not self._scoring_methods_setup_complete[name]:
self._scoring_methods[name].setup(self)
self._scoring_methods_setup_complete[name] = True
return self._scoring_methods.get(name, None)(self)
if __name__ == "__main__":
# can set default logging configuration this way...
LogConfig.setLogLevel(logging.DEBUG)
# initialise NPLinker from the command-line arguments
npl = NPLinker(Args().get_args())
# load the dataset
if not npl.load_data():
print('Failed to load the dataset!')
sys.exit(-1)
# create a metcalf scoring object
mc = npl.scoring_method('metcalf')
if mc is not None:
# set a scoring cutoff threshold
mc.cutoff = 0.5
# pick some GCFs to get links for
test_gcfs = npl.gcfs[:10]
# tell nplinker to find links for this set of GCFs using metcalf scoring
results = npl.get_links(test_gcfs, mc)
# check if any links were found
if len(results) == 0:
print('No links found!')
sys.exit(0)
# the "result" object will be a LinkCollection, holding all the information
# returned by the scoring method(s) used
print('{} total links found'.format(len(results)))
# display some information about each object and its links
for obj, result in results.links.items():
print('Results for object: {}, {} total links, {} methods used'.format(obj, len(result), results.method_count))
# get links for this object, sorted by metcalf score
sorted_links = results.get_sorted_links(mc, obj)
for link_data in sorted_links:
print(' --> [{}] {} | {} | # shared_strains = {}'.format(','.join(method.name for method in link_data.methods), link_data.target, link_data[mc], len(link_data.shared_strains)))
rs = npl.scoring_method('rosetta')
if rs is not None:
print('OK')
| StarcoderdataPython |
8114239 | #!/usr/bin/env python3
import os
import pickle
import sys
def main():
    """List Inria Holidays images that have no YOLOv3 person detections.

    Reads the pickled per-image detections and writes the sorted filenames
    whose detection list is empty to non_person_images.txt.
    """
    data_root = os.environ.get('DATA_ROOT')
    if data_root is None:
        print(
            'Set the DATA_ROOT environment variable to the parent dir of the inria_holidays '
            'directory.')
        sys.exit(1)

    with open(f'{data_root}/inria_holidays/yolov3_person_detections.pkl', 'rb') as f:
        detections_all = pickle.load(f)

    filenames_without_detection = sorted(
        filename for filename, detections in detections_all.items() if not detections)

    with open(f'{data_root}/inria_holidays/non_person_images.txt', 'w') as f:
        f.write('\n'.join(filenames_without_detection))
# Allow importing this module without running the conversion.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
51061 | from math import sqrt
from numpy import *
import numpy as np
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from lemma_tokenizer import LemmaTokenizer
class Bayesian_Sets(object):
    """Bayesian Sets scorer over website text (cf. Heller & Ghahramani, NIPS 2005).

    Fits a vectorizer on a set of seed websites and scores candidate websites
    by how well they fit the "set" the seeds define.
    """

    def __init__(self, seeds, representation, value_type, decomposition=None):
        """
        Parameters
        ----------
        seeds : list of seed website objects used to define the set
        representation : text representation passed through to page/website getters
        value_type : "tfidf" or "binary "
        decomposition: "pca", "nmf", "lsa"
        """
        self.text_type = representation
        # min_df = 2: filter any token that appears in less than 2 documents
        # min_df = 0.125: filter any token that apearrs in less than 0.125*number_of_docs documents
        mdf = max(2/float(len(seeds)), 0.1)
        # Create vectorizer
        self.value_type = value_type
        if value_type=="binary":
            #self.vectorizer = CountVectorizer(binary=True, stop_words='english', ngram_range=(1,2))
            #self.vectorizer = CountVectorizer(binary=True, stop_words='english', ngram_range=(1,2), max_df=0.75, min_df=0, max_features=1000)
            self.vectorizer = CountVectorizer(binary=True, stop_words='english', ngram_range=(1,2), max_df=1.0, min_df=mdf)
        elif value_type=="tfidf":
            #self.vectorizer = TfidfVectorizer(tokenizer=LemmaTokenizer(), stop_words='english', ngram_range=(1,2), max_df=1.0, min_df=mdf, use_idf=False, norm='l1', sublinear_tf=True)
            self.vectorizer = TfidfVectorizer(stop_words='english', ngram_range=(1,2), max_df=1.0, min_df=mdf, use_idf=False, norm='l1', sublinear_tf=False)
        else:
            print "Wrong value type of Bayesian Sets."
            sys.exit()
        self.seeds = seeds
        self.vect_seeds = self._vectorize_seeds()
        # debug output: number of active (non-zero) features per seed vector
        for i, v in enumerate(self.vect_seeds):
            print self._count(v)
        print "Initialized Bayesian sets object. text type = ", self.text_type
        #decomposition = 'nmf' # uncomment to use decomposition. Only NMF works because bayesian sets require non-negative inputs
        if decomposition=='nmf':
            self.model = NMF(n_components=200, init='nndsvd', random_state=0)
            print "Created nmf model"
        elif decomposition == 'pca':
            self.model = PCA(n_components=100)
            print "Created pca model"
        elif decomposition == 'lsa':
            self.model = TruncatedSVD(n_components=100)
            print "Created lsa model"
        self.decomposition = decomposition
def _count(self, vect):
c = 0
for i in vect:
if i:
c += 1
return c
    def _vectorize_seeds(self):
        """Fit the vectorizer on all seed pages, then return one vector per seed website."""
        print "Vectorizing seed websites..."
        docs = [] # list of strings, used for constructing vectorizer
        for w in self.seeds:
            docs.extend([p.get_text(self.text_type) for p in w])
        #self.vect_seeds = self.vectorizer.fit_transform(docs).todense() # Why converting to dense vector?
        self.vectorizer.fit(docs)
        if self.value_type=="tfidf":
            return np.array([w.get_bstf_vsm(self.vectorizer, self.text_type) for w in self.seeds])
        elif self.value_type=="binary":
            return np.array([w.get_bsbin_vsm(self.vectorizer, self.text_type) for w in self.seeds])
        else:
            print "Wrong value type"
            return None
    def update_seeds(self, new_seeds):
        """Append new seed websites and re-vectorize the whole seed set.

        NOTE(review): clear() is called on *all* seeds (old and new), not just
        the additions -- confirm that re-clearing existing seeds is intended.
        """
        self.seeds.extend(new_seeds)
        for w in self.seeds:
            w.clear()
        self.vect_seeds = self._vectorize_seeds()
    def score(self, websites):
        """Score candidate websites against the seed set.

        Returns a list of (website, score) tuples, one per input website.
        """
        print "Scoring..."
        # NOTE(review): an unrecognised value_type leaves X undefined and
        # crashes on X.shape below rather than failing cleanly.
        if self.value_type=="tfidf":
            X = np.array([w.get_bstf_vsm(self.vectorizer, self.text_type) for w in websites])
        elif self.value_type=="binary":
            X = np.array([w.get_bsbin_vsm(self.vectorizer, self.text_type) for w in websites])
        else:
            print "Wrong value type"
        print 'Shape ', X.shape
        if self.decomposition:
            # reduce dimensionality of seeds and candidates jointly
            self.vect_seeds, X = self._reduce_dim(self.vect_seeds, X)
        scores = self.score_helper(self.vect_seeds, X)
        results = []
        for i, w in enumerate(websites):
            results.append((w, scores[i]))
        return results
    def _reduce_dim(self, T, X):
        """
        Use decomposition method to reduce dimension of the two vectors T and X.
        Concatenate T and X and apply decomposition to the combined vector.
        """
        TX = np.concatenate((T, X), axis=0)
        print "Transforming"
        transformed_X = self.model.fit_transform(TX)
        print "Done transform"
        # split the transformed matrix back into the seed part and the data part
        split = T.shape[0]
        new_T, _, new_X = np.vsplit(transformed_X, (split, split))
        return new_T, new_X
    def _reduce_dim_separated(self, T, X):
        # Alternative to _reduce_dim: fits a separate decomposition to each
        # matrix, so T and X end up in *different* latent spaces.
        # NOTE(review): not called by score(); scores computed from these
        # separate spaces are not directly comparable -- verify before use.
        print "Transforming"
        new_T = self.model.fit_transform(T)
        new_X = self.model.fit_transform(X)
        print "Done transform"
        return new_T, new_X
    def score_helper(self, D, X) :
        ''' D-> Query Set
            X-> Data Set

        Computes the Bayesian Sets log-score of every row of X against the
        query set D, i.e. score(x) = C + q . x with Beta-Bernoulli
        hyperparameters estimated from the pooled data
        (cf. Heller & Ghahramani, "Bayesian Sets", NIPS 2005).
        '''
        #Compute Bayesian Sets Parameters
        c = 2
        N = D.shape[0]
        T = concatenate((D,X), axis=0)
        # m: per-feature mean over all data; (a, b) are the Beta priors
        m = divide(sum(T, axis=0),float(T.shape[0]))
        a = multiply(m, c)
        b = multiply(subtract(1,m),c)
        # posterior hyperparameters after observing the query set D
        at = add(a,sum(D, axis=0))
        bt = subtract(add(b,N),sum(D, axis=0))
        # C: constant term of the log-score; q: per-feature weight vector
        C = sum(subtract(add(subtract(log(add(a,b)),log(add(add(a,b),N))), log(bt)), log (b)))
        q = transpose(add(subtract(subtract(log(at),log(a)),log(bt)), log(b)))
        score_X = transpose(add(C, dot(X,q)))
        return asarray(score_X)
| StarcoderdataPython |
378386 | # Generated by Django 3.1.2 on 2021-03-26 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a help_text to Movie.title
    # (help_text changes are metadata-only; no database schema change).

    dependencies = [
        ('catalog', '0053_auto_20210326_0416'),
    ]

    operations = [
        migrations.AlterField(
            model_name='movie',
            name='title',
            field=models.CharField(help_text='This field will be overwritten if given a valid IMDB id.', max_length=200),
        ),
    ]
| StarcoderdataPython |
11201899 | <gh_stars>1-10
from dataclasses import dataclass
from datetime import date, datetime
from typing import Any, Optional
from .base import ZGWModel
from .catalogi import Eigenschap
from .constants import RolOmschrijving, RolTypes, VertrouwelijkheidsAanduidingen
@dataclass
class Zaak(ZGWModel):
    """A "zaak" (case) resource; appears to mirror the Dutch ZGW Zaken API schema."""
    url: str
    identificatie: str
    bronorganisatie: str
    omschrijving: str
    toelichting: str
    zaaktype: str
    registratiedatum: date
    startdatum: date
    einddatum: Optional[date]
    einddatum_gepland: Optional[date]
    uiterlijke_einddatum_afdoening: Optional[date]
    publicatiedatum: Optional[date]
    vertrouwelijkheidaanduiding: str
    status: str
    resultaat: str
    relevante_andere_zaken: list
    zaakgeometrie: dict

    def get_vertrouwelijkheidaanduiding_display(self):
        """Return the human-readable label for the confidentiality level."""
        return VertrouwelijkheidsAanduidingen.values[self.vertrouwelijkheidaanduiding]
@dataclass
class Status(ZGWModel):
    """Status of a zaak at a point in time."""
    url: str
    zaak: str
    statustype: str
    datum_status_gezet: datetime
    statustoelichting: str


@dataclass
class ZaakObject(ZGWModel):
    """Link between a zaak and a related object."""
    url: str
    zaak: str
    object: str
    object_type: str
    object_type_overige: str
    relatieomschrijving: str
    object_identificatie: Optional[dict]


@dataclass
class ZaakEigenschap(ZGWModel):
    """Value of a case property ("eigenschap") attached to a zaak."""
    url: str
    # uuid: uuid.UUID
    zaak: str
    eigenschap: str
    naam: str
    waarde: str

    def get_waarde(self) -> Any:
        """Return ``waarde`` converted via the resolved Eigenschap's type.

        Requires ``self.eigenschap`` to have been resolved from its URL into
        an ``Eigenschap`` instance beforehand (hence the assert).
        """
        assert isinstance(
            self.eigenschap, Eigenschap
        ), "Ensure eigenschap has been resolved"
        return self.eigenschap.to_python(self.waarde)


@dataclass
class Resultaat(ZGWModel):
    """Outcome ("resultaat") of a zaak."""
    url: str
    zaak: str
    resultaattype: str
    toelichting: str
class Rol(ZGWModel):
url: str
zaak: str
betrokkene: str
betrokkene_type: str
roltype: str
omschrijving: str
omschrijving_generiek: str
roltoelichting: str
registratiedatum: datetime
indicatie_machtiging: str
betrokkene_identificatie: Optional[dict]
def get_betrokkene_type_display(self):
return RolTypes.values[self.betrokkene_type]
def get_omschrijving_generiek_display(self):
return RolOmschrijving.values[self.omschrijving_generiek]
| StarcoderdataPython |
4942554 | <gh_stars>0
"""
__author__ = "<NAME> and <NAME>"
Main
-Capture the config file
-Process the json config passed
-Create an agent instance
-Run the agent
"""
import argparse
from utils.logger import setup_logging
from configs.default import update_config
from configs import config
from agents import *
def parse_args():
    """Parse the command line; returns a namespace with a ``cfg`` attribute."""
    cli = argparse.ArgumentParser(description="")
    cli.add_argument(
        '--cfg',
        metavar='config_json_file',
        default='None',
        help='The Configuration file in json format',
    )
    return cli.parse_args()
def main():
    """Entry point: load the JSON config, then build and run the agent."""
    # Rebind the module-level ``config`` imported from ``configs`` with the
    # values merged from the --cfg file.
    global config
    args = parse_args()
    config = update_config(config, args)
    setup_logging(config)
    # Look up the agent class by name in this module's namespace (the
    # ``from agents import *`` at the top pulls all agents in), then run it.
    agent_class = globals()[config.AGENT]
    agent = agent_class(config)
    agent.run()
    agent.finalize()
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
212118 | <gh_stars>0
import torch
import numpy as np
import torch_geometric.datasets
from ogb.graphproppred import PygGraphPropPredDataset
from ogb.lsc.pcqm4m_pyg import PygPCQM4MDataset
import pyximport
from torch_geometric.data import InMemoryDataset, download_url
import pandas as pd
from sklearn import preprocessing
pyximport.install(setup_args={'include_dirs': np.get_include()})
import os.path as osp
from torch_geometric.data import Data
import time
from torch_geometric.utils import add_self_loops, negative_sampling
import pathlib
# Repository root: everything in the current working directory's absolute
# path up to and including the first 'TransTTE' component.
link = str(pathlib.Path().resolve()).split('TransTTE')[0]
GLOBAL_ROOT = link + 'TransTTE'
class geo_Abakan(InMemoryDataset):
    """Abakan route dataset as a PyTorch-Geometric ``InMemoryDataset``.

    Each CSV row (one route) becomes one graph: the route's road segments
    are nodes, consecutive segments are linked (the last wraps to the
    first), and the regression target ``y`` is the route travel time (RTA).
    """

    def __init__(self, root, transform=None, pre_transform=None, split='train'):
        super(geo_Abakan, self).__init__(root, transform, pre_transform)
        # Load the pre-processed tensors for the requested split.
        self.data, self.slices = torch.load(self.processed_dir + '/' + f'{split}.pt')

    @property
    def raw_dir(self) -> str:
        return GLOBAL_ROOT + '/datasets/abakan/raw'

    @property
    def raw_file_names(self):
        return ['abakan_full_routes_final_weather_L_NaN_filtered_FIXED.csv']

    @property
    def processed_file_names(self):
        return ['train.pt', 'test.pt', 'val.pt']

    @property
    def processed_dir(self):
        return osp.join(self.root, 'processed', 'data_abakan_1')

    def download(self):
        # NOTE(review): ``self.url`` is never set on this class, so this
        # would raise AttributeError if PyG ever triggers a download —
        # confirm the raw CSV is always shipped with the repository.
        path = download_url(self.url, self.raw_dir)
        print(self.processed_paths[0])

    def process(self):
        """Parse the raw CSV into per-route graphs and save one .pt per split."""
        print('start total')
        start_time = time.time()
        data = pd.read_csv(osp.join(GLOBAL_ROOT + '/datasets/abakan/raw', 'abakan_full_routes_final_weather_L_NaN_filtered_FIXED.csv'))
        # Keep only routes rebuilt at most once.
        data = data[data['rebuildCount'] <= 1].reset_index(drop=True).copy()
        shape = int(1 * data.shape[0])
        data = data[0:shape].copy()
        data = data.drop(columns=['Unnamed: 0'])
        # Hour of day, sliced out of the timestamp string.
        data['hour'] = data['start_timestamp'].apply(lambda x: int(x[-10:-8]))
        # Route-level features replicated onto every node of that route's graph.
        edge_features_agg = [' start_point_part', 'finish_point_part', 'day_period', 'week_period', 'clouds', 'snow', 'temperature', 'wind_dir', 'wind_speed', 'pressure', 'hour']
        # Flattened global speed/length lists (kept from the original code;
        # only per-route means are used in the normalization below).
        all_speed = []
        all_length = []
        for i in range(0, shape):
            data_row = data[i:i + 1].reset_index(drop=True).copy()
            speed_list = [int(x) for x in (data_row['speed'].values[0].replace("'", '').split(','))]
            list_length = [int(x) for x in (data_row['length'].values[0].replace("'", '').split(','))]
            all_speed.append(speed_list)
            all_length.append(list_length)
        all_speed = [item for sublist in all_speed for item in sublist]
        all_length = [item for sublist in all_length for item in sublist]
        # Sequential 80/15/5 split by row position.
        train_size = 0.8
        test_size = 0.15
        val_size = 0.05
        data_split_dict = dict()
        data_split_dict['train'] = np.arange(0, int(data.shape[0] * train_size))
        data_split_dict['test'] = np.arange(int(data.shape[0] * train_size), int(data.shape[0] * (train_size + test_size)))
        data_split_dict['val'] = np.arange(int(data.shape[0] * (train_size + test_size)), int((data.shape[0] * (train_size + test_size + val_size))))
        for split in data_split_dict.keys():
            data_list = []
            for i in data_split_dict[split]:
                data_row = data.iloc[[i], ].reset_index(drop=True).copy()
                edge_list = [int(x) for x in (data_row['edges'].values[0].replace("'", '').split(','))]
                speed_list = [int(x) for x in (data_row['speed'].values[0].replace("'", '').split(','))]
                list_length = [int(x) for x in (data_row['length'].values[0].replace("'", '').split(','))]
                # Ring topology: each segment points to the next, last wraps to first.
                source = edge_list.copy()
                target = edge_list[1:].copy() + [edge_list[0]].copy()
                data_row_gran = pd.DataFrame()
                data_row_gran['source'] = source
                data_row_gran['target'] = target
                data_row_gran['speed'] = speed_list
                data_row_gran['length'] = list_length
                target_val = data_row['RTA'].values[0]
                # Normalize node features by their per-route means.
                data_row_gran['speed'] = data_row_gran['speed'] / np.mean(speed_list)
                data_row_gran['length'] = data_row_gran['length'] / np.mean(list_length)
                for col in edge_features_agg:
                    data_row_gran[col] = data_row[col].values[0]
                # Re-index raw segment ids to a dense 0..n-1 range.
                total_nodes_list = list(set(list(data_row_gran.source.values)))
                le = preprocessing.LabelEncoder()
                le.fit(total_nodes_list)
                data_row_gran['source'] = le.transform(data_row_gran.source.values)
                data_row_gran['target'] = le.transform(data_row_gran.target.values)
                total_nodes_list = list(set(list(data_row_gran.source.values)))
                edge_index = torch.tensor(torch.from_numpy(data_row_gran[['source', 'target']].values.T), dtype=torch.long)
                # Node feature tensor (normalized speed/length + route features).
                x = torch.tensor(torch.from_numpy(data_row_gran[['speed', 'length'] + edge_features_agg].values), dtype=torch.long)
                # Constant all-ones edge features, one feature per edge.
                edge_num_feach = 1
                edge_attr = torch.from_numpy(np.ones(shape=((edge_index.size()[1]), edge_num_feach)))
                edge_attr = torch.tensor(edge_attr, dtype=torch.long)
                # Regression target: route travel time.
                y = torch.tensor(target_val, dtype=torch.long)
                data_graph = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)
                data_list.append(data_graph)
            # Fix: save inside the per-split loop. The original dedented
            # inconsistently here (an IndentationError), and both ``split``
            # and ``data_list`` are per-iteration values.
            torch.save(self.collate(data_list), osp.join(self.processed_dir, f'{split}.pt'))
        print('end total')
11200927 | import numpy as np
from adventofcode.util.input_helpers import get_input_for_day
# Raw puzzle input: one "x1,y1 -> x2,y2" vent line per row.
data = get_input_for_day(2021, 5)
# Parse into an (n_lines, 2, 2) integer array of segment endpoints.
segments = np.array(
    [[pair.split(",") for pair in line.split(" -> ")] for line in data]
).astype(int)
# Inclusive grid extents over all x and y coordinates.
xmax = segments[:, :, 0].max()
ymax = segments[:, :, 1].max()
class Line:
    """Base vent line; concrete subclasses compute ``self.line``."""

    def __init__(self):
        # No cell coordinates until a subclass fills them in.
        self.line = None
class StraightLine(Line):
    """Horizontal or vertical segment expanded into every covered cell."""

    def __init__(self, coords):
        # ``coords`` is a 2x2 array of (x, y) endpoint pairs.
        xs = (coords[0, 0], coords[1, 0])
        ys = (coords[0, 1], coords[1, 1])
        self.line = [
            [x, y]
            for x in range(min(xs), 1 + max(xs))
            for y in range(min(ys), 1 + max(ys))
        ]
class DiagonalLine(Line):
    """45-degree segment expanded into every covered cell."""

    def __init__(self, coords):
        x0, x1 = coords[0, 0], coords[1, 0]
        y0, y1 = coords[0, 1], coords[1, 1]
        # Step one cell at a time towards the second endpoint on each axis.
        step_x = 1 if x1 > x0 else -1
        step_y = 1 if y1 > y0 else -1
        self.line = [[x0 + i * step_x, y0 + i * step_y]
                     for i in range(1 + abs(x1 - x0))]
class Board:
    """Integer grid counting how many vent lines cover each cell."""

    def __init__(self, limits):
        self.limits = limits
        # One counter per cell, all starting at zero.
        self.board = np.zeros((limits[0], limits[1]), dtype=int)

    def draw_line(self, line):
        """Increment the counter of every cell covered by *line*."""
        for x, y in line.line:
            self.board[x, y] += 1

    def overlaps(self):
        """Return the number of cells covered by at least two lines."""
        return len(np.where(self.board > 1)[0])
def part1():
    """Count overlap cells considering only horizontal/vertical segments."""
    grid = Board((1 + xmax, 1 + ymax))
    for seg in segments:
        is_straight = seg[0, 0] == seg[1, 0] or seg[0, 1] == seg[1, 1]
        if is_straight:
            grid.draw_line(StraightLine(seg))
    return grid.overlaps()
def part2():
    """Count overlap cells considering straight and diagonal segments."""
    grid = Board((1 + xmax, 1 + ymax))
    for seg in segments:
        straight = seg[0, 0] == seg[1, 0] or seg[0, 1] == seg[1, 1]
        line_cls = StraightLine if straight else DiagonalLine
        grid.draw_line(line_cls(seg))
    return grid.overlaps()
if __name__ == "__main__":
print(f"Solution for part 1: {part1()}")
print(f"Solution for part 2: {part2()}")
| StarcoderdataPython |
6521479 | """
This module contains unit tests of single_peer_satisfaction_neutral().
"""
import copy
from typing import List, NamedTuple
import pytest
from scenario import Scenario
from engine import Engine
import performance_candidates
from ..__init__ import SCENARIO_SAMPLE, ENGINE_SAMPLE
# The arrange helper function needed in this module is exactly the same as in
# test_single_peer_order_receipt_ratio.py so we import it.
# We will be using the same CASE_3 as in test_single_peer_order_receipt_ratio.py so we import it.
from .test_single_peer_order_receipt_ratio import arrange_for_test, CASE_3
class CaseType(NamedTuple):
    """
    Data type for test cases in this module. All elements are the same as CaseType in
    test_single_peer_order_receipt_ratio.py except the last one.
    """
    scenario: Scenario
    engine: Engine
    num_order: int
    # One birth time per order (indices 0..num_order-1).
    order_birth_time_list: List[int]
    order_id_owned_by_peer: List[int]
    order_id_in_stat: List[int]
    max_age: int
    window: int
    expected_result: float  # expected satisfaction result.
# Case 1 is very similar to case 1 in test_single_peer_order_receipt_ratio.py.
# The expected satisfaction is the average of the non-None elements of
# expected_result in case 1 of test_single_peer_order_receipt_ratio.py.
CASE_1 = CaseType(
    scenario=SCENARIO_SAMPLE,
    engine=ENGINE_SAMPLE,
    num_order=11,
    order_birth_time_list=[100, 100, 99, 70, 60, 55, 55, 12, 6, 1, 0],
    order_id_owned_by_peer=[0, 1, 3, 7, 9, 10],
    order_id_in_stat=[0, 2, 3, 4, 5, 6, 7, 8, 10],
    max_age=100,
    window=10,
    expected_result=0.5,
)
# Case 2 is very similar to case 2 in test_single_peer_order_receipt_ratio.py.
# The expected satisfaction is the average of the non-None elements of
# expected_result in case 2 of test_single_peer_order_receipt_ratio.py.
# Only max_age differs from CASE_1 (101 instead of 100).
CASE_2 = CaseType(
    scenario=SCENARIO_SAMPLE,
    engine=ENGINE_SAMPLE,
    num_order=11,
    order_birth_time_list=[100, 100, 99, 70, 60, 55, 55, 12, 6, 1, 0],
    order_id_owned_by_peer=[0, 1, 3, 7, 9, 10],
    order_id_in_stat=[0, 2, 3, 4, 5, 6, 7, 8, 10],
    max_age=101,
    window=10,
    expected_result=3.5 / 6,
)
@pytest.mark.parametrize(
    "scenario, engine, num_order, order_birth_time_list, order_id_owned_by_peer, "
    "order_id_in_stat, max_age, window, expected_result",
    [CASE_1, CASE_2],
)
def test_single_peer_satisfaction_neutral__normal(
    scenario: Scenario,
    engine: Engine,
    num_order: int,
    order_birth_time_list: List[int],  # all birth times are normal
    order_id_owned_by_peer: List[int],
    order_id_in_stat: List[int],
    max_age: int,
    window: int,
    expected_result: float,
):
    """
    This function tests normal cases: the computed satisfaction must equal
    the pre-computed expected_result of the case.
    """
    # Arrange: build a peer owning the given orders and an order set.
    peer, order_set = arrange_for_test(
        scenario,
        engine,
        num_order,
        order_birth_time_list,
        order_id_owned_by_peer,
        order_id_in_stat,
    )
    # Act
    satisfaction = performance_candidates.single_peer_satisfaction_neutral(
        cur_time=100,
        peer=peer,
        max_age_to_track=max_age,
        statistical_window=window,
        order_set=order_set,
    )
    # Assert.
    assert satisfaction == expected_result
# Case 3 is the same as case 3 in test_single_peer_order_receipt_ratio.py. Some error expected.
@pytest.mark.parametrize(
    "scenario, engine, num_order, order_birth_time_list_abnormal, order_id_owned_by_peer, "
    "order_id_in_stat, max_age, window, _expected_result",
    [CASE_3],
)
def test_single_peer_satisfaction_neutral__negative_age(
    scenario: Scenario,
    engine: Engine,
    num_order: int,
    # one birth time will be abnormal (> cur_time)
    order_birth_time_list_abnormal: List[int],
    order_id_owned_by_peer: List[int],
    order_id_in_stat: List[int],
    max_age: int,
    window: int,
    _expected_result: float,
):
    """
    This function tests negative order age: a birth time later than
    cur_time must make the candidate raise ValueError.
    """
    # Arrange
    peer, order_set = arrange_for_test(
        scenario,
        engine,
        num_order,
        order_birth_time_list_abnormal,
        order_id_owned_by_peer,
        order_id_in_stat,
    )
    # Act and assert.
    with pytest.raises(ValueError, match="Some order age is negative."):
        performance_candidates.single_peer_satisfaction_neutral(
            cur_time=100,
            peer=peer,
            max_age_to_track=max_age,
            statistical_window=window,
            order_set=order_set,
        )
# Case 4 contains no order for statistics. Error expected.
# (Deep copy so clearing the list does not mutate CASE_2's data.)
CASE_4 = copy.deepcopy(CASE_2)
CASE_4.order_id_in_stat.clear()
@pytest.mark.parametrize(
    "scenario, engine, num_order, order_birth_time_list, order_id_owned_by_peer, "
    "order_id_in_stat_empty, max_age, window, _expected_result",
    [CASE_4],
)
def test_single_peer_satisfaction_neutral__no_order(
    scenario: Scenario,
    engine: Engine,
    num_order: int,
    order_birth_time_list: List[int],
    order_id_owned_by_peer: List[int],
    order_id_in_stat_empty: List[int],  # This will be empty
    max_age: int,
    window: int,
    _expected_result: float,
):
    """
    This function tests non-existence of orders: with no order available
    for statistics the candidate must raise RuntimeError.
    """
    # Arrange
    peer, order_set = arrange_for_test(
        scenario,
        engine,
        num_order,
        order_birth_time_list,
        order_id_owned_by_peer,
        order_id_in_stat_empty,
    )
    # Act and assert.
    with pytest.raises(
        RuntimeError, match="Unable to judge a single peer satisfaction"
    ):
        performance_candidates.single_peer_satisfaction_neutral(
            cur_time=100,
            peer=peer,
            max_age_to_track=max_age,
            statistical_window=window,
            order_set=order_set,
        )
| StarcoderdataPython |
1693170 | <filename>cleverhans/attacks/fast_feature_adversaries.py
"""
The FastFeatureAdversaries attack
"""
# pylint: disable=missing-docstring
import warnings
import numpy as np
import tensorflow as tf
from cleverhans.attacks.attack import Attack
from cleverhans.compat import reduce_sum
from cleverhans.model import Model
from cleverhans.utils_tf import clip_eta
class FastFeatureAdversaries(Attack):
  """
  This is a fast implementation of "Feature Adversaries", an attack
  against a target internal representation of a model.
  "Feature adversaries" were originally introduced in (Sabour et al. 2016),
  where the optimization was done using LBFGS.
  Paper link: https://arxiv.org/abs/1511.05122
  This implementation is similar to "Basic Iterative Method"
  (Kurakin et al. 2016) but applied to the internal representations.
  :param model: cleverhans.model.Model
  :param sess: optional tf.Session
  :param dtypestr: dtype of the data
  :param kwargs: passed through to super constructor
  """
  def __init__(self, model, sess=None, dtypestr='float32', **kwargs):
    """
    Create a FastFeatureAdversaries instance.
    """
    super(FastFeatureAdversaries, self).__init__(model, sess, dtypestr,
                                                 **kwargs)
    # Parameters that may be fed at runtime vs. ones baked into the graph.
    self.feedable_kwargs = ('eps', 'eps_iter', 'clip_min', 'clip_max')
    self.structural_kwargs = ['ord', 'nb_iter', 'layer']
    assert isinstance(self.model, Model)
  def parse_params(self,
                   layer=None,
                   eps=0.3,
                   eps_iter=0.05,
                   nb_iter=10,
                   ord=np.inf,
                   clip_min=None,
                   clip_max=None,
                   **kwargs):
    """
    Take in a dictionary of parameters and applies attack-specific checks
    before saving them as attributes.
    Attack-specific parameters:
    :param layer: (required str) name of the layer to target.
    :param eps: (optional float) maximum distortion of adversarial example
                compared to original input
    :param eps_iter: (optional float) step size for each attack iteration
    :param nb_iter: (optional int) Number of attack iterations.
    :param ord: (optional) Order of the norm (mimics Numpy).
               Possible values: np.inf, 1 or 2.
    :param clip_min: (optional float) Minimum input component value
    :param clip_max: (optional float) Maximum input component value
    :return: True on success (asserted by ``generate``).
    """
    # Save attack-specific parameters
    self.layer = layer
    self.eps = eps
    self.eps_iter = eps_iter
    self.nb_iter = nb_iter
    self.ord = ord
    self.clip_min = clip_min
    self.clip_max = clip_max
    # Check if order of the norm is acceptable given current implementation
    if self.ord not in [np.inf, 1, 2]:
      raise ValueError("Norm order must be either np.inf, 1, or 2.")
    if len(kwargs.keys()) > 0:
      warnings.warn("kwargs is unused and will be removed on or after "
                    "2019-04-26.")
    return True
  def attack_single_step(self, x, eta, g_feat):
    """
    TensorFlow implementation of the Fast Feature Gradient. This is a
    single step attack similar to Fast Gradient Method that attacks an
    internal representation.
    :param x: the input placeholder
    :param eta: A tensor the same shape as x that holds the perturbation.
    :param g_feat: model's internal tensor for guide
    :return: a tensor for the adversarial example
    """
    adv_x = x + eta
    a_feat = self.model.fprop(adv_x)[self.layer]
    # feat.shape = (batch, c) or (batch, w, h, c)
    # Sum the squared feature distance over all non-batch axes.
    axis = list(range(1, len(a_feat.shape)))
    # Compute loss
    # This is a targeted attack, hence the negative sign
    loss = -reduce_sum(tf.square(a_feat - g_feat), axis)
    # Define gradient of loss wrt input
    grad, = tf.gradients(ys=loss, xs=adv_x)
    # Multiply by constant epsilon
    scaled_signed_grad = self.eps_iter * tf.sign(grad)
    # Add perturbation to original example to obtain adversarial example
    adv_x = adv_x + scaled_signed_grad
    # If clipping is needed,
    # reset all values outside of [clip_min, clip_max]
    if (self.clip_min is not None) and (self.clip_max is not None):
      adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
    # Treat the step result as a constant so gradients do not flow through
    # previous iterations, then re-project eta onto the eps-ball.
    adv_x = tf.stop_gradient(adv_x)
    eta = adv_x - x
    eta = clip_eta(eta, self.ord, self.eps)
    return eta
  def generate(self, x, g, **kwargs):
    """
    Generate symbolic graph for adversarial examples and return.
    :param x: The model's symbolic inputs.
    :param g: The target value of the symbolic representation
    :param kwargs: See `parse_params`
    """
    # Parse and save attack-specific parameters
    assert self.parse_params(**kwargs)
    g_feat = self.model.fprop(g)[self.layer]
    # Initialize loop variables: a random perturbation inside the eps-ball.
    eta = tf.random.uniform(
        tf.shape(input=x), -self.eps, self.eps, dtype=self.tf_dtype)
    eta = clip_eta(eta, self.ord, self.eps)
    def cond(i, _):
      return tf.less(i, self.nb_iter)
    def body(i, e):
      new_eta = self.attack_single_step(x, e, g_feat)
      return i + 1, new_eta
    # Run nb_iter attack steps inside the graph.
    _, eta = tf.while_loop(cond=cond, body=body, loop_vars=(tf.zeros([]), eta), back_prop=True,
                           maximum_iterations=self.nb_iter)
    # Define adversarial example (and clip if necessary)
    adv_x = x + eta
    if self.clip_min is not None and self.clip_max is not None:
      adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
    return adv_x
| StarcoderdataPython |
1785230 | from PIL import Image
from DDRDataTypes import DDRScreenshot, DDRParsedData
from IIDXDataTypes import IIDXScreenshot, IIDXParsedData
import sys, requests, io, os
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: DDRGenie.py [path to screenshot image file]")
exit(0)
sshot = Image.open(sys.argv[1])
if 'debug' in sys.argv:
do_debug = True
else:
do_debug = False
if sshot.width == 400:
sst = IIDXScreenshot
pdt = IIDXParsedData
elif sshot.width == 600:
sst = DDRScreenshot
pdt = DDRParsedData
else:
sst = None
pdt = None
if 'upscale' in sys.argv:
if not os.path.exists('deepai_key.txt'):
print("To upscale you need to have a DeepAI API key in the working directory.\n"
" Make deepai_key.txt and save your key there and try again.")
print("Skipping upscale...")
mult = 1
else:
with open('deepai_key.txt', 'r') as f:
apikey = f.read()
mult = 2
imgArr = io.BytesIO()
sshot.save(imgArr, format='PNG')
print("Uploading image to waifu2x cloud...")
r = requests.post(
"https://api.deepai.org/api/waifu2x",
files={
'image': imgArr.getvalue(),
},
headers={'api-key': '%s' % apikey.strip()}
)
js = r.json()
if do_debug:
print(js)
if 'output_url' in js:
print("Downloading upscaled image...")
r1 = requests.get(js['output_url'])
reqdata = io.BytesIO(r1.content)
sshot = Image.open(reqdata)
if do_debug:
sshot.save("%s-2x.png" % sys.argv[1], format='PNG')
else:
mult = 1
if sst is not None:
i = sst(sshot, size_multiplier=mult)
d = pdt(i, debug=False)
if isinstance(d, DDRParsedData):
if d.is_course:
print("Course screenshot detected!")
i = sst(sshot, size_multiplier=mult, course=True)
d = pdt(i, debug=do_debug, course=True)
else:
d = pdt(i, debug=do_debug, course=False)
print("%s|%s (C: %f)|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s" % (d.dancer_name, d.song_title, d.title_conf, d.song_artist, d.play_letter_grade, d.play_money_score, d.play_max_combo,
d.play_ex_score, d.score_marv_count, d.score_perfect_count, d.score_great_count,
d.score_good_count, d.score_OK_count, d.score_miss_count, d.speed_mod, d.date_stamp,
d.chart_difficulty, d.chart_play_mode, d.chart_difficulty_number))
elif isinstance(d, IIDXParsedData):
print(d)
| StarcoderdataPython |
3394292 | <gh_stars>0
import tweepy, json, time
# Twitter API credentials: fill in before running.
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_KEY = ''
ACCESS_SECRET = ''
# Authenticated client, configured to wait (and notify) when the API
# rate limit is hit instead of raising.
AUTH = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
AUTH.set_access_token(ACCESS_KEY, ACCESS_SECRET)
API = tweepy.API(AUTH, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# TODO: Tune virality
def virality(status):
    """Return retweets per follower for *status*'s author.

    Authors with zero followers score 0.0; the original expression raised
    ZeroDivisionError for them.
    """
    followers = status.user.followers_count
    if not followers:
        return 0.0
    return (1.0 * status.retweet_count) / followers
def getTimeline():
    """Collect home-timeline statuses newer than the last retweeted status.

    Pages through the API (module globals ``API`` and ``lastRetweeted``)
    until an empty page comes back; each additional page is prepended to
    the accumulated list.
    """
    page = 1
    newTimeline = API.home_timeline(since_id=lastRetweeted.retweeted_status.id, page=page)
    timeline = newTimeline
    while len(newTimeline) > 0:
        page += 1
        newTimeline = API.home_timeline(since_id=lastRetweeted.retweeted_status.id, page=page)
        timeline = newTimeline + timeline
    return timeline
# NOTE(review): the bare ``print`` statements below are Python 2 syntax,
# so this script only runs under Python 2.
# Main loop: every minute, retweet the most "viral" status on the home
# timeline unless it was already the last thing we retweeted.
lastRetweeted = API.user_timeline()[0]
print(lastRetweeted.text)
print
timeline = getTimeline()
bestStatus = timeline[0]
while True:
    for status in timeline:
        if virality(status) > virality(bestStatus):
            bestStatus = status
    # Compare against the original tweet id when the candidate is itself
    # a retweet, so we never retweet the same content twice in a row.
    compareId = bestStatus.id
    if bestStatus.retweeted:
        compareId = bestStatus.retweeted_status.id
    if lastRetweeted.retweeted_status.id != compareId:
        #print json.dumps(lastRetweeted._json, indent=4)
        #print "---------------------------------------------"
        #print json.dumps(bestStatus._json, indent=4)
        #print
        API.retweet(bestStatus.id)
        lastRetweeted = API.user_timeline()[0]
        print "RT!"
        print bestStatus.text
    print "Sleeping..."
    time.sleep(60)
    timeline = getTimeline()
| StarcoderdataPython |
6411662 | from django.db import models
from django.conf import settings
#from language.views_common import yesterday
import datetime
## Acquisition of yesterday, correctly
def yesterday():
    """Return the current local time shifted back exactly one day."""
    one_day = datetime.timedelta(days=1)
    return datetime.datetime.now() - one_day
class Sentence(models.Model):
    """A numbered study sentence with pinyin and an English equivalence."""

    number = models.IntegerField('Number', default=0, blank=True, null=True)
    sentence = models.CharField('Sentence', max_length=3000)
    pinyin = models.CharField('Pinyin', max_length=3000, blank=True, null=True)
    equivalence = models.CharField('Equivalence in English', max_length=3000, blank=True, null=True)

    def __str__(self):
        return '(' + str(self.number) + ')' + self.sentence

    # The helpers below were plain class-level functions; they are marked
    # @staticmethod so both ``Sentence.find(n)`` and instance access work.
    @staticmethod
    def find(number):
        """Return the Sentence with the given number."""
        return Sentence.objects.get(number=number)

    @staticmethod
    def get_sentence(number):
        """Return the text of the sentence with the given number."""
        return Sentence.find(number).sentence

    @staticmethod
    def amount_total():
        """Return the total count of stored sentences."""
        return Sentence.objects.all().count()
class User_Sentence(models.Model):
    """Per-user progress record pointing at a sentence number."""

    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    number = models.IntegerField('Sentence Number')
    time = models.DateTimeField('Time')
    box = models.IntegerField('Box', default=0)

    @staticmethod
    def create(user, time, number=1):
        """Create a record for *user*; *number* defaults to the first sentence."""
        User_Sentence.objects.create(user=user,
                                     number=number,
                                     time=time)

    @staticmethod
    def find(user, level=None):
        """Return the record for *user*, restricted to sentence *level* if given.

        The original class defined ``find`` twice, so the one-argument
        overload was silently shadowed; folding *level* into an optional
        parameter restores both call styles.
        """
        if level is None:
            return User_Sentence.objects.get(user=user)
        return User_Sentence.objects.get(user=user, number=level)

    @staticmethod
    def relationship(user):
        """Return the queryset of all records for *user*."""
        return User_Sentence.objects.filter(user=user)

    @staticmethod
    def latest_relationship(user):
        """Return the earliest record for *user*, seeding one when none exists."""
        relationship = User_Sentence.relationship(user)
        if not relationship:
            # First visit: create the initial record dated yesterday.
            User_Sentence.create(user, yesterday())
        return User_Sentence.relationship(user).order_by('time').first()

    @staticmethod
    def get_level(user):
        """Return the highest sentence number recorded for *user*."""
        relationship = User_Sentence.relationship(user).order_by('-number').first()
        return relationship.number
class User_TimeSettings (models.Model):
    # Per-user list of time values in seconds, stored as a string.
    # NOTE(review): presumably spaced-repetition review intervals
    # (15 s up to one year) — confirm against the views using this field.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    time_settings = models.CharField('Time Settings', max_length=3000, default = '[15, 30, 60, 120, 600, 3600, 18000, 86400, 432000, 2160000, 10368000, 31536000]')
3369999 | __all__ = ["graphic", "play", "sound"]
from . import graphic
from . import play
from . import sound
| StarcoderdataPython |
6491183 | <reponame>huuhoa/adaptivecards<gh_stars>1-10
import json
class PropertyType:
    """Data descriptor proxying a typed value into the host's data store.

    The host object must provide ``get_data(key)`` and ``set_data(key, value)``;
    assignments whose value does not match ``type`` raise TypeError.
    """

    def __init__(self, type, key_name=None):
        # key_name may be omitted: __set_name__ then fills it in with the
        # attribute name the descriptor was bound to.
        self.key_name = key_name
        self.type = type

    def __set_name__(self, owner, name):
        if self.key_name is None:
            self.key_name = name

    def __get__(self, instance, owner):
        return instance.get_data(self.key_name)

    def __set__(self, instance, value):
        if isinstance(value, self.type):
            instance.set_data(self.key_name, value)
        else:
            raise TypeError(f'{self.key_name} must be of type(s) {self.type} (got {value!r})')
class AdaptiveCardJSONEncoder(json.JSONEncoder):
    """JSON encoder serializing BareObject instances via their data dict."""

    def default(self, obj):
        if not isinstance(obj, BareObject):
            # Delegate to the base class, which raises TypeError for
            # unsupported types.
            return json.JSONEncoder.default(self, obj)
        return obj._data
class BareObject():
    """Base object backed by a plain dict of adaptive-card data.

    Constructor keyword arguments go through the normal attribute protocol,
    so typed descriptors get a chance to validate them; names rejected with
    AttributeError are skipped silently.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self._data = {}
        # init data
        for prop, value in kwargs.items():
            try:
                setattr(self, prop, value)
            except AttributeError:
                pass

    def __str__(self):
        return json.dumps(self.render(), sort_keys=True, indent=2, cls=AdaptiveCardJSONEncoder)

    def __repr__(self):
        return self.__str__()

    def render(self):
        """Return the dict representation following the adaptivecards.io schema."""
        return self._data

    def get_data(self, key):
        """Return the stored value for *key*, or None when absent."""
        return self._data.get(key)

    def set_data(self, key, value):
        """Store *value* under *key* in the card's data dict."""
        self._data[key] = value
class Element(BareObject):
    """Base card element exposing the properties shared by all elements."""
    # Written into the rendered dict's 'type' key; subclasses override it.
    element_type = 'None'
    fallback = PropertyType(type=str)
    height = PropertyType(type=str)
    separator = PropertyType(type=bool)
    spacing = PropertyType(type=str)
    # Serialized under the JSON key 'isVisible'.
    visible = PropertyType(key_name='isVisible', type=bool)
    id = PropertyType(type=str)
    requires = PropertyType(type=dict)
    def __init__(self, *args, **kwargs):
        # Let BareObject apply keyword arguments first, then stamp the type.
        super().__init__(*args, **kwargs)
        self._data['type'] = self.element_type
1979676 | <reponame>LeoIV/sparse-ho
import pytest
import numpy as np
from scipy.sparse import csc_matrix
from sklearn import linear_model
from sklearn.model_selection import KFold
import celer
from celer.datasets import make_correlated_data
from sparse_ho.utils import Monitor
from sparse_ho.models import Lasso
from sparse_ho.criterion import (
HeldOutMSE, FiniteDiffMonteCarloSure, CrossVal, HeldOutLogistic)
from sparse_ho.grid_search import grid_search
# Simulated correlated design with a binarized target.
n_samples = 100
n_features = 100
snr = 3
corr = 0.5
X, y, _ = make_correlated_data(
    n_samples, n_features, corr=corr, snr=snr, random_state=42)
sigma_star = 0.1
# Binarize the target for the logistic criteria.
y = np.sign(y)
X_s = csc_matrix(X)
# First half trains, second half validates.
idx_train = np.arange(0, 50)
idx_val = np.arange(50, 100)
# Reference regularization scale computed on the training block.
alpha_max = np.max(np.abs(X[idx_train, :].T @ y[idx_train])) / len(idx_train)
alphas = alpha_max * np.geomspace(1, 0.1)
alpha_min = 0.0001 * alpha_max
estimator = linear_model.Lasso(
    fit_intercept=False, max_iter=10000, warm_start=True)
model = Lasso(estimator=estimator)
tol = 1e-8
# Set models to be tested
models = {}
models["lasso"] = Lasso(estimator=None)
models["lasso_custom"] = Lasso(estimator=celer.Lasso(
    warm_start=True, fit_intercept=False))
@pytest.mark.parametrize('model_name', list(models.keys()))
@pytest.mark.parametrize('XX', [X, X_s])
def test_cross_val_criterion(model_name, XX):
    """Cross-validated grid-search objectives must match scikit-learn's CV path."""
    model = models[model_name]
    alpha_min = alpha_max / 10
    max_iter = 10000
    n_alphas = 10
    kf = KFold(n_splits=5, shuffle=True, random_state=56)
    monitor_grid = Monitor()
    # Pick the held-out criterion matching the model family.
    if model_name.startswith("lasso"):
        sub_crit = HeldOutMSE(None, None)
    else:
        sub_crit = HeldOutLogistic(None, None)
    criterion = CrossVal(sub_crit, cv=kf)
    grid_search(
        criterion, model, XX, y, alpha_min, alpha_max,
        monitor_grid, max_evals=n_alphas, tol=tol)
    # Reference: scikit-learn cross-validation over the same alpha grid.
    if model_name.startswith("lasso"):
        reg = linear_model.LassoCV(
            cv=kf, verbose=True, tol=tol, fit_intercept=False,
            alphas=np.geomspace(alpha_max, alpha_min, num=n_alphas),
            max_iter=max_iter).fit(X, y)
    else:
        reg = linear_model.LogisticRegressionCV(
            cv=kf, verbose=True, tol=tol, fit_intercept=False,
            Cs=len(idx_train) / np.geomspace(
                alpha_max, alpha_min, num=n_alphas),
            max_iter=max_iter, penalty='l1', solver='liblinear').fit(X, y)
    reg.score(XX, y)
    if model_name.startswith("lasso"):
        objs_grid_sk = reg.mse_path_.mean(axis=1)
    else:
        objs_grid_sk = reg.scores_[1.0].mean(axis=1)
    # These two values should be the same.
    # NOTE(review): the subtraction below is a no-op kept from debugging.
    (objs_grid_sk - np.array(monitor_grid.objs))
    np.testing.assert_allclose(objs_grid_sk, monitor_grid.objs)
# TODO: factorize these tests
def test_grid_search():
    """Grid and random search must each return the alpha minimizing the
    objective their monitor recorded, for held-out MSE and SURE criteria."""
    max_evals = 5
    monitor_grid = Monitor()
    model = Lasso(estimator=estimator)
    # Score on the held-out split, consistently with the random-search run
    # below.  (The original passed idx_train twice, evaluating the MSE on
    # the training indices instead of the validation split.)
    criterion = HeldOutMSE(idx_train, idx_val)
    alpha_opt_grid, _ = grid_search(
        criterion, model, X, y, alpha_min, alpha_max,
        monitor_grid, max_evals=max_evals,
        tol=1e-5, samp="grid")
    monitor_random = Monitor()
    criterion = HeldOutMSE(idx_train, idx_val)
    alpha_opt_random, _ = grid_search(
        criterion, model, X, y, alpha_min, alpha_max,
        monitor_random,
        max_evals=max_evals, tol=1e-5, samp="random")
    # Each search must report the alpha at its monitor's minimum objective.
    np.testing.assert_allclose(monitor_random.alphas[
        np.argmin(monitor_random.objs)], alpha_opt_random)
    np.testing.assert_allclose(monitor_grid.alphas[
        np.argmin(monitor_grid.objs)], alpha_opt_grid)
    # Same check with the finite-difference Monte-Carlo SURE criterion.
    monitor_grid = Monitor()
    model = Lasso(estimator=estimator)
    criterion = FiniteDiffMonteCarloSure(sigma=sigma_star)
    alpha_opt_grid, _ = grid_search(
        criterion, model, X, y, alpha_min, alpha_max,
        monitor_grid, max_evals=max_evals,
        tol=1e-5, samp="grid")
    monitor_random = Monitor()
    criterion = FiniteDiffMonteCarloSure(sigma=sigma_star)
    alpha_opt_random, _ = grid_search(
        criterion, model, X, y, alpha_min, alpha_max,
        monitor_random,
        max_evals=max_evals, tol=1e-5, samp="random")
    np.testing.assert_allclose(monitor_random.alphas[
        np.argmin(monitor_random.objs)], alpha_opt_random)
    np.testing.assert_allclose(monitor_grid.alphas[
        np.argmin(monitor_grid.objs)], alpha_opt_grid)
if __name__ == '__main__':
    # Manual smoke run.  The test takes both the model name and the design
    # matrix; the original call passed only one argument and raised
    # TypeError before anything executed.
    for model_name in models.keys():
        for XX in (X, X_s):
            test_cross_val_criterion(model_name, XX)
    # test_grid_search()
| StarcoderdataPython |
1653784 | from ._pymimkl import *
# importing wrapped models
from .average_mkl import AverageMKL
from .easy_mkl import EasyMKL
from .umkl_knn import UMKLKNN
# Cleanup: drop the underscore-suffixed implementation classes (pulled in
# by ``from ._pymimkl import *`` above) now that the wrapped models
# imported above replace them in the public namespace.
del(EasyMKL_)
del(UMKLKNN_)
del(AverageMKL_)
| StarcoderdataPython |
215261 | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 29 09:54:51 2020
@author: bruger
"""
| StarcoderdataPython |
356951 | <reponame>yingstat/SingleCellOpenProblems<filename>test/utils/name.py
import parameterized
def object_name(x):
"""Get a human readable name for an object."""
if hasattr(x, "__name__"):
return x.__name__
elif hasattr(x, "__func__"):
return object_name(x.__func__)
else:
return str(x)
def name_test(testcase_func, param_num, param):
"""Get a human readable name for a parameterized test."""
args = param.values() if isinstance(param, dict) else param.args
return "%s_%s" % (
testcase_func.__name__,
parameterized.parameterized.to_safe_name(
"_".join(object_name(x) for x in args)
),
)
| StarcoderdataPython |
1716713 | <reponame>detrout/htsworkflow
import argparse
import RDF
import jinja2
from pprint import pprint
from htsworkflow.util.rdfhelp import \
get_model, \
get_serializer, \
sparql_query, \
libraryOntology, \
load_into_model
from htsworkflow.util.rdfns import *
# Frequently used RDF predicate nodes from the shared namespaces.
TYPE_N = rdfNS['type']
CREATION_DATE = libraryOntology['date']
from encode_find import DBDIR
# Report defaults, overridable from the command line (see make_parser).
DEFAULT_GENOME='hg19'
DEFAULT_OUTPUT='/tmp/submission_report.html'
def main(cmdline=None):
    """Generate the submission report and write it out.

    Args:
        cmdline: optional list of command line arguments for argparse
            (defaults to sys.argv when None).
    """
    parser = make_parser()
    args = parser.parse_args(cmdline)

    model = get_model('encode', DBDIR)

    report = what_have_we_done(model, genome=args.genome)
    # Bug fix: honor the parsed --output option; previously the option
    # was accepted but the report was always written to DEFAULT_OUTPUT.
    with open(args.output, 'w') as stream:
        stream.write(report)
def make_parser():
    """Build the command line parser.

    Returns:
        argparse.ArgumentParser with --genome and --output options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--genome', default=DEFAULT_GENOME,
                        help='limit to one genome')
    # Reuse the module constant instead of duplicating the path literal,
    # and fix the garbled help text ("where to write to write report").
    parser.add_argument('--output', default=DEFAULT_OUTPUT,
                        help="specify where to write the report")
    return parser
SUBMISSION_QUERY = """
PREFIX xsd:<http://www.w3.org/2001/XMLSchema#>
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
PREFIX ucscSubmission:<http://jumpgate.caltech.edu/wiki/UcscSubmissionOntology#>
PREFIX libraryOntology:<http://jumpgate.caltech.edu/wiki/LibraryOntology#>
PREFIX daf: <http://jumpgate.caltech.edu/wiki/UcscDaf#>
PREFIX ddf: <http://encodesubmit.ucsc.edu/pipeline/download_ddf#>
SELECT distinct ?assembly ?experiment ?library_urn ?library_name ?submission ?submission_status ?submission_name ?date
WHERE {{
?submission ucscSubmission:library_urn ?library_urn ;
ucscSubmission:has_status ?status ;
ucscSubmission:name ?submission_name ;
libraryOntology:date ?date .
?status daf:assembly ?assembly ;
ucscSubmission:status ?submission_status .
OPTIONAL {{ ?library_urn libraryOntology:name ?library_name . }}
OPTIONAL {{ ?library_urn libraryOntology:experiment_type ?experiment . }}
FILTER(!regex(?submission_status, "revoked", "i"))
{assembly_filter}
}}
ORDER BY ?assembly ?experiment ?library_urn ?submission
"""
SUBMISSION_TEMPLATE = '''
<html>
<head>
<style type="text/css">
table { border-width: 0 0 1px 1px; border-style: solid; }
th,td { border-width: 1px 1px 0 0; border-style: solid; margin: 0;}
.library { font-size: 18pt; background-color: #EEF; }
.submission { font-size: 12pt; background-color: #EFE;}
</style>
<title>Submission report for {{ genome }}</title>
</head>
<body>
<h1>Genome: {{ genome }}</h1>
{% for experiment in libraries %}
<h2>{{ experiment }}</h2>
<table>
<thead>
<tr class="library">
<td>Library ID</td>
<td colspan="3">Library Name</td>
</tr>
<tr class="submission">
<td>Submission ID</td>
<td>Last Updated</td>
<td>Status</td>
<td>Submission name</td>
</tr>
</thead>
<tbody>
{% for liburn, records in libraries[experiment]|dictsort %}
<!-- {{ liburn }} -->
<tr class="library">
<td>
<a href="{{libraries[experiment][liburn].0.library_urn}}">
{{ libraries[experiment][liburn].0.library_urn | trim_rdf}}
</a>
</td>
<td colspan="3">{{ libraries[experiment][liburn].0.library_name }}</td>
</tr>
{% for record in records|sort %}
<tr class="submission">
<td><a href="{{record.submission}}">{{record.submission|trim_rdf}}</a></td>
<td>{{ record.date|timestamp_to_date }}</td>
<td>{{ record.submission_status }}</td>
<td>{{ record.submission_name }}</td>
</tr>
{% endfor %}
{% endfor %}
</tbody>
</table>
{% endfor %}
</body>
</html>
'''
def what_have_we_done(model, genome):
    """Render the HTML submission report for one genome.

    Args:
        model: RDF model holding the encode submission metadata
        genome (str): assembly name used to filter submissions
            (matched as a case-insensitive regex)

    Returns:
        str: the rendered HTML report
    """
    # Removed a dead `assembly_filter = ''` assignment that was
    # immediately overwritten on the next line.
    assembly_filter = 'FILTER(regex(?assembly, "{0}", "i"))'.format(genome)

    query = SUBMISSION_QUERY.format(
        assembly_filter=assembly_filter
    )
    compiled_query = RDF.SPARQLQuery(query)
    submissions = compiled_query.execute(model)
    libraries = group_by_library(submissions)

    # Register the custom filters the template relies on.
    environment = jinja2.Environment()
    environment.filters['trim_rdf'] = trim_rdf
    environment.filters['timestamp_to_date'] = timestamp_to_date
    template = environment.from_string(SUBMISSION_TEMPLATE)
    return template.render(libraries=libraries,
                           genome=genome)
def group_by_library(submissions):
    """Group submission query rows by experiment type and library URN.

    Args:
        submissions: iterable of SPARQL result rows exposing
            'library_urn' (node with a .uri attribute) and 'experiment'.

    Returns:
        dict: {experiment: {library_urn: [row, ...]}}
    """
    grouped = {}
    for row in submissions:
        experiment = str(row['experiment'])
        urn = str(row['library_urn'].uri)
        by_urn = grouped.setdefault(experiment, {})
        by_urn.setdefault(urn, []).append(row)
    return grouped
def trim_rdf(value):
    """Return the last path component of an RDF URI-like value.

    Used as a Jinja2 filter to shorten URIs for display.

    Args:
        value: a URI string, an RDF node convertible with str(), or None.

    Returns:
        The final '/'-separated segment (ignoring a single trailing
        slash), the empty string for empty input, or None for None.
    """
    if value is None:
        return None
    value = str(value)
    if not value:
        return value
    # Ignore a single trailing slash so ".../12345/" yields "12345".
    if value.endswith('/'):
        value = value[:-1]
    # str.split always returns at least one element, so taking the last
    # element is safe (the original dead "empty split" branch is removed).
    return value.split('/')[-1]
def timestamp_to_date(value):
    """Jinja2 filter: keep only the date part of an ISO 'date T time' value."""
    date_part, time_part = str(value).split('T')
    return date_part
if __name__ == "__main__":
    # Command line entry point: build and write the submission report.
    main()
| StarcoderdataPython |
1642372 | <gh_stars>1-10
"""
input:
- a fasta file with all sequences used for all-by-all blast
- a file with all the unfiltered results from all-by-all blastn or blastp
Currently assumes that the query and hit are in the same direction.
Ignores hits from the same taxon.
Checks for sequence ends that don't have any hits in any other taxa.
output:
- .cutinfo file with start and end of seq to keep, and size of seq
- .cut file with all the sequences after cutting
"""
import os,sys
from Bio import SeqIO
# Trimmed sequences shorter than this many residues are discarded.
MIN_SEQ_LEN = 40
#if taxon id pattern changes, change it here
#if taxon id pattern changes, change it here
def get_taxonid(seqid):
    """Return the taxon id, i.e. everything before the first '@' in seqid."""
    taxon, _sep, _rest = seqid.partition("@")
    return taxon
def cut_seq_ends(fasta,blast_output,logfile="log"):
    """Trim each sequence to the widest region covered by blast hits from
    OTHER taxa, and drop sequences that had no interspecific hits at all.

    Args:
        fasta: fasta file with all sequences used for the all-by-all blast
        blast_output: unfiltered all-by-all blastn/blastp tabular results
        logfile: file the per-taxon survival summary is appended to

    Side effects: writes <blast_output>.cutinfo (seqid, start, end,
    sequence length) and <fasta>.cut (the trimmed sequences), and
    appends a summary table to *logfile*.

    Returns:
        the path of the trimmed fasta file (<fasta>.cut)
    """
    print "Reading raw blast output"
    cutDICT = {} #key is seqid, value is a list [start,end,length]
    with open(blast_output,"r") as infile:
        for line in infile:
            if len(line) < 3:
                continue #skip empty lines
            spls = line.strip().split("\t")
            query,hit = spls[0],spls[2]
            if get_taxonid(query) == get_taxonid(hit):
                continue #ignore hits from the same taxa
            qlen,qstart,qend = int(spls[1]),int(spls[10]),int(spls[11])
            slen,sstart,send = int(spls[3]),int(spls[12]),int(spls[13])
            #get the widest range
            # 10000000 is a sentinel start larger than any real coordinate,
            # so the first min() below always replaces it.
            if query not in cutDICT:
                cutDICT[query] = [10000000,1,qlen] #[start,end,qlen]
            if hit not in cutDICT:
                cutDICT[hit] = [10000000,1,slen] #[start,end,slen]
            # start/end may come in either order, so feed both to min and max
            cutDICT[query][0] = min(cutDICT[query][0],qstart,qend) #compare starts
            cutDICT[query][1] = max(cutDICT[query][1],qstart,qend) #compare ends
            cutDICT[hit][0] = min(cutDICT[hit][0],sstart,send) #compare starts
            cutDICT[hit][1] = max(cutDICT[hit][1],sstart,send) #compare ends
    #output seqid, start and end for cutting, and seq length
    with open(blast_output+".cutinfo","w") as outfile:
        for seqid in cutDICT:
            start,end,length = cutDICT[seqid] #[start,end,length]
            outfile.write(seqid+"\t"+str(start)+"\t"+str(end)+"\t"+str(length)+"\n")
    # NOTE(review): this message uses sys.argv[1] instead of blast_output,
    # which is misleading when this function is called as a library -- confirm.
    print "Output written to",sys.argv[1]+".cutinfo"
    print "Cutting"
    outfile = open(fasta+".cut","w")
    outdict = {} # key is taxonid, value is [before,after,half_left]
    with open(fasta,"r") as handle:
        for record in SeqIO.parse(handle,"fasta"):
            seqid,seq = str(record.id),str(record.seq)
            taxonid = get_taxonid(seqid)
            if taxonid not in outdict:
                outdict[taxonid]= [0,0,0]
            outdict[taxonid][0] += 1
            if seqid in cutDICT:
                start,end,length = cutDICT[seqid]
                # blast coordinates are 1-based inclusive; convert to a slice
                seq_cut = seq[start-1:end]
                if len(seq_cut) >= (len(seq)/2):
                    outdict[taxonid][1] += 1 # at least half survived cutting
                #print seqid, start, end, length,end-start+1,MIN_SEQ_LEN
                if len(seq_cut) >= MIN_SEQ_LEN:
                    outfile.write(">"+seqid+"\n"+seq_cut+"\n")
                    outdict[taxonid][2] += 1
            else: pass # remove seqs with no interspecific hits
    outfile.close()
    # Per-taxon survival summary, printed and appended to the log file.
    summary = "taxonid\tbefore\thalf_left\tafter\tperc_survive\n"
    for i in outdict:
        out = outdict[i]
        summary+= i+"\t"+str(out[0])+"\t"+str(out[1])+"\t"+str(out[2])+"\t"
        summary+= str(out[2]/float(out[0])*100)+"%\n"
    print summary
    with open(logfile,"a") as f: f.write(summary)
    return fasta+".cut"
if __name__ =="__main__":
    # Command line entry point: cut_seq_ends.py <fasta> <blast_output>
    if len(sys.argv) != 3:
        print "usage: cut_seq_ends.py fasta blast_output"
        sys.exit()
    fasta,blast_output = sys.argv[1:]
    cut_seq_ends(fasta,blast_output)
| StarcoderdataPython |
11262013 | # -*- coding: utf-8 -*-
class PoetFileError(Exception):
    """Base class for every error raised while handling a poetry.toml file."""
    pass
class MissingElement(PoetFileError):
    """Raised when a required element is absent from poetry.toml."""

    def __init__(self, element):
        message = 'The poetry.toml file is missing the [{}] element'.format(element)
        super(MissingElement, self).__init__(message)
class InvalidElement(PoetFileError):
    """Raised when a poetry.toml element is present but invalid."""

    def __init__(self, element, extra_info=''):
        message = 'The element [{}] is invalid'.format(element)
        if extra_info:
            message = '{} ({})'.format(message, extra_info)
        super(InvalidElement, self).__init__(message)
| StarcoderdataPython |
9654155 | <gh_stars>0
#!/usr/bin/env python
"""resinOS version distribution plot
Display the resinOS versions as time series, based on the fleet score data record.
"""
from datetime import datetime
import numpy
import xlrd
import semver
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# semver range marking OS releases counted as multicontainer (MC) capable.
MC_VERSION = ">=2.12.0"
def get_date(datestring):
    """Parse a 'YYYYMMDD' string into a date object.

    Args:
        datestring (string): the date to parse, in 'YYYYMMDD' format

    Returns:
        datetime.date: the corresponding date object
    """
    parsed = datetime.strptime(datestring, "%Y%m%d")
    return parsed.date()
def load_counts(workbook):
    """Create a dictionary of per-OS-version device-count time series.

    Args:
        workbook (xlrd.book.Book): a fleetscore spreadsheet in XLS format

    Returns:
        dict: key is the OS version (plus the aggregate keys '1.x',
            '2.x', 'mc-capable' and 'non-mc-capable'); value is an
            (N, 2) numpy array with date | count columns
    """
    date_sheets = workbook.sheet_names()[3:]  # first 3 sheets are not needed:
    # OS version, supervisor version, mods
    dates = numpy.empty((0, 2))
    # Add a row for each date we find
    for date_sheet_name in date_sheets:
        dates = numpy.append(
            dates, numpy.array([[get_date(date_sheet_name), 0]]), axis=0
        )
    # Create a dict with a copy of that empty array for all known resinOS versions
    oslist = {}
    ossheet = workbook.sheet_by_name("OSVer")
    for row_idx in range(1, ossheet.nrows):
        version = ossheet.cell(row_idx, 0).value
        oslist[version] = dates.copy()
    # Special Sheets: will hold the sum for the 1.x and 2.x devices,
    # plus the multicontainer-capability aggregates.
    extra_versions = ["1.x", "2.x", "mc-capable", "non-mc-capable"]
    for ver in extra_versions:
        oslist[ver] = dates.copy()
    # Load all the counts into the sheets by date
    for daily in date_sheets:
        daily_sheet = workbook.sheet_by_name(daily)
        daily_date = get_date(daily)
        # Data rows start at index 2 (presumably two header rows -- confirm).
        for row_idx in range(2, daily_sheet.nrows):
            version = daily_sheet.cell(row_idx, 0).value
            count = int(daily_sheet.cell(row_idx, 2).value)
            # Up the count for the version
            dayindex = numpy.where(oslist[version] == daily_date)[0][0]
            oslist[version][dayindex, 1] += count
            # Up the count for the major version ('2.x' vs '1.x')
            if version[0] == "2":
                special_version = "2.x"
            else:
                special_version = "1.x"
            dayindex = numpy.where(oslist[special_version] == daily_date)[0][0]
            oslist[special_version][dayindex, 1] += count
            # Up the count for the multicontainer-capability aggregate
            if semver.match(version, MC_VERSION):
                mc_capability = "mc-capable"
            else:
                mc_capability = "non-mc-capable"
            dayindex = numpy.where(oslist[mc_capability] == daily_date)[0][0]
            oslist[mc_capability][dayindex, 1] += count
    return oslist
def format_plot(fig, ax, title, xlim):
    """Apply the shared time-series formatting to one plot.

    Args:
        fig (matplotlib.figure.Figure): the main figure object to format
        ax (matplotlib.axes._subplots.AxesSubplot): the corresponding subplot
        title (str): plot title
        xlim (tuple): the x-axis limit tuple
    """
    month_locator = mdates.MonthLocator()   # major tick every month
    day_locator = mdates.DayLocator()       # minor tick every day
    month_formatter = mdates.DateFormatter("%Y %b")

    fig.autofmt_xdate()
    ax.legend()
    ax.xaxis.set_major_locator(month_locator)
    ax.xaxis.set_major_formatter(month_formatter)
    ax.xaxis.set_minor_locator(day_locator)
    ax.set_title(title)
    ax.set_ylabel("Count")
    ax.set_ylim(0)
    ax.set_xlim(xlim)
    ax.grid(True)
def plot_data(oslist, special_versions):
    """Plot the version data on three figures (per version, per major
    version, per multicontainer capability).

    Args:
        oslist (dict): collection of time series data to plot, key is the version,
            value is a timeseries ((N, 2) array of date | count rows)
        special_versions (list): versions to highlight, which keys in oslist to emphasize
    """
    # Plot by version
    fig1, ax1 = plt.subplots(figsize=(12, 8), dpi=150)
    # Plot by major version
    fig2, ax2 = plt.subplots(figsize=(12, 8), dpi=150)
    major_versions = ["1.x", "2.x"]
    # Plot by Multicontainer (MC) capability
    fig3, ax3 = plt.subplots(figsize=(12, 8), dpi=150)
    mc_capability = ["mc-capable", "non-mc-capable"]
    # Some markers, nothing special in how many there are, could add more
    markers = [None, "o", "^", "v", "s", "d", "D", "|"]
    count = 0
    for key in oslist:
        # Route each series to the figure matching its category.
        ax = ax1
        if key in major_versions:
            ax = ax2
        if key in mc_capability:
            ax = ax3
        counts = oslist[key]
        if key in special_versions:
            # Highlighted series: thick solid line, legend entry, cycled marker.
            linewidth = 3
            linestyle = "-"
            ax.plot(
                counts[:, 0],
                counts[:, 1],
                linewidth=linewidth,
                linestyle=linestyle,
                label=key,
                marker=markers[count % len(markers)],
            )
            count += 1
        else:
            # Background series: thin dashed line, no legend entry.
            linewidth = 1
            linestyle = "--"
            ax.plot(
                counts[:, 0], counts[:, 1], linewidth=linewidth, linestyle=linestyle
            )
    # NOTE(review): XLIM reuses `counts` left over from the last loop
    # iteration. All series built by load_counts() share the same date axis
    # so any of them works, but this raises NameError when oslist is empty
    # -- confirm that is acceptable.
    XLIM = (counts[0, 0], counts[-1, 0])
    format_plot(
        fig1, ax1, "Device count in a rolling 28-day window by OS version", XLIM
    )
    format_plot(
        fig2, ax2, "Device count in a rolling 28-day window by major OS version", XLIM
    )
    format_plot(
        fig3,
        ax3,
        "Device count in a rolling 28-day window by multicontainer capability",
        XLIM,
    )
    plt.show()
if __name__ == "__main__":
    import sys

    # Expect the path of a fleetscore XLS workbook as the only argument.
    if len(sys.argv) < 2:
        print(
            "Please provide an input file as an argument to the script.",
            file=sys.stderr,
        )
        sys.exit(1)
    FLEETSCORE_FILE = sys.argv[1]
    WORKBOOK = xlrd.open_workbook(FLEETSCORE_FILE)
    OSLIST = load_counts(WORKBOOK)
    # List of versions we want to highlight.
    SPECIAL_VERSIONS = [
        "1.x",
        "2.x",
        "2.15.1",
        "2.13.6",
        "2.12.7",
        "2.12.6",
        "2.12.5",
        "2.12.3",
        "2.9.7",
        "2.7.5",
        "2.3.0",
        "2.2.0",
        "mc-capable",
        "non-mc-capable",
    ]
    plot_data(OSLIST, SPECIAL_VERSIONS)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.