code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# --- Day 17: Trick Shot ---
import math
import time
def get_puzzle_input(filepath):
    """Parse the puzzle's target-area line into integer bounds.

    The file is expected to contain a line such as
    ``target area: x=269..292, y=-68..-44``; returns (x1, x2, y1, y2).
    """
    with open(filepath) as handle:
        for line in handle:
            tokens = line.rstrip().replace(',', '').split()
            # tokens[2] is "x=<lo>..<hi>", tokens[3] is "y=<lo>..<hi>"
            x_lo, x_hi = tokens[2][2:].split("..")
            y_lo, y_hi = tokens[3][2:].split("..")
            return int(x_lo), int(x_hi), int(y_lo), int(y_hi)
class Position:
    """A probe launched from the origin with initial velocity (vx, vy).

    Models the AoC 2021 day 17 physics: each step the horizontal speed
    decays towards zero by 1 (drag) and the vertical speed decreases by 1
    (gravity).  Positions at a given step are computed in closed form.
    """

    def __init__(self, vx, vy):
        self.x = 0  # launch position (origin)
        self.y = 0
        self.max_y = 0
        self.vx = vx  # initial horizontal velocity
        self.vy = vy  # initial vertical velocity
        # Step index from which a vertical-only search suffices (sentinel
        # until check_if_hitting_target determines the real value).
        self.steps_to_start_searching_y = 10000

    def __eq__(self, other):
        """Compare against a [vx, vy] list or another Position.

        Bug fix: the original fallback ``return self == other`` recursed
        infinitely for any non-list operand.  Position operands are now
        compared by their initial velocities; anything else is delegated
        via NotImplemented per the Python data model.
        """
        if type(other) == type([1, 2]):
            return other[0] == self.vx and other[1] == self.vy
        if isinstance(other, Position):
            return other.vx == self.vx and other.vy == self.vy
        return NotImplemented

    def __gt__(self, other):
        return self.x > other.x

    def __lt__(self, other):
        return self.x < other.x

    def get_position_at_step(self, t):
        """Return (x, y) after ``t`` steps, computed in closed form."""
        if self.vx >= 0:
            if self.vx + 1 <= t:
                # Horizontal speed has decayed to zero: total distance is
                # the triangular number vx + (vx-1) + ... + 1.
                x = 0.5 * (self.vx * self.vx + self.vx)
            else:
                x = t * self.vx - 0.5 * (t * t - t)
        else:
            if self.vx + 1 <= t:
                vx_abs = abs(self.vx)
                # No horizontal speed left anymore (negative direction).
                x = vx_abs * self.vx + 0.5 * (vx_abs * vx_abs + vx_abs)
            else:
                x = t * self.vx + 0.5 * (t * t + t)
        y = self.get_y_at_step(t)
        return x, y

    def get_y_at_step(self, t):
        """Vertical position after ``t`` steps: y_t = t*vy - t(t-1)/2."""
        return t * self.vy - (0.5 * (t - 1) * t)

    def __str__(self):
        return "{},{} {} steps".format(str(self.vx), str(self.vy), self.steps_to_start_searching_y)

    def check_if_hitting_target(self, x1, x2, y1, y2, maxsteps=1000):
        """Return True if this shot ever lands inside the target box.

        The target is x1 <= x <= x2 (positive x assumed) and
        y1 <= y <= y2 with ``y1`` the lower edge.  ``maxsteps`` is kept
        for interface compatibility but is not used.
        """
        # Position once all horizontal speed is spent (at step vx).
        x, y = self.get_position_at_step(self.vx)
        if x < x1:
            # Fell short horizontally: the probe can never reach the box.
            return False
        if x1 <= x <= x2:
            # Horizontal movement stops above the target column; from here
            # the probe falls straight down, so scan vertical steps only.
            if y >= min(y1, y2):
                self.steps_to_start_searching_y = self.vx
                t = self.vx
                while y >= y1:
                    y = self.get_y_at_step(t)
                    if y2 >= y >= y1:
                        return True
                    t += 1
                return False
        # Overshot (or missed vertically) at step vx: rewind in chunks of
        # 10 steps until left of the right edge, then test each earlier
        # integer step exactly.
        t = self.vx - 1
        wx, wy = self.get_position_at_step(max(t - 10, 0))
        while wx > x2:
            t -= 10
            wx, wy = self.get_position_at_step(max(t - 10, 0))
        while x >= x1:
            x, y = self.get_position_at_step(t)
            if x1 <= x <= x2 and y2 >= y >= y1:
                return True
            t -= 1
        return False

    def check_highest_y(self, left_t, right_t, t):
        """Recursive search for the apex y between steps left_t..right_t."""
        y = self.get_y_at_step(t)
        next_y = self.get_y_at_step(t + 1)
        if next_y > y:
            # Still ascending: move the probe step to the right half.
            new_t = t + math.floor(0.5 * (right_t - t))
            return self.check_highest_y(left_t=t, right_t=right_t, t=new_t)
        prev_y = self.get_y_at_step(t - 1)
        if prev_y > y:
            # Already descending: move the probe step to the left half.
            new_t = t - math.floor(0.5 * (t - left_t))
            return self.check_highest_y(left_t=left_t, right_t=t, t=new_t)
        # Neither neighbour is higher: this step is the apex.
        return y

    def get_highest_y(self):
        """Peak height of the trajectory: 0 for vy <= 0, else vy*(vy+1)/2."""
        if self.vy <= 0:
            # With non-positive vertical speed the launch point is the peak.
            return 0
        return self.vy * (self.vy + 1) / 2
def get_highest_trick_shot(x1, x2, y1, y2):
    """Brute-force all launch velocities that hit the target box.

    Returns (highest reachable y over all hitting shots, number of
    hitting shots, the list of hitting Position objects).
    """
    hitting_shots = []
    peak_heights = []
    # vx below sqrt(x1) can never reach the left edge; vx above x2
    # overshoots in a single step, so the scan is bounded on both sides.
    for vx in range(math.floor(math.sqrt(x1)), x2 + 1):
        for vy in range(y1, 10 * x2):
            shot = Position(vx, vy)
            if shot.check_if_hitting_target(x1, x2, y1, y2):
                hitting_shots.append(shot)
                peak_heights.append(shot.get_highest_y())
    return max(peak_heights), len(hitting_shots), hitting_shots
def resolve_puzzle_part1(filepath):
    """Read the target area from *filepath*, search all shots and print
    the highest reachable y plus the count of hitting velocities."""
    x1, x2, y1, y2 = get_puzzle_input(filepath)
    y, count, hitting_shots = get_highest_trick_shot(x1, x2, y1, y2)
    print("HIghest position is: {}, Count: {}".format(y, count))
    # NOTE(review): the block below parses a reference-answer file but the
    # resulting int_hits list is never used (debug leftover?) and it raises
    # FileNotFoundError if "hitting_shots.txt" is absent -- confirm before
    # removing.
    with open("hitting_shots.txt") as f:
        for line in f:
            hits = line.rstrip().split()
            int_hits = []
            for hit in hits:
                [x, y] = hit.split(',')
                int_hits.append([int(x), int(y)])
            pass
# Run the sample input first as a sanity check, then the real puzzle
# input, timing each run.
print("TEST")
start = time.time()
resolve_puzzle_part1("test_data.txt")
print("Time: {}".format(time.time()-start))
print("PUZZLE")
start = time.time()
resolve_puzzle_part1("data.txt")
print("Time: {}".format(time.time()-start))
| [
"math.sqrt",
"time.time",
"math.floor"
] | [((7662, 7673), 'time.time', 'time.time', ([], {}), '()\n', (7671, 7673), False, 'import time\n'), ((7780, 7791), 'time.time', 'time.time', ([], {}), '()\n', (7789, 7791), False, 'import time\n'), ((6751, 6764), 'math.sqrt', 'math.sqrt', (['x1'], {}), '(x1)\n', (6760, 6764), False, 'import math\n'), ((7736, 7747), 'time.time', 'time.time', ([], {}), '()\n', (7745, 7747), False, 'import time\n'), ((7849, 7860), 'time.time', 'time.time', ([], {}), '()\n', (7858, 7860), False, 'import time\n'), ((5804, 5835), 'math.floor', 'math.floor', (['(0.5 * (right_t - t))'], {}), '(0.5 * (right_t - t))\n', (5814, 5835), False, 'import math\n'), ((6123, 6153), 'math.floor', 'math.floor', (['(0.5 * (t - left_t))'], {}), '(0.5 * (t - left_t))\n', (6133, 6153), False, 'import math\n')] |
import zipfile

# Create (or overwrite) the archive in write mode.
zip_file = zipfile.ZipFile("zip_archive.zip", "w")
# Add three text files; they must already exist in the working directory.
zip_file.write("textfile_for_zip_01")
zip_file.write("textfile_for_zip_02")
zip_file.write("textfile_for_zip_03")
# print(zipfile.is_zipfile("zip_archive.zip"))
# zip_file = zipfile.ZipFile("zip_archive.zip")
# print(zip_file.namelist())
# print(zip_file.infolist())
# zip_info = zip_file.getinfo("textfile_for_zip_02")
# print(zip_info.file_size)
# print(zip_file.read("textfile_for_zip_01"))
# Extract one member, then all members, into the current directory.
zip_file.extract("textfile_for_zip_02")
zip_file.extractall()
zip_file.close()
"zipfile.ZipFile"
] | [((29, 68), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""zip_archive.zip"""', '"""w"""'], {}), "('zip_archive.zip', 'w')\n", (44, 68), False, 'import zipfile\n')] |
import logging
from datetime import datetime
import numpy as np
from logging.config import dictConfig
from kafkawrapper.producer import Producer
from utils.mongo_utils import BenchMarkingProcessRepo
from configs.configs import ulca_notifier_input_topic, ulca_notifier_benchmark_completed_event, ulca_notifier_benchmark_failed_event
from models.metric_manager import MetricManager
# Module-level singletons shared by every handler instance: the file
# logger, the Kafka producer and the benchmarking-process repository.
log = logging.getLogger('file')
prod = Producer()
repo = BenchMarkingProcessRepo()
class OcrMetricEvalHandler:
    """Runs OCR metric evaluation for each benchmark dataset in a request."""

    def __init__(self):
        pass

    def execute_ocr_metric_eval(self, request):
        """
        Evaluate every benchmark dataset in *request* with its configured metric.

        For each dataset a score document is persisted, the overall process
        status ('Completed'/'Failed') is recorded and a mail notification
        event is produced to the notifier topic.

        NOTE(review): a missing metric definition aborts the whole loop with
        ``return`` instead of continuing with the remaining datasets, and a
        'Completed' status is written per-dataset inside the loop -- confirm
        both are intentional.
        """
        try:
            log.info("Executing Ocr Metric Evaluation.... {}".format(datetime.now()))
            metric_mgr = MetricManager.getInstance()
            if 'benchmarkDatasets' in request.keys():
                for benchmark in request["benchmarkDatasets"]:
                    metric_inst = metric_mgr.get_metric_execute(benchmark["metric"], request["modelTaskType"])
                    if not metric_inst:
                        # No metric implementation registered for this task type.
                        log.info("Metric definition not found")
                        doc = {'benchmarkingProcessId':request['benchmarkingProcessId'],'benchmarkDatasetId': benchmark['datasetId'],'eval_score': None}
                        repo.insert(doc)
                        repo.insert_pt({'benchmarkingProcessId': request['benchmarkingProcessId'], 'status': 'Failed'})
                        mail_notif_event = {"event": ulca_notifier_benchmark_failed_event, "entityID": request['modelId'], "userID": request['userId'], "details":{"modelName":request['modelName']}}
                        prod.produce(mail_notif_event, ulca_notifier_input_topic, None)
                        return
                    # Ground-truth vs model-output text pairs for this dataset.
                    ground_truth = [corpus_sentence["tgt"] for corpus_sentence in benchmark["corpus"]]
                    machine_translation = [corpus_sentence["mtgt"] for corpus_sentence in benchmark["corpus"]]
                    eval_score = metric_inst.ocr_metric_eval(ground_truth, machine_translation)
                    if eval_score:
                        doc = {'benchmarkingProcessId':request['benchmarkingProcessId'],'benchmarkDatasetId': benchmark['datasetId'],'eval_score': float(np.round(eval_score, 3))}
                        repo.insert(doc)
                        repo.insert_pt({'benchmarkingProcessId': request['benchmarkingProcessId'], 'status': 'Completed'})
                        mail_notif_event = {"event": ulca_notifier_benchmark_completed_event, "entityID": request['modelId'], "userID": request['userId'], "details":{"modelName":request['modelName']}}
                        prod.produce(mail_notif_event, ulca_notifier_input_topic, None)
                    else:
                        # NOTE(review): a legitimate score of 0 is falsy and is
                        # treated as a failure here -- confirm that is intended.
                        log.exception("Exception while metric evaluation of model")
                        doc = {'benchmarkingProcessId':request['benchmarkingProcessId'],'benchmarkDatasetId': benchmark['datasetId'],'eval_score': None}
                        repo.insert(doc)
                        repo.insert_pt({'benchmarkingProcessId': request['benchmarkingProcessId'], 'status': 'Failed'})
                        mail_notif_event = {"event": ulca_notifier_benchmark_failed_event, "entityID": request['modelId'], "userID": request['userId'], "details":{"modelName":request['modelName']}}
                        prod.produce(mail_notif_event, ulca_notifier_input_topic, None)
            else:
                log.exception("Missing parameter: benchmark details")
                repo.insert_pt({'benchmarkingProcessId': request['benchmarkingProcessId'], 'status': 'Failed'})
                mail_notif_event = {"event": ulca_notifier_benchmark_failed_event, "entityID": request['modelId'], "userID": request['userId'], "details":{"modelName":request['modelName']}}
                prod.produce(mail_notif_event, ulca_notifier_input_topic, None)
                return
        except Exception as e:
            log.exception(f"Exception while metric evaluation of model: {str(e)}")
            repo.insert_pt({'benchmarkingProcessId': request['benchmarkingProcessId'], 'status': 'Failed'})
            mail_notif_event = {"event": ulca_notifier_benchmark_failed_event, "entityID": request['modelId'], "userID": request['userId'], "details":{"modelName":request['modelName']}}
            prod.produce(mail_notif_event, ulca_notifier_input_topic, None)
# Log config: route DEBUG-and-above records both to info.log and to
# stdout, with a shared format carrying thread and module context.
dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
    }},
    'handlers': {
        'info': {
            'class': 'logging.FileHandler',
            'level': 'DEBUG',
            'formatter': 'default',
            'filename': 'info.log'
        },
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': 'default',
            'stream': 'ext://sys.stdout',
        }
    },
    'loggers': {
        'file': {
            'level': 'DEBUG',
            'handlers': ['info', 'console'],
            'propagate': ''
        }
    },
    'root': {
        'level': 'DEBUG',
        'handlers': ['info', 'console']
    }
})
"logging.getLogger",
"kafkawrapper.producer.Producer",
"utils.mongo_utils.BenchMarkingProcessRepo",
"models.metric_manager.MetricManager.getInstance",
"logging.config.dictConfig",
"datetime.datetime.now",
"numpy.round"
] | [((387, 412), 'logging.getLogger', 'logging.getLogger', (['"""file"""'], {}), "('file')\n", (404, 412), False, 'import logging\n'), ((421, 431), 'kafkawrapper.producer.Producer', 'Producer', ([], {}), '()\n', (429, 431), False, 'from kafkawrapper.producer import Producer\n'), ((439, 464), 'utils.mongo_utils.BenchMarkingProcessRepo', 'BenchMarkingProcessRepo', ([], {}), '()\n', (462, 464), False, 'from utils.mongo_utils import BenchMarkingProcessRepo\n'), ((4418, 5006), 'logging.config.dictConfig', 'dictConfig', (["{'version': 1, 'formatters': {'default': {'format':\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s'\n }}, 'handlers': {'info': {'class': 'logging.FileHandler', 'level':\n 'DEBUG', 'formatter': 'default', 'filename': 'info.log'}, 'console': {\n 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter':\n 'default', 'stream': 'ext://sys.stdout'}}, 'loggers': {'file': {'level':\n 'DEBUG', 'handlers': ['info', 'console'], 'propagate': ''}}, 'root': {\n 'level': 'DEBUG', 'handlers': ['info', 'console']}}"], {}), "({'version': 1, 'formatters': {'default': {'format':\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s'\n }}, 'handlers': {'info': {'class': 'logging.FileHandler', 'level':\n 'DEBUG', 'formatter': 'default', 'filename': 'info.log'}, 'console': {\n 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter':\n 'default', 'stream': 'ext://sys.stdout'}}, 'loggers': {'file': {'level':\n 'DEBUG', 'handlers': ['info', 'console'], 'propagate': ''}}, 'root': {\n 'level': 'DEBUG', 'handlers': ['info', 'console']}})\n", (4428, 5006), False, 'from logging.config import dictConfig\n'), ((705, 732), 'models.metric_manager.MetricManager.getInstance', 'MetricManager.getInstance', ([], {}), '()\n', (730, 732), False, 'from models.metric_manager import MetricManager\n'), ((663, 677), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (675, 677), 
False, 'from datetime import datetime\n'), ((2195, 2218), 'numpy.round', 'np.round', (['eval_score', '(3)'], {}), '(eval_score, 3)\n', (2203, 2218), True, 'import numpy as np\n')] |
from datetime import datetime
import discord
import itertools
from .utils import formatString, getUsageEmbed, getOopsEmbed
# IDEAS
# 1. Paying out points (without bets)
class DiscordPoints:
"""
Class that parses Discord Points info and interactions
Attributes
__________
fire (Fire obj): The fire instance where information is fetched/updated
Functions
__________
async getDiscordPointsEmbed(page, guild) -> (discord.Embed)
Makes an embedded message with total points for each user
def createNewReward(guild, rewardString) -> (discord.Embed)
Adds a reward and returns the updated list of rewards as an embedded msg
"""
fire = None
    def __init__(self, fire):
        # Fire instance backing all persistence (points and rewards).
        self.fire = fire
async def getDiscordPointsEmbed(self, page, guild):
"""
Makes an embedded message with DiscordPoints for each member in the guild
Parameters
----------
guild : discord.Guild
The server that we want to get information from
Returns
----------
discord.Embed
Embedded message of Discord Points for each member of the guild
"""
d = self.fire.fetchDiscordPoints(guild)
# This sorts the dictionary by highest-value and converts it to a list
# It takes form [(user_0.id, value_0) ...(user_n.id, value_n)]
info_arr = [(k, d[k]) for k in sorted(d, key=d.get, reverse=True)]
userString, pointsString, description = await self.__createdEmbedStrings(guild, info_arr, page)
title = "Discord Points"
return self.__createPointsEmbed(title, description, userString, pointsString)
    def createNewReward(self, guild, rewardString):
        """
        Create a new reward for the guild and return the updated rewards embed.

        The input is split into digit / non-digit runs; the trailing digit
        run is the price and everything before it is the title.

        NOTE(review): a title that itself contains digits (e.g. "CSGO 2v2")
        is split into several groups; only the final group is treated as the
        price, so such titles survive -- but a title *ending* in digits would
        lose them to the price. Confirm this parse is intended.

        Parameters
        ----------
        guild : discord.Guild
            The server the reward belongs to
        rewardString : string
            Raw command text: reward title followed by its price

        Returns
        ----------
        discord.Embed
            The updated rewards list, or a usage embed on parse failure
        """
        rewardStringList = ["".join(x) for _, x in itertools.groupby(rewardString, key=str.isdigit)]
        if len(rewardStringList) < 2:
            return getUsageEmbed(
                "-addreward [Desired Reward] [Price of the Reward]\n\nexample: -addreward CSGO with friends 500")
        try:
            rewardCost = int(rewardStringList[len(rewardStringList) - 1])
            rewardTitle = self.__parseRewardStringList(rewardStringList)
            self.fire.postNewReward(guild, rewardTitle, rewardCost)
            return self.getRewardsEmbed(guild)
        except Exception as e:
            print("ERROR ", e)
            return getUsageEmbed(
                "-addreward [Desired Reward] [Price of the Reward]\n\nexample: -addreward CSGO with friends 500")
def getRewardsEmbed(self, guild):
"""
Get all of the current rewards for the guild
Parameters
----------
guild : discord.Guild
The server that we want to get information from
Returns
----------
discord.Embed
Embedded message with all of the rewards for the guild
"""
rewards_dict = self.fire.fetchAllRewards(guild)
if rewards_dict == {}:
return self.__noRewardsEmbed(guild)
rewardsList = [(k, rewards_dict[k]) for k in sorted(rewards_dict, key=rewards_dict.get, reverse=True)]
idString, rewardsString, costsString = self.__getRewardsEmbedStrings(rewardsList)
return self.__createRewardsEmbed(idString, rewardsString, costsString)
def redeemReward(self, guild, user, reward_id):
"""
Redeems the desired reward with DiscordPoints
[@Todo: Ping Users associated with the reward]
Parameters
----------
guild : discord.Guild
The server that we want to get information from
user : discord.Member if in guild, discord.User otherwise
The user that redeemed the reward
reward_id : Int
The id of the reward to redeem
Returns
----------
discord.Embed
Embedded message with the redeemed reward
"""
points_dict = self.fire.fetchDiscordPoints(guild)
rewards_dict = self.fire.fetchAllRewards(guild)
rewards_list = [(k, rewards_dict[k]) for k in sorted(rewards_dict, key=rewards_dict.get, reverse=True)]
try:
# Check to see if the reward_id is within the list of rewards
if int(reward_id) > len(rewards_list) or int(reward_id) < 1:
return self.__createNotARewardEmbed()
reward_title = rewards_list[int(reward_id) - 1][0]
reward_cost = rewards_list[int(reward_id) - 1][1]
# Check to see if the user has enough points to redeem the reward
if points_dict[str(user.id)] and points_dict[str(user.id)] < reward_cost:
return self.__createNotEnoughPointsEmbed(user, points_dict[str(user.id)])
else:
new_points = points_dict[str(user.id)] - reward_cost
self.fire.postNewDiscordPoints(guild, str(user.id), new_points)
return self.__createRedeemRewardEmbed(reward_title, reward_cost, user, new_points)
except Exception as e:
print(e)
return getUsageEmbed("-redeemReward [Desired Reward Id]\n\nexample: -redeemReward 3")
    def addPoints(self, guild, author, user, points):
        """
        Add points to a specific user's balance (server admins only).

        [@Todo: Ping Users associated with the points]

        Parameters
        ----------
        guild : discord.Guild
            The server that we want to get information from
        author : discord.Member
            The command issuer; must have administrator permission
        user : discord.Member if in guild, discord.User otherwise
            The user whose balance is credited
        points : Int
            The amount of points to add (converted with int())

        Returns
        ----------
        discord.Embed
            Confirmation embed with the new balance, or an error embed
        """
        points_dict = self.fire.fetchDiscordPoints(guild)
        # NOTE(review): debug prints below were left in -- consider removing.
        print(user.id)
        try:
            # NOTE(review): unknown users are rejected before the admin check,
            # so non-admins can probe which user ids exist -- confirm ordering.
            if not str(user.id) in points_dict:
                return getOopsEmbed("User ID not correct")
            elif not author.guild_permissions.administrator:
                return getOopsEmbed("Command can only be used by Server-Admins")
            print(points_dict[str(user.id)])
            new_points = points_dict[str(user.id)] + int(points)
            print(new_points)
            self.fire.postNewDiscordPoints(guild, str(user.id), new_points)
            return self.__createPointsEmbed("Points added", "Points were added to balance", f"{user}", f"{new_points}")
        except Exception as e:
            print(e)
            print("Error adding points")
            return getOopsEmbed("Error adding points, check console")
# ---------- MARK: - Private Functions ----------
async def __createdEmbedStrings(self, guild, sortedList, page):
"""
Private helper function to create strings for the embedded message
Parameters
----------
guild : (discord.Guild)
The server that we are tracking
sortedList : arr[(key_0, val_0) ... (key_n, val_n)]
The sorted (by val) list of key, val pairs where key: user_id, val: points
page : (int)
Page of the message we want to look at (20 entries per page)
Returns
----------
discord.Embed
Formatted information embedded into a message
"""
member_dict = await self.fire.fetchAllMembers(guild)
# Max 20 entries / page
pages = len(sortedList) // 20 + 1
userString = ""
pointsString = ""
rankString = ""
if page > pages or page < 0:
page = 1
for i in range(0, 20):
shiftedIndex = (page - 1) * 20 + i
if shiftedIndex < len(sortedList):
user_id = sortedList[shiftedIndex][0]
points = sortedList[shiftedIndex][1]
if int(user_id) in member_dict.keys():
userString += member_dict[int(user_id)] + '\n'
pointsString += str(points) + '\n'
description = "Page " + str(page) + " of " + str(page)
return userString, pointsString, description
def __createPointsEmbed(self, title, description, userString, pointsString):
"""
Formats information into an embedded message
Parameters
----------
title: (str)
Title for the embedded message
description: (str)
Description for the embedded message
userString: (str)
String representing the list of ordered users
timeString: (str)
String representing the list of ordered points
rankString: (str)
String representing the ranks of each user
Returns
----------
discord.Embed
Formatted information embedded into a message
"""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="Username", value=userString)
embed.add_field(name="Discord Points", value=pointsString)
return embed
def __noRewardsEmbed(self, guild):
"""
Private function that shows that there are no rewards yet for the guild
Parameters
----------
guild : discord.Guild
The server that we want to get information from
Returns
----------
discord.Embed
Embedded message that states no rewards are in the guild
"""
now = datetime.today()
embed = discord.Embed(title="Oops!", description="", timestamp=now)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="No Rewards Set Yet!",
value="To add a reward:\n-addreward [Desired Reward] [Price of the Reward]")
return embed
def __getRewardsEmbedStrings(self, rewardsList):
"""
Private function that gets formatted strings for the list of rewards
Parameters
----------
rewardsList: [(reward_title_0, cost_0)...]
List of rewards sorted by the highest cost
Returns
----------
idString: string
String representing the id's of the rewards separated by '\n'
rewardString: string
String representing the title of the rewards separated by '\n'
costString: string
String representing the costs of the rewards separated by '\n'
"""
idString = ""
rewardString = ""
costString = ""
for i in range(len(rewardsList)):
numLines, formattedRewardString = formatString(str(rewardsList[i][0]))
idString += str(i + 1) + ("\n" * numLines)
rewardString += formattedRewardString + "\n"
costString += str(rewardsList[i][1]) + ("\n" * numLines)
return idString, rewardString, costString
def __createRewardsEmbed(self, idString, rewardString, costString):
"""
Private function to help create a rewards embed
Parameters
----------
idString: string
String representing the id's of the rewards separated by '\n'
rewardString: string
String representing the title of the rewards separated by '\n'
costString: string
String representing the costs of the rewards separated by '\n'
Returns
----------
discord.Embed
Embedded message that states all of the rewards
"""
title = "Discord Point Rewards"
description = ""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="ID", value=idString)
embed.add_field(name="Reward", value=rewardString)
embed.add_field(name="Price", value=costString)
return embed
def __createRedeemRewardEmbed(self, reward_title, reward_cost, user, new_points):
"""
Private function to help create a redeem reward embed
Parameters
----------
reward_title: string
Title of the reward to be redeemed
reward_cost : int
Cost of the reward to be redeemed
user : discord.Member if in guild, discord.User otherwise
User_id of the user that redeemed the reward
Returns
----------
discord.Embed
Embedded message that states the redeemed reward
"""
title = "Reward Redeemed"
description = ""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now)
embed.set_thumbnail(url=user.avatar_url)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="Reward", value=reward_title, inline=False)
embed.add_field(name="Price", value=reward_cost, inline=False)
embed.add_field(name="Points Remaining", value=str(new_points), inline=False)
return embed
def __createNotEnoughPointsEmbed(self, user, user_points):
"""
Private function to help create a not enough points embed message
Parameters
----------
user_points : int
The amount of points that the user currently has
user : discord.Member if in guild, discord.User otherwise
User that try to redeem the reward
Returns
----------
discord.Embed
Embedded message that states that the user doesn't have enough points
"""
title = "Oops!"
description = ""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now, colour=discord.Colour.red())
embed.set_thumbnail(url=user.avatar_url)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="Not enough points", value="You have: " + str(user_points))
return embed
def __createNotARewardEmbed(self):
"""
Private function to help create a "invalid reward id" embed
Returns
----------
discord.Embed
Embedded message that states that the reward id is invalid
"""
title = "Oops!"
description = ""
now = datetime.today()
embed = discord.Embed(title=title, description=description, timestamp=now, colour=discord.Colour.red())
embed.set_footer(text="Kirbec Bot", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.add_field(name="Not a reward", value="Please enter a valid reward id")
return embed
def __parseRewardStringList(self, rewardStringList):
"""
Private function to recreate reward title
Parameters
----------
rewardStringList: list(String)
List of strings representing the title
Returns
----------
s: string
The reward title string
"""
s = ""
for i in range(len(rewardStringList) - 1):
s += rewardStringList[i]
return s
| [
"datetime.datetime.today",
"discord.Colour.red",
"discord.Embed",
"itertools.groupby"
] | [((9243, 9259), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (9257, 9259), False, 'from datetime import datetime\n'), ((9276, 9342), 'discord.Embed', 'discord.Embed', ([], {'title': 'title', 'description': 'description', 'timestamp': 'now'}), '(title=title, description=description, timestamp=now)\n', (9289, 9342), False, 'import discord\n'), ((10010, 10026), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (10024, 10026), False, 'from datetime import datetime\n'), ((10043, 10102), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Oops!"""', 'description': '""""""', 'timestamp': 'now'}), "(title='Oops!', description='', timestamp=now)\n", (10056, 10102), False, 'import discord\n'), ((12150, 12166), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (12164, 12166), False, 'from datetime import datetime\n'), ((12183, 12249), 'discord.Embed', 'discord.Embed', ([], {'title': 'title', 'description': 'description', 'timestamp': 'now'}), '(title=title, description=description, timestamp=now)\n', (12196, 12249), False, 'import discord\n'), ((13225, 13241), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (13239, 13241), False, 'from datetime import datetime\n'), ((13258, 13324), 'discord.Embed', 'discord.Embed', ([], {'title': 'title', 'description': 'description', 'timestamp': 'now'}), '(title=title, description=description, timestamp=now)\n', (13271, 13324), False, 'import discord\n'), ((14418, 14434), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (14432, 14434), False, 'from datetime import datetime\n'), ((15212, 15228), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (15226, 15228), False, 'from datetime import datetime\n'), ((2182, 2230), 'itertools.groupby', 'itertools.groupby', (['rewardString'], {'key': 'str.isdigit'}), '(rewardString, key=str.isdigit)\n', (2199, 2230), False, 'import itertools\n'), ((14525, 14545), 'discord.Colour.red', 'discord.Colour.red', ([], {}), 
'()\n', (14543, 14545), False, 'import discord\n'), ((15319, 15339), 'discord.Colour.red', 'discord.Colour.red', ([], {}), '()\n', (15337, 15339), False, 'import discord\n')] |
from django.contrib import admin
from .models import MenuFacebook, MenuEmail, UserProfile, Occupation, FacebookRestaurant, EmailRestaurant
class MenuBaseAdmin(admin.ModelAdmin):
    """Shared admin configuration for the menu models (Facebook and e-mail
    menus), newest first with an editable lunch flag."""
    list_display = ('id', 'format_date', 'is_lunch', 'message')
    list_filter = ('created_date', 'is_lunch')
    list_editable = ('is_lunch',)
    ordering = ['-created_date']

    def format_date(self, obj):
        # Human-friendly timestamp column, e.g. "2024-01-31, 13:45".
        return obj.created_date.strftime('%Y-%m-%d, %R')
class RestaurantAdmin(admin.ModelAdmin):
    """Minimal change-list (id + name) shared by both restaurant models."""
    list_display = ('id', 'name')
class UserProfileInline(admin.StackedInline):
    # Inline editor over the UserProfile <-> restaurants M2M through table.
    model = UserProfile.restaurants.through
class UserProfileAdmin(admin.ModelAdmin):
    """Admin for user profiles, showing each user's linked restaurants."""
    inlines = UserProfileInline,
    list_display = ('name', 'restaurants_list',)

    def get_inline_instances(self, request, obj=None):
        # Hide the restaurants inline on the "add" form (obj is None there).
        if not obj:
            return []
        return super(UserProfileAdmin, self).get_inline_instances(request, obj)

    def name(self, obj):
        # Column: the linked auth user's username.
        return obj.user.username

    def restaurants_list(self, obj):
        # Column: one linked restaurant name per line.
        return "\n".join([a.name for a in obj.restaurants.all()])
class SeatAdmin(admin.ModelAdmin):
    """Admin for Occupation rows (seats taken/total declared per restaurant)."""
    list_display = ('id', 'restaurant', 'seats_taken', 'seats_total', 'date_declared')

    def restaurant(self, obj):
        # Column: the related restaurant's name.
        return obj.restaurant.name
# Register every model with its admin configuration.
admin.site.register(FacebookRestaurant, RestaurantAdmin)
admin.site.register(EmailRestaurant, RestaurantAdmin)
admin.site.register(MenuFacebook, MenuBaseAdmin)
admin.site.register(MenuEmail, MenuBaseAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(Occupation, SeatAdmin)
| [
"django.contrib.admin.site.register"
] | [((1279, 1335), 'django.contrib.admin.site.register', 'admin.site.register', (['FacebookRestaurant', 'RestaurantAdmin'], {}), '(FacebookRestaurant, RestaurantAdmin)\n', (1298, 1335), False, 'from django.contrib import admin\n'), ((1336, 1389), 'django.contrib.admin.site.register', 'admin.site.register', (['EmailRestaurant', 'RestaurantAdmin'], {}), '(EmailRestaurant, RestaurantAdmin)\n', (1355, 1389), False, 'from django.contrib import admin\n'), ((1390, 1438), 'django.contrib.admin.site.register', 'admin.site.register', (['MenuFacebook', 'MenuBaseAdmin'], {}), '(MenuFacebook, MenuBaseAdmin)\n', (1409, 1438), False, 'from django.contrib import admin\n'), ((1439, 1484), 'django.contrib.admin.site.register', 'admin.site.register', (['MenuEmail', 'MenuBaseAdmin'], {}), '(MenuEmail, MenuBaseAdmin)\n', (1458, 1484), False, 'from django.contrib import admin\n'), ((1485, 1535), 'django.contrib.admin.site.register', 'admin.site.register', (['UserProfile', 'UserProfileAdmin'], {}), '(UserProfile, UserProfileAdmin)\n', (1504, 1535), False, 'from django.contrib import admin\n'), ((1536, 1578), 'django.contrib.admin.site.register', 'admin.site.register', (['Occupation', 'SeatAdmin'], {}), '(Occupation, SeatAdmin)\n', (1555, 1578), False, 'from django.contrib import admin\n')] |
""" Module containing helper routines for routes """
from typing import Dict, Any, Set, List, Tuple
import numpy as np
from route_distances.utils.type_utils import StrDict
def calc_depth(tree_dict: StrDict, depth: int = 0) -> int:
    """
    Return the depth of a route tree by recursive descent.

    :param tree_dict: the route (or subtree) to measure
    :param depth: depth of the current node; leave at 0 for the root
    """
    child_nodes = tree_dict.get("children", [])
    if not child_nodes:
        return depth
    return max(calc_depth(node, depth + 1) for node in child_nodes)
def calc_llr(tree_dict: StrDict) -> int:
    """
    Return the longest linear route (LLR) of a synthetic route.

    Half of the tree depth — presumably because molecule and reaction
    nodes alternate level by level.

    :param tree_dict: the route
    """
    full_depth = calc_depth(tree_dict)
    return full_depth // 2
def extract_leaves(
    tree_dict: StrDict,
) -> Set[str]:
    """
    Collect the SMILES strings of all leaf nodes, i.e. the
    starting material of the route.

    :param tree_dict: the route
    :return: a set of SMILES strings
    """
    leaves: Set[str] = set()
    pending = [tree_dict]
    while pending:
        node = pending.pop()
        child_nodes = node.get("children", [])
        if child_nodes:
            pending.extend(child_nodes)
        else:
            leaves.add(node["smiles"])
    return leaves
def is_solved(route: StrDict) -> bool:
    """
    Find if a route is solved, i.e. if all starting material is in stock.

    To be accurate, each molecule node needs to carry an extra boolean
    property called `in_stock`; nodes lacking it are treated as in stock.

    :param route: the route to analyze
    :return: True if every leaf of the route is in stock
    """
    # Fix: the original raised and caught a ValueError (building an f-string
    # per failing leaf) purely as control flow; a direct boolean recursion
    # gives the same answer without the exception machinery.
    children = route.get("children", [])
    if not children:
        # Leaf node: solved iff the material is in stock (default: True).
        return bool(route.get("in_stock", True))
    # Internal node: solved only if every subtree is solved.
    return all(is_solved(child) for child in children)
def route_score(
    tree_dict: StrDict,
    mol_costs: Dict[bool, float] = None,
    average_yield=0.8,
    reaction_cost=1.0,
) -> float:
    """
    Compute the cost of a route with the method from
    (Badowski et al. Chem Sci. 2019, 10, 4640).

    Every reaction contributes a constant cost, scaled up through the
    tree by an average yield.  Leaf molecules contribute a cost that
    depends on whether they are in stock; each molecule node should
    carry a boolean `in_stock` property (missing values count as
    in stock).

    :param tree_dict: the route to analyze
    :param mol_costs: starting material costs keyed by the in-stock
        flag, defaults to {True: 1, False: 10}
    :param average_yield: the average yield, defaults to 0.8
    :param reaction_cost: the reaction cost, defaults to 1.0
    :return: the computed cost
    """
    leaf_costs = mol_costs or {True: 1, False: 10}
    reactions = tree_dict.get("children", [])
    if not reactions:
        return leaf_costs[tree_dict.get("in_stock", True)]
    subtree_cost = sum(
        1 / average_yield * route_score(node) for node in reactions[0]["children"]
    )
    return reaction_cost + subtree_cost
def route_scorer(routes: List[StrDict]) -> Tuple[List[StrDict], List[float]]:
    """
    Score a list of routes and sort them from cheapest to most expensive.

    :param routes: the routes to score
    :return: the sorted routes and the matching costs
    """
    costs = np.asarray([route_score(item) for item in routes])
    order = np.argsort(costs)
    ordered_routes = [routes[position] for position in order]
    return ordered_routes, costs[order].tolist()
def route_ranks(scores: List[float]) -> List[int]:
    """
    Compute the rank of route scores; ranking starts at 1 and
    scores closer than 1e-8 share a rank.

    :param scores: the route scores, assumed sorted
    :return: a list of ranks, one per score
    """
    ranks = [1]
    for previous, current in zip(scores, scores[1:]):
        if abs(current - previous) < 1e-8:
            ranks.append(ranks[-1])
        else:
            ranks.append(ranks[-1] + 1)
    return ranks
| [
"numpy.argsort"
] | [((3623, 3641), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (3633, 3641), True, 'import numpy as np\n')] |
from __future__ import print_function
from node.tests import NodeTestCase
from yafowil.base import factory
from yafowil.compat import IS_PY2
import lxml.etree as etree
import sys
import unittest
import yafowil.common
import yafowil.compound
import yafowil.persistence
import yafowil.table
if not IS_PY2:
from importlib import reload
class YafowilTestCase(NodeTestCase):
    # Base test case that resets the yafowil widget factory before each test.
    def setUp(self):
        super(YafowilTestCase, self).setUp()
        # Wipe all registered blueprints, then re-import the modules that
        # presumably re-populate the factory at import time.  The reload
        # order mirrors the original and may matter — do not reorder.
        factory.clear()
        reload(yafowil.persistence)
        reload(yafowil.common)
        reload(yafowil.compound)
        reload(yafowil.table)
def fxml(xml):
    """Return *xml* re-serialized with pretty-printing, as unicode text."""
    tree = etree.fromstring(xml)
    pretty = etree.tostring(tree, pretty_print=True)
    return pretty.decode('utf-8')
def pxml(xml):
    """Print the pretty-printed form of *xml* to stdout."""
    formatted = fxml(xml)
    print(formatted)
def test_suite():
    """Collect every yafowil test module into a single unittest suite."""
    from yafowil.tests import (
        test_base,
        test_common,
        test_compound,
        test_controller,
        test_persistence,
        test_resources,
        test_table,
        test_tsf,
        test_utils,
    )
    modules = (
        test_base,
        test_common,
        test_compound,
        test_controller,
        test_persistence,
        test_resources,
        test_table,
        test_tsf,
        test_utils,
    )
    suite = unittest.TestSuite()
    for module in modules:
        suite.addTest(unittest.findTestCases(module))
    return suite
def run_tests():
    """Run the full suite with the zope testrunner and exit the process."""
    from zope.testrunner.runner import Runner
    runner = Runner(found_suites=[test_suite()])
    runner.run()
    # Propagate the runner's failure flag as the process exit status.
    sys.exit(int(runner.failed))
if __name__ == '__main__':
    # Allow running the test suite directly as a script.
    run_tests()
| [
"unittest.TestSuite",
"yafowil.base.factory.clear",
"importlib.reload",
"lxml.etree.fromstring",
"unittest.findTestCases",
"lxml.etree.tostring"
] | [((625, 646), 'lxml.etree.fromstring', 'etree.fromstring', (['xml'], {}), '(xml)\n', (641, 646), True, 'import lxml.etree as etree\n'), ((1168, 1188), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (1186, 1188), False, 'import unittest\n'), ((453, 468), 'yafowil.base.factory.clear', 'factory.clear', ([], {}), '()\n', (466, 468), False, 'from yafowil.base import factory\n'), ((477, 504), 'importlib.reload', 'reload', (['yafowil.persistence'], {}), '(yafowil.persistence)\n', (483, 504), False, 'from importlib import reload\n'), ((513, 535), 'importlib.reload', 'reload', (['yafowil.common'], {}), '(yafowil.common)\n', (519, 535), False, 'from importlib import reload\n'), ((544, 568), 'importlib.reload', 'reload', (['yafowil.compound'], {}), '(yafowil.compound)\n', (550, 568), False, 'from importlib import reload\n'), ((577, 598), 'importlib.reload', 'reload', (['yafowil.table'], {}), '(yafowil.table)\n', (583, 598), False, 'from importlib import reload\n'), ((1208, 1241), 'unittest.findTestCases', 'unittest.findTestCases', (['test_base'], {}), '(test_base)\n', (1230, 1241), False, 'import unittest\n'), ((1261, 1296), 'unittest.findTestCases', 'unittest.findTestCases', (['test_common'], {}), '(test_common)\n', (1283, 1296), False, 'import unittest\n'), ((1316, 1353), 'unittest.findTestCases', 'unittest.findTestCases', (['test_compound'], {}), '(test_compound)\n', (1338, 1353), False, 'import unittest\n'), ((1373, 1412), 'unittest.findTestCases', 'unittest.findTestCases', (['test_controller'], {}), '(test_controller)\n', (1395, 1412), False, 'import unittest\n'), ((1432, 1472), 'unittest.findTestCases', 'unittest.findTestCases', (['test_persistence'], {}), '(test_persistence)\n', (1454, 1472), False, 'import unittest\n'), ((1492, 1530), 'unittest.findTestCases', 'unittest.findTestCases', (['test_resources'], {}), '(test_resources)\n', (1514, 1530), False, 'import unittest\n'), ((1550, 1584), 'unittest.findTestCases', 'unittest.findTestCases', 
(['test_table'], {}), '(test_table)\n', (1572, 1584), False, 'import unittest\n'), ((1604, 1636), 'unittest.findTestCases', 'unittest.findTestCases', (['test_tsf'], {}), '(test_tsf)\n', (1626, 1636), False, 'import unittest\n'), ((1656, 1690), 'unittest.findTestCases', 'unittest.findTestCases', (['test_utils'], {}), '(test_utils)\n', (1678, 1690), False, 'import unittest\n'), ((658, 695), 'lxml.etree.tostring', 'etree.tostring', (['et'], {'pretty_print': '(True)'}), '(et, pretty_print=True)\n', (672, 695), True, 'import lxml.etree as etree\n')] |
import os
import dgl
import time
import argparse
import numpy as np
import torch as th
import distutils.util
import torch.nn.functional as F
import utils
import models
import data_loader
# Pin training to the first GPU; fall back to CPU when CUDA is unavailable.
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
dev = th.device('cuda' if th.cuda.is_available() else 'cpu')
if __name__ == '__main__':
    # Command-line interface: data paths, model checkpointing and
    # hyper-parameters for training a TAGCN node classifier.
    argparser = argparse.ArgumentParser("training")
    argparser.add_argument('--adj-path', type=str, default='../data/adj_matrix_formal_stage.pkl')
    argparser.add_argument('--feat-path', type=str, default='../data/feature_formal_stage.npy')
    argparser.add_argument('--label-path', type=str, default='../data/train_labels_formal_stage.npy')
    argparser.add_argument('--output-dir', type=str, default='./saved_models/')
    argparser.add_argument('--output-name', type=str, default='tagcn_128_3.pkl')
    argparser.add_argument('--if-load-model', type=lambda x: bool(distutils.util.strtobool(x)), default=False)
    argparser.add_argument('--model-dir', type=str, default='./saved_models/')
    argparser.add_argument('--model-name', type=str, default='tagcn_128_3.pkl')
    argparser.add_argument('--num-epochs', type=int, default=5000)
    argparser.add_argument('--num-hidden', type=int, default=128)
    argparser.add_argument('--num-layers', type=int, default=3)
    argparser.add_argument('--lr', type=float, default=0.001)
    argparser.add_argument('--dropout', type=float, default=0.1)
    argparser.add_argument('--adj-norm', type=lambda x: bool(distutils.util.strtobool(x)), default=True)
    argparser.add_argument('--feat-norm', type=str, default=None)
    args = argparser.parse_args()
    print(vars(args))
    # NOTE(review): `indices` is not defined anywhere in this script — this
    # line raises NameError as written; confirm the intended value.
    dataset = data_loader.KddDataset(args.adj_path, args.feat_path, args.label_path, indices)
    adj = dataset.adj
    features = dataset.features
    labels = dataset.labels
    train_mask = dataset.train_mask
    val_mask = dataset.val_mask
    test_mask = dataset.test_mask
    # The last 50000 rows are excluded from loss/accuracy below —
    # presumably test nodes; confirm against the dataset layout.
    size_raw = features.shape[0]
    size_reduced = size_raw - 50000
    graph = dgl.DGLGraph()
    if args.adj_norm:
        adj = utils.adj_preprocess(adj)
    feat_norm_func = utils.feat_norm(args.feat_norm)
    graph.from_scipy_sparse_matrix(adj)
    # Zero out feature values outside [-1, 1] before normalization.
    features = th.FloatTensor(features).to(dev)
    features[th.where(features < -1.0)[0]] = 0
    features[th.where(features > 1.0)[0]] = 0
    features = feat_norm_func(features)
    labels = th.LongTensor(labels).to(dev)
    graph.ndata['features'] = features
    model = models.TAGCN(100, args.num_hidden, 20, args.num_layers, activation=F.leaky_relu, dropout=args.dropout)
    # Optionally resume from a previously saved state dict.
    if args.if_load_model:
        model_states = th.load(os.path.join(args.model_dir, args.model_name), map_location=dev)
        model.load_state_dict(model_states)
    model = model.to(dev)
    optimizer = th.optim.Adam(model.parameters(), lr=args.lr)
    dur = []
    # Standard full-batch training loop; progress is reported every 10 epochs.
    for epoch in range(args.num_epochs):
        t0 = time.time()
        logits = model(graph, features).to(dev)
        logp = F.log_softmax(logits, 1)[:size_reduced]
        loss = F.nll_loss(logp[train_mask], labels[train_mask]).to(dev)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        dur.append(time.time() - t0)
        if epoch % 10 == 0:
            train_acc = utils.compute_acc(logp, labels, train_mask)
            val_acc = utils.compute_acc(logp, labels, val_mask)
            print('Epoch {:05d} | Loss {:.4f} | Train Acc {:.4f} | Val Acc {:.4f} '
                  '| Time(s) {:.4f} | GPU {:.1f} MiB'.format(
                epoch, loss, train_acc, val_acc, np.mean(dur), th.cuda.max_memory_allocated() / 1000000))
    # Persist the final weights (no best-checkpoint selection is done).
    th.save(model.state_dict(), os.path.join(args.output_dir, args.output_name))
| [
"utils.feat_norm",
"numpy.mean",
"utils.compute_acc",
"argparse.ArgumentParser",
"torch.nn.functional.nll_loss",
"torch.LongTensor",
"os.path.join",
"torch.FloatTensor",
"data_loader.KddDataset",
"utils.adj_preprocess",
"torch.cuda.is_available",
"dgl.DGLGraph",
"torch.nn.functional.log_soft... | [((336, 371), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""training"""'], {}), "('training')\n", (359, 371), False, 'import argparse\n'), ((1666, 1745), 'data_loader.KddDataset', 'data_loader.KddDataset', (['args.adj_path', 'args.feat_path', 'args.label_path', 'indices'], {}), '(args.adj_path, args.feat_path, args.label_path, indices)\n', (1688, 1745), False, 'import data_loader\n'), ((2012, 2026), 'dgl.DGLGraph', 'dgl.DGLGraph', ([], {}), '()\n', (2024, 2026), False, 'import dgl\n'), ((2110, 2141), 'utils.feat_norm', 'utils.feat_norm', (['args.feat_norm'], {}), '(args.feat_norm)\n', (2125, 2141), False, 'import utils\n'), ((2458, 2565), 'models.TAGCN', 'models.TAGCN', (['(100)', 'args.num_hidden', '(20)', 'args.num_layers'], {'activation': 'F.leaky_relu', 'dropout': 'args.dropout'}), '(100, args.num_hidden, 20, args.num_layers, activation=F.\n leaky_relu, dropout=args.dropout)\n', (2470, 2565), False, 'import models\n'), ((256, 278), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (276, 278), True, 'import torch as th\n'), ((2063, 2088), 'utils.adj_preprocess', 'utils.adj_preprocess', (['adj'], {}), '(adj)\n', (2083, 2088), False, 'import utils\n'), ((2885, 2896), 'time.time', 'time.time', ([], {}), '()\n', (2894, 2896), False, 'import time\n'), ((3635, 3682), 'os.path.join', 'os.path.join', (['args.output_dir', 'args.output_name'], {}), '(args.output_dir, args.output_name)\n', (3647, 3682), False, 'import os\n'), ((2197, 2221), 'torch.FloatTensor', 'th.FloatTensor', (['features'], {}), '(features)\n', (2211, 2221), True, 'import torch as th\n'), ((2243, 2268), 'torch.where', 'th.where', (['(features < -1.0)'], {}), '(features < -1.0)\n', (2251, 2268), True, 'import torch as th\n'), ((2290, 2314), 'torch.where', 'th.where', (['(features > 1.0)'], {}), '(features > 1.0)\n', (2298, 2314), True, 'import torch as th\n'), ((2376, 2397), 'torch.LongTensor', 'th.LongTensor', (['labels'], {}), 
'(labels)\n', (2389, 2397), True, 'import torch as th\n'), ((2620, 2665), 'os.path.join', 'os.path.join', (['args.model_dir', 'args.model_name'], {}), '(args.model_dir, args.model_name)\n', (2632, 2665), False, 'import os\n'), ((2960, 2984), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits', '(1)'], {}), '(logits, 1)\n', (2973, 2984), True, 'import torch.nn.functional as F\n'), ((3242, 3285), 'utils.compute_acc', 'utils.compute_acc', (['logp', 'labels', 'train_mask'], {}), '(logp, labels, train_mask)\n', (3259, 3285), False, 'import utils\n'), ((3308, 3349), 'utils.compute_acc', 'utils.compute_acc', (['logp', 'labels', 'val_mask'], {}), '(logp, labels, val_mask)\n', (3325, 3349), False, 'import utils\n'), ((3015, 3063), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['logp[train_mask]', 'labels[train_mask]'], {}), '(logp[train_mask], labels[train_mask])\n', (3025, 3063), True, 'import torch.nn.functional as F\n'), ((3171, 3182), 'time.time', 'time.time', ([], {}), '()\n', (3180, 3182), False, 'import time\n'), ((3545, 3557), 'numpy.mean', 'np.mean', (['dur'], {}), '(dur)\n', (3552, 3557), True, 'import numpy as np\n'), ((3559, 3589), 'torch.cuda.max_memory_allocated', 'th.cuda.max_memory_allocated', ([], {}), '()\n', (3587, 3589), True, 'import torch as th\n')] |
from __future__ import print_function
from configurations import configuration
from pymongo import MongoClient
# MongoDB host/URI taken from the application configuration.
MONGO_HOST= configuration.MONGO_HOST
# Single module-level client shared by every DBConnection instance.
client = MongoClient(MONGO_HOST)
class DBConnection:
    """Thin accessor around the shared module-level Mongo client."""

    def getConnection(self):
        """Return the ``analyticsDB`` database handle."""
        database = client.analyticsDB
        return database
| [
"pymongo.MongoClient"
] | [((158, 181), 'pymongo.MongoClient', 'MongoClient', (['MONGO_HOST'], {}), '(MONGO_HOST)\n', (169, 181), False, 'from pymongo import MongoClient\n')] |
from typing import Any, List
from dataclasses import dataclass, replace
from .consts import OK
class Data:
    """Mixin for dataclasses adding an immutable-update helper."""

    def replace(self, **kwargs):
        """Return a copy of *self* with the given fields overridden."""
        updated = replace(self, **kwargs)
        return updated
@dataclass(frozen=True)
class RpcCall(Data):
    """Immutable value object describing an RPC invocation."""
    route: str
    service: str
    method: str
    args: List[Any]
@dataclass(frozen=True)
class RpcResp(Data):
    """Immutable value object describing an RPC response."""
    status: int
    body: Any = None
    @property
    def ok(self):
        # Successful iff the status equals the OK constant from consts.
        return self.status == OK
| [
"dataclasses.dataclass",
"dataclasses.replace"
] | [((186, 208), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (195, 208), False, 'from dataclasses import dataclass, replace\n'), ((301, 323), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (310, 323), False, 'from dataclasses import dataclass, replace\n'), ((159, 182), 'dataclasses.replace', 'replace', (['self'], {}), '(self, **kwargs)\n', (166, 182), False, 'from dataclasses import dataclass, replace\n')] |
#!/usr/bin/python
import timeit
setup = '''
import os
def FileTest(path):
file = open(path, "r")
lines = file.readlines()
data = [None for i in range(len(lines))]
i = 0
for line in lines:
data[i] = line.split(',')
j = 0
for field in data[i]:
data[i][j] = field.strip('\\'\\n')
j += 1
i += 1
return data
'''
elapsed = timeit.timeit("FileTest(os.getcwd() + '/../employees.txt')", setup=setup, number=1)
print(elapsed * 1000.0, "ms - cold")
elapsed = timeit.timeit("FileTest(os.getcwd() + '/../employees.txt')", setup=setup, number=1)
print(elapsed * 1000.0, "ms - warm")
| [
"timeit.timeit"
] | [((345, 432), 'timeit.timeit', 'timeit.timeit', (['"""FileTest(os.getcwd() + \'/../employees.txt\')"""'], {'setup': 'setup', 'number': '(1)'}), '("FileTest(os.getcwd() + \'/../employees.txt\')", setup=setup,\n number=1)\n', (358, 432), False, 'import timeit\n'), ((477, 564), 'timeit.timeit', 'timeit.timeit', (['"""FileTest(os.getcwd() + \'/../employees.txt\')"""'], {'setup': 'setup', 'number': '(1)'}), '("FileTest(os.getcwd() + \'/../employees.txt\')", setup=setup,\n number=1)\n', (490, 564), False, 'import timeit\n')] |
# -*- coding: utf-8 -*-
'''
Created on 25.9.2011
@author: xaralis
'''
from model_utils import Choices
# Each Choices entry below is (db value, identifier, Czech display label).
# Client sex.
SEXES = Choices(
    (1, 'FEMALE', u'žena'),
    (2, 'MALE', u'muž')
)
# Client nationality.
NATIONALITIES = Choices(
    (1, 'CZ', u'Česká republika'),
    (2, 'EU', u'Jiné - EU'),
    (3, 'NON_EU', u'Jiné - non-EU'),
    (4, 'UNKNOWN', u'Neznámo')
)
# Client ethnic origin.
ETHNIC_ORIGINS = Choices(
    (1, 'NON_GYPSY', u'Ne-romská'),
    (2, 'GYPSY', u'Romská'),
    (3, 'NOT_MONITORED', u'Nesledováno')
)
# Who the client lives with.
LIVING_CONDITIONS = Choices(
    (1, 'ALONE', u'Sám'),
    (2, 'WITH_FAMILY', u'S rodiči/rodinou'),
    (3, 'WITH_FRIENDS', u'S přáteli'),
    (4, 'WITH_PARTNER', u'S partnerem'),
    (5, 'WITH_PARTNER_AND_CHILDREN', u'S partnerem a dítětem'),
    (6, 'ALONE_WITH_CHILDREN', u'Sám s dítětem'),
    (7, 'UNKNOWN', u'Není známo')
)
# Type of housing/accommodation.
ACCOMODATION_TYPES = Choices(
    (1, 'WITH_PARENTS', u'Doma (u rodičů)'),
    (2, 'OWN_FLAT', u'Vlastní byt (i pronajatý)'),
    (3, 'FOREIGN_FLAT', u'Cizí byt'),
    (4, 'PUBLIC_ACCOMODATION', u'Ubytovna'),
    (5, 'SQUAT', u'Squat'),
    (6, 'BARRACKS', u'Kasárna'),
    (7, 'HOMELESS', u'Bez domova, na ulici'),
    (8, 'UNKNOWN', u'Není známo')
)
# Employment status.  Note: there is no value 7 in the original data.
EMPLOYMENT_TYPES = Choices(
    (1, 'REGULAR', u'Pravidelné zam.'),
    (2, 'SCHOOL', u'Škola'),
    (3, 'OCCASIONAL_WORK', u'Příležitostná práce'),
    (4, 'REGISTERED_ON_EB', u'Registrován na ÚP'),
    (5, 'NO_EMPLOYMENT', u'Bez zaměstnání'),
    (6, 'STATE_SUPPORT', u'Dávky SZ'),
    (8, 'UNKNOWN', u'Není známo')
)
# Highest completed education level.
EDUCATION_LEVELS = Choices(
    (1, 'BASIC', u'Základní'),
    (2, 'PRACTICAL_SECONDARY', u'Vyučen'),
    (3, 'SECONDARY', u'Střední s maturitou'),
    (4, 'HIGHER_PRACTICAL', u'Vyšší odborné'),
    (5, 'UNIVERSITY_GRADE', u'Vysokoškolské'),
    (6, 'BASIC_NOT_COMPLETED', u'Neukončené základní'),
    (7, 'UNKNOWN', u'Není známo')
)
# Drugs and non-substance addictions tracked by the application.
DRUGS = Choices( # (Numbers reflect the old drug ids.)
    (3, 'METHAMPHETAMINE', u'Pervitin, jiné amfetaminy'),
    (4, 'SUBUTEX_LEGAL', u'Subutex, Ravata, Buprenorphine alkaloid - legálně'),
    (5, 'TOBACCO', u'Tabák'),
    (8, 'THC', u'THC'),
    (9, 'ECSTASY', u'Extáze'),
    (10, 'DESIGNER_DRUGS', u'Designer drugs'),
    (11, 'HEROIN', u'Heroin'),
    (12, 'BRAUN', u'Braun a jiné opiáty'),
    (13, 'RAW_OPIUM', u'Surové opium'),
    (14, 'SUBUTEX_ILLEGAL', u'Subutex, Ravata, Buprenorphine alkaloid - ilegálně'),
    (16, 'ALCOHOL', u'Alkohol',),
    (17, 'INHALER_DRUGS', u'Inhalační látky, ředidla'),
    (18, 'MEDICAMENTS', u'Medikamenty'),
    (19, 'METHADONE', u'Metadon'),
    (20, 'COCAINE', u'Kokain, crack'),
    (21, 'SUBOXONE', u'Suboxone'),
    (22, 'VENDAL', u'Vendal'),
    (23, 'LSD', u'LSD'),
    (24, 'PSYLOCIBE', u'Lysohlávky'),
    (28, 'FENTANYL', u'Fentanyl'),
    (25, 'UNKNOWN', u'Neznámo'),
    (26, 'PATHOLOGICAL_GAMBLING', u'Patologické hráčství'),
    (27, 'OTHER_NON_SUBSTANCE_ADDICTION', u'Jiná nelátková závislost'),
)
# Disable `application`, `first_try_application` and `primary_drug_usage` fields for these drugs
# NOTE(review): these ids are strings while DRUGS values are ints — confirm
# that code comparing against them converts consistently.
NON_APPLICATION_DRUGS = ['26', '27']
# How often the drug is used.
DRUG_APPLICATION_FREQUENCY = Choices(
    (1, 'LESS_THAN_3X_A_MONTH', u'méně než 3x měsíčně'),
    (2, 'ONCE_A_WEEK', u'1x týdně'),
    (3, 'ON_WEEKENDS', u'víkendově'),
    (4, 'EVERY_SECOND_DAY', u'obden'),
    (5, 'DAILY', u'denně'),
    (6, '2X_3X_A_DAY', u'2-3x denně'),
    (7, 'MORE_THAN_3X_A_DAY', u'více než 3x denně'),
    (8, 'NONE_FOR_MORE_THAN_6_MONTHS', u'neužita déle než 6 měsíců'),
    # (9, 'NONE_FOR_LAST_6_MONTHS', u'neužita posledních 6 měsíců'), # Feature 103
    (10, 'NONE_FOR_LAST_3_MONTHS', u'neužita poslední 3 měsíce'),
    (11, 'NONE_FOR_LAST_1_MONTH', u'neužita v posledním měsíci'),
    (12, 'UNKNOWN', u'Není známo')
)
# Route of administration.
DRUG_APPLICATION_TYPES = Choices(
    (1, 'VEIN_INJECTION', u'injekčně do žíly'),
    (2, 'MUSCLE_INJECTION', u'injekčně do svalu'),
    (3, 'ORAL', u'ústně'),
    (4, 'SNIFFING', u'sniff (šňupání)'),
    (5, 'SMOKING', u'kouření'),
    (6, 'INHALATION', u'inhalace'),
    (7, 'UNKNOWN', u'Není známo')
)
# Categories of risky behaviour.
RISKY_BEHAVIOR_KIND = Choices(
    (1, 'EQUIPMENT_SHARING', u'Sdílení náčiní'),
    (2, 'SEX_WITHOUT_PROTECTION', u'Nechráněný sex'),
    (3, 'SYRINGE_SHARING', u'Sdílení jehel'),
    (4, 'INTRAVENOUS_APPLICATION', u'Nitrožilní aplikace'),
    (5, 'RISKY_APPLICATION', u'Riziková aplikace'),
    (6, 'OVERDOSING', u'Předávkování'),
    (7, 'HEALTH_COMPLICATIONS', u'Zdravotní komplikace')
)
# How often the risky behaviour occurs.
RISKY_BEHAVIOR_PERIODICITY = Choices(
    (1, 'NEVER', u'Nikdy'),
    (2, 'ONCE', u'Jednorázově'),
    (3, 'OFTEN', u'Opakovaně '),
    (4, 'UNKNOWN', u'Není známo')
)
# Monitored infectious diseases.
DISEASES = Choices(
    (1, 'HIV', u'HIV'),
    (2, 'VHA', u'VHA'),
    (3, 'VHB', u'VHB'),
    (4, 'VHC', u'VHC'),
    (5, 'SYFILIS', u'Syfilis'),
)
# Outcome of a disease test.
DISEASE_TEST_RESULTS = Choices(
    (0, 'UNKNOWN', u'Neznámo, zda testován'),
    (1, 'TESTED_POSITIVE', u'Testován - pozitivní'),
    (2, 'TESTED_NEGATIVE', u'Testován - negativní'),
    (3, 'TESTED_UNKNOWN', u'Testován - výsledek neznámý'),
    (4, 'NOT_TESTED', u'Nikdy netestován'),
    (5, 'RESULT_NOT_ACCLAIMED', u'Nevyzvedl výsledek'),
)
# Test result sign — note the values are single characters, not ints.
DISEASE_TEST_SIGN = Choices(
    ('p', 'POSITIVE', u'Pozitivní'),
    ('n', 'NEGATIVE', u'Negativní'),
    ('r', 'REACTIVE', u'Reaktivní'),
    ('i', 'INCONCLUSIVE', u'Test neprůkazný')
)
# Categories of anonymous clients.
ANONYMOUS_TYPES = Choices(
    (1, 'NON_USER', u'neuživatel'),
    (2, 'NON_IV', u'neIV'),
    (3, 'IV', u'IV'),
    (4, 'NON_USER_PARENT', u'rodič'),
    (5, 'THC', u'THC')
)
def get_drug_by_id(id):
    """Return the DRUGS entry whose value equals ``id``, or None if absent."""
    return next((entry for entry in DRUGS if entry[0] == id), None)
| [
"model_utils.Choices"
] | [((112, 164), 'model_utils.Choices', 'Choices', (["(1, 'FEMALE', u'žena')", "(2, 'MALE', u'muž')"], {}), "((1, 'FEMALE', u'žena'), (2, 'MALE', u'muž'))\n", (119, 164), False, 'from model_utils import Choices\n'), ((191, 319), 'model_utils.Choices', 'Choices', (["(1, 'CZ', u'Česká republika')", "(2, 'EU', u'Jiné - EU')", "(3, 'NON_EU', u'Jiné - non-EU')", "(4, 'UNKNOWN', u'Neznámo')"], {}), "((1, 'CZ', u'Česká republika'), (2, 'EU', u'Jiné - EU'), (3,\n 'NON_EU', u'Jiné - non-EU'), (4, 'UNKNOWN', u'Neznámo'))\n", (198, 319), False, 'from model_utils import Choices\n'), ((351, 457), 'model_utils.Choices', 'Choices', (["(1, 'NON_GYPSY', u'Ne-romská')", "(2, 'GYPSY', u'Romská')", "(3, 'NOT_MONITORED', u'Nesledováno')"], {}), "((1, 'NON_GYPSY', u'Ne-romská'), (2, 'GYPSY', u'Romská'), (3,\n 'NOT_MONITORED', u'Nesledováno'))\n", (358, 457), False, 'from model_utils import Choices\n'), ((488, 779), 'model_utils.Choices', 'Choices', (["(1, 'ALONE', u'Sám')", "(2, 'WITH_FAMILY', u'S rodiči/rodinou')", "(3, 'WITH_FRIENDS', u'S přáteli')", "(4, 'WITH_PARTNER', u'S partnerem')", "(5, 'WITH_PARTNER_AND_CHILDREN', u'S partnerem a dítětem')", "(6, 'ALONE_WITH_CHILDREN', u'Sám s dítětem')", "(7, 'UNKNOWN', u'Není známo')"], {}), "((1, 'ALONE', u'Sám'), (2, 'WITH_FAMILY', u'S rodiči/rodinou'), (3,\n 'WITH_FRIENDS', u'S přáteli'), (4, 'WITH_PARTNER', u'S partnerem'), (5,\n 'WITH_PARTNER_AND_CHILDREN', u'S partnerem a dítětem'), (6,\n 'ALONE_WITH_CHILDREN', u'Sám s dítětem'), (7, 'UNKNOWN', u'Není známo'))\n", (495, 779), False, 'from model_utils import Choices\n'), ((819, 1131), 'model_utils.Choices', 'Choices', (["(1, 'WITH_PARENTS', u'Doma (u rodičů)')", "(2, 'OWN_FLAT', u'Vlastní byt (i pronajatý)')", "(3, 'FOREIGN_FLAT', u'Cizí byt')", "(4, 'PUBLIC_ACCOMODATION', u'Ubytovna')", "(5, 'SQUAT', u'Squat')", "(6, 'BARRACKS', u'Kasárna')", "(7, 'HOMELESS', u'Bez domova, na ulici')", "(8, 'UNKNOWN', u'Není známo')"], {}), "((1, 'WITH_PARENTS', u'Doma (u rodičů)'), (2, 'OWN_FLAT',\n 
u'Vlastní byt (i pronajatý)'), (3, 'FOREIGN_FLAT', u'Cizí byt'), (4,\n 'PUBLIC_ACCOMODATION', u'Ubytovna'), (5, 'SQUAT', u'Squat'), (6,\n 'BARRACKS', u'Kasárna'), (7, 'HOMELESS', u'Bez domova, na ulici'), (8,\n 'UNKNOWN', u'Není známo'))\n", (826, 1131), False, 'from model_utils import Choices\n'), ((1169, 1451), 'model_utils.Choices', 'Choices', (["(1, 'REGULAR', u'Pravidelné zam.')", "(2, 'SCHOOL', u'Škola')", "(3, 'OCCASIONAL_WORK', u'Příležitostná práce')", "(4, 'REGISTERED_ON_EB', u'Registrován na ÚP')", "(5, 'NO_EMPLOYMENT', u'Bez zaměstnání')", "(6, 'STATE_SUPPORT', u'Dávky SZ')", "(8, 'UNKNOWN', u'Není známo')"], {}), "((1, 'REGULAR', u'Pravidelné zam.'), (2, 'SCHOOL', u'Škola'), (3,\n 'OCCASIONAL_WORK', u'Příležitostná práce'), (4, 'REGISTERED_ON_EB',\n u'Registrován na ÚP'), (5, 'NO_EMPLOYMENT', u'Bez zaměstnání'), (6,\n 'STATE_SUPPORT', u'Dávky SZ'), (8, 'UNKNOWN', u'Není známo'))\n", (1176, 1451), False, 'from model_utils import Choices\n'), ((1489, 1790), 'model_utils.Choices', 'Choices', (["(1, 'BASIC', u'Základní')", "(2, 'PRACTICAL_SECONDARY', u'Vyučen')", "(3, 'SECONDARY', u'Střední s maturitou')", "(4, 'HIGHER_PRACTICAL', u'Vyšší odborné')", "(5, 'UNIVERSITY_GRADE', u'Vysokoškolské')", "(6, 'BASIC_NOT_COMPLETED', u'Neukončené základní')", "(7, 'UNKNOWN', u'Není známo')"], {}), "((1, 'BASIC', u'Základní'), (2, 'PRACTICAL_SECONDARY', u'Vyučen'), (\n 3, 'SECONDARY', u'Střední s maturitou'), (4, 'HIGHER_PRACTICAL',\n u'Vyšší odborné'), (5, 'UNIVERSITY_GRADE', u'Vysokoškolské'), (6,\n 'BASIC_NOT_COMPLETED', u'Neukončené základní'), (7, 'UNKNOWN',\n u'Není známo'))\n", (1496, 1790), False, 'from model_utils import Choices\n'), ((1812, 2783), 'model_utils.Choices', 'Choices', (["(3, 'METHAMPHETAMINE', u'Pervitin, jiné amfetaminy')", "(4, 'SUBUTEX_LEGAL', u'Subutex, Ravata, Buprenorphine alkaloid - legálně')", "(5, 'TOBACCO', u'Tabák')", "(8, 'THC', u'THC')", "(9, 'ECSTASY', u'Extáze')", "(10, 'DESIGNER_DRUGS', u'Designer drugs')", "(11, 'HEROIN', 
u'Heroin')", "(12, 'BRAUN', u'Braun a jiné opiáty')", "(13, 'RAW_OPIUM', u'Surové opium')", "(14, 'SUBUTEX_ILLEGAL', u'Subutex, Ravata, Buprenorphine alkaloid - ilegálně')", "(16, 'ALCOHOL', u'Alkohol')", "(17, 'INHALER_DRUGS', u'Inhalační látky, ředidla')", "(18, 'MEDICAMENTS', u'Medikamenty')", "(19, 'METHADONE', u'Metadon')", "(20, 'COCAINE', u'Kokain, crack')", "(21, 'SUBOXONE', u'Suboxone')", "(22, 'VENDAL', u'Vendal')", "(23, 'LSD', u'LSD')", "(24, 'PSYLOCIBE', u'Lysohlávky')", "(28, 'FENTANYL', u'Fentanyl')", "(25, 'UNKNOWN', u'Neznámo')", "(26, 'PATHOLOGICAL_GAMBLING', u'Patologické hráčství')", "(27, 'OTHER_NON_SUBSTANCE_ADDICTION', u'Jiná nelátková závislost')"], {}), "((3, 'METHAMPHETAMINE', u'Pervitin, jiné amfetaminy'), (4,\n 'SUBUTEX_LEGAL', u'Subutex, Ravata, Buprenorphine alkaloid - legálně'),\n (5, 'TOBACCO', u'Tabák'), (8, 'THC', u'THC'), (9, 'ECSTASY', u'Extáze'),\n (10, 'DESIGNER_DRUGS', u'Designer drugs'), (11, 'HEROIN', u'Heroin'), (\n 12, 'BRAUN', u'Braun a jiné opiáty'), (13, 'RAW_OPIUM', u'Surové opium'\n ), (14, 'SUBUTEX_ILLEGAL',\n u'Subutex, Ravata, Buprenorphine alkaloid - ilegálně'), (16, 'ALCOHOL',\n u'Alkohol'), (17, 'INHALER_DRUGS', u'Inhalační látky, ředidla'), (18,\n 'MEDICAMENTS', u'Medikamenty'), (19, 'METHADONE', u'Metadon'), (20,\n 'COCAINE', u'Kokain, crack'), (21, 'SUBOXONE', u'Suboxone'), (22,\n 'VENDAL', u'Vendal'), (23, 'LSD', u'LSD'), (24, 'PSYLOCIBE',\n u'Lysohlávky'), (28, 'FENTANYL', u'Fentanyl'), (25, 'UNKNOWN',\n u'Neznámo'), (26, 'PATHOLOGICAL_GAMBLING', u'Patologické hráčství'), (\n 27, 'OTHER_NON_SUBSTANCE_ADDICTION', u'Jiná nelátková závislost'))\n", (1819, 2783), False, 'from model_utils import Choices\n'), ((3028, 3548), 'model_utils.Choices', 'Choices', (["(1, 'LESS_THAN_3X_A_MONTH', u'méně než 3x měsíčně')", "(2, 'ONCE_A_WEEK', u'1x týdně')", "(3, 'ON_WEEKENDS', u'víkendově')", "(4, 'EVERY_SECOND_DAY', u'obden')", "(5, 'DAILY', u'denně')", "(6, '2X_3X_A_DAY', u'2-3x denně')", "(7, 'MORE_THAN_3X_A_DAY', 
u'více než 3x denně')", "(8, 'NONE_FOR_MORE_THAN_6_MONTHS', u'neužita déle než 6 měsíců')", "(10, 'NONE_FOR_LAST_3_MONTHS', u'neužita poslední 3 měsíce')", "(11, 'NONE_FOR_LAST_1_MONTH', u'neužita v posledním měsíci')", "(12, 'UNKNOWN', u'Není známo')"], {}), "((1, 'LESS_THAN_3X_A_MONTH', u'méně než 3x měsíčně'), (2,\n 'ONCE_A_WEEK', u'1x týdně'), (3, 'ON_WEEKENDS', u'víkendově'), (4,\n 'EVERY_SECOND_DAY', u'obden'), (5, 'DAILY', u'denně'), (6,\n '2X_3X_A_DAY', u'2-3x denně'), (7, 'MORE_THAN_3X_A_DAY',\n u'více než 3x denně'), (8, 'NONE_FOR_MORE_THAN_6_MONTHS',\n u'neužita déle než 6 měsíců'), (10, 'NONE_FOR_LAST_3_MONTHS',\n u'neužita poslední 3 měsíce'), (11, 'NONE_FOR_LAST_1_MONTH',\n u'neužita v posledním měsíci'), (12, 'UNKNOWN', u'Není známo'))\n", (3035, 3548), False, 'from model_utils import Choices\n'), ((3675, 3936), 'model_utils.Choices', 'Choices', (["(1, 'VEIN_INJECTION', u'injekčně do žíly')", "(2, 'MUSCLE_INJECTION', u'injekčně do svalu')", "(3, 'ORAL', u'ústně')", "(4, 'SNIFFING', u'sniff (šňupání)')", "(5, 'SMOKING', u'kouření')", "(6, 'INHALATION', u'inhalace')", "(7, 'UNKNOWN', u'Není známo')"], {}), "((1, 'VEIN_INJECTION', u'injekčně do žíly'), (2, 'MUSCLE_INJECTION',\n u'injekčně do svalu'), (3, 'ORAL', u'ústně'), (4, 'SNIFFING',\n u'sniff (šňupání)'), (5, 'SMOKING', u'kouření'), (6, 'INHALATION',\n u'inhalace'), (7, 'UNKNOWN', u'Není známo'))\n", (3682, 3936), False, 'from model_utils import Choices\n'), ((3978, 4336), 'model_utils.Choices', 'Choices', (["(1, 'EQUIPMENT_SHARING', u'Sdílení náčiní')", "(2, 'SEX_WITHOUT_PROTECTION', u'Nechráněný sex')", "(3, 'SYRINGE_SHARING', u'Sdílení jehel')", "(4, 'INTRAVENOUS_APPLICATION', u'Nitrožilní aplikace')", "(5, 'RISKY_APPLICATION', u'Riziková aplikace')", "(6, 'OVERDOSING', u'Předávkování')", "(7, 'HEALTH_COMPLICATIONS', u'Zdravotní komplikace')"], {}), "((1, 'EQUIPMENT_SHARING', u'Sdílení náčiní'), (2,\n 'SEX_WITHOUT_PROTECTION', u'Nechráněný sex'), (3, 'SYRINGE_SHARING',\n u'Sdílení jehel'), (4, 
'INTRAVENOUS_APPLICATION',\n u'Nitrožilní aplikace'), (5, 'RISKY_APPLICATION', u'Riziková aplikace'),\n (6, 'OVERDOSING', u'Předávkování'), (7, 'HEALTH_COMPLICATIONS',\n u'Zdravotní komplikace'))\n", (3985, 4336), False, 'from model_utils import Choices\n'), ((4377, 4501), 'model_utils.Choices', 'Choices', (["(1, 'NEVER', u'Nikdy')", "(2, 'ONCE', u'Jednorázově')", "(3, 'OFTEN', u'Opakovaně ')", "(4, 'UNKNOWN', u'Není známo')"], {}), "((1, 'NEVER', u'Nikdy'), (2, 'ONCE', u'Jednorázově'), (3, 'OFTEN',\n u'Opakovaně '), (4, 'UNKNOWN', u'Není známo'))\n", (4384, 4501), False, 'from model_utils import Choices\n'), ((4528, 4647), 'model_utils.Choices', 'Choices', (["(1, 'HIV', u'HIV')", "(2, 'VHA', u'VHA')", "(3, 'VHB', u'VHB')", "(4, 'VHC', u'VHC')", "(5, 'SYFILIS', u'Syfilis')"], {}), "((1, 'HIV', u'HIV'), (2, 'VHA', u'VHA'), (3, 'VHB', u'VHB'), (4,\n 'VHC', u'VHC'), (5, 'SYFILIS', u'Syfilis'))\n", (4535, 4647), False, 'from model_utils import Choices\n'), ((4691, 5001), 'model_utils.Choices', 'Choices', (["(0, 'UNKNOWN', u'Neznámo, zda testován')", "(1, 'TESTED_POSITIVE', u'Testován - pozitivní')", "(2, 'TESTED_NEGATIVE', u'Testován - negativní')", "(3, 'TESTED_UNKNOWN', u'Testován - výsledek neznámý')", "(4, 'NOT_TESTED', u'Nikdy netestován')", "(5, 'RESULT_NOT_ACCLAIMED', u'Nevyzvedl výsledek')"], {}), "((0, 'UNKNOWN', u'Neznámo, zda testován'), (1, 'TESTED_POSITIVE',\n u'Testován - pozitivní'), (2, 'TESTED_NEGATIVE',\n u'Testován - negativní'), (3, 'TESTED_UNKNOWN',\n u'Testován - výsledek neznámý'), (4, 'NOT_TESTED', u'Nikdy netestován'),\n (5, 'RESULT_NOT_ACCLAIMED', u'Nevyzvedl výsledek'))\n", (4698, 5001), False, 'from model_utils import Choices\n'), ((5031, 5185), 'model_utils.Choices', 'Choices', (["('p', 'POSITIVE', u'Pozitivní')", "('n', 'NEGATIVE', u'Negativní')", "('r', 'REACTIVE', u'Reaktivní')", "('i', 'INCONCLUSIVE', u'Test neprůkazný')"], {}), "(('p', 'POSITIVE', u'Pozitivní'), ('n', 'NEGATIVE', u'Negativní'), (\n 'r', 'REACTIVE', u'Reaktivní'), ('i', 
'INCONCLUSIVE', u'Test neprůkazný'))\n", (5038, 5185), False, 'from model_utils import Choices\n'), ((5218, 5357), 'model_utils.Choices', 'Choices', (["(1, 'NON_USER', u'neuživatel')", "(2, 'NON_IV', u'neIV')", "(3, 'IV', u'IV')", "(4, 'NON_USER_PARENT', u'rodič')", "(5, 'THC', u'THC')"], {}), "((1, 'NON_USER', u'neuživatel'), (2, 'NON_IV', u'neIV'), (3, 'IV',\n u'IV'), (4, 'NON_USER_PARENT', u'rodič'), (5, 'THC', u'THC'))\n", (5225, 5357), False, 'from model_utils import Choices\n')] |
######################################################################
# OLED_Clock.py
#
# This program display date and time on OLED module
######################################################################
import Adafruit_SSD1306
from datetime import datetime
import time
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Setup Display
RST=24
device = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
device.begin()
device.clear()
device.display()
width = device.width
height = device.height
fontFile = '/usr/share/fonts/truetype/freefont/FreeSansBold.ttf'
smallFont = ImageFont.truetype(fontFile, 12)
largeFont = ImageFont.truetype(fontFile, 33)
# Display a message on 3 lines, first line big font
def DisplayMessage(line1, line2):
global device
image = Image.new('1', (width, height))
draw = ImageDraw.Draw(image)
maxWidth, unused = draw.textsize(line1, font=largeFont)
#with canvas(deviccd e) as draw:
draw.text((10, 0), line1, font=smallFont, fill=255)
draw.text((0, 20), line2, font=largeFont, fill=255)
device.image(image)
device.display()
while True:
now = datetime.now()
dateMessage = '{:%d %B %Y}'.format(now)
timeMessage = '{:%H:%M:%S}'.format(now)
DisplayMessage(dateMessage,timeMessage)
time.sleep(0.1)
| [
"PIL.Image.new",
"PIL.ImageFont.truetype",
"time.sleep",
"Adafruit_SSD1306.SSD1306_128_64",
"datetime.datetime.now",
"PIL.ImageDraw.Draw"
] | [((392, 432), 'Adafruit_SSD1306.SSD1306_128_64', 'Adafruit_SSD1306.SSD1306_128_64', ([], {'rst': 'RST'}), '(rst=RST)\n', (423, 432), False, 'import Adafruit_SSD1306\n'), ((602, 634), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['fontFile', '(12)'], {}), '(fontFile, 12)\n', (620, 634), False, 'from PIL import ImageFont\n'), ((647, 679), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['fontFile', '(33)'], {}), '(fontFile, 33)\n', (665, 679), False, 'from PIL import ImageFont\n'), ((805, 836), 'PIL.Image.new', 'Image.new', (['"""1"""', '(width, height)'], {}), "('1', (width, height))\n", (814, 836), False, 'from PIL import Image\n'), ((848, 869), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (862, 869), False, 'from PIL import ImageDraw\n'), ((1149, 1163), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1161, 1163), False, 'from datetime import datetime\n'), ((1300, 1315), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1310, 1315), False, 'import time\n')] |
'''
Contains the extended FastAPI router, for simplified CRUD from a model
'''
from typing import Any, List, Optional, Sequence, Set, Type, Union
import fastapi
from fastapi import Depends, params
from pydantic import BaseModel, create_model
from odim import Odim, OkResponse, SearchResponse
from odim.dependencies import SearchParams
class OdimRouter(fastapi.APIRouter):
''' Simplified FastAPI router for easy CRUD '''
def mount_crud(self,
path: str,
*,
model : Type[BaseModel],
tags: Optional[List[str]] = None,
dependencies : Optional[Sequence[params.Depends]] = None,
include_in_schema: bool = True,
methods : Optional[Union[Set[str], List[str]]] = ('create','get','search','save','update','delete'),
methods_exclude : Optional[Union[Set[str], List[str]]] = [],
extend_query : dict= {}):
''' Add endpoints for CRUD operations for particular model
:param path: base_path, for the model resource location eg: /api/houses/
:param model: pydantic/Odim BaseModel, that is used for eg. Houses
:param tags: Starlette/FastAPI tags for endpoints
:param dependencies: Starlette/FastAPI dependencies for all endpoints
:param include_in_schema: whether to include in docs
:param methods: methods to automatically generate ('create','get','search','save','update','delete')
:param methods_exclude: methods to NOT automatically generate ('create','get','search','save','update','delete')
:param extend_query: adds these parameters to every query and sets it on the object upon creation. keys are fields, values can be exact values or functions taking request as parameter
'''
add_methods = [ x for x in methods if x not in methods_exclude ]
if 'create' in add_methods:
async def create(request : fastapi.Request, obj : model):
for k, v in exec_extend_qeury(request,extend_query).items():
setattr(obj, k, v)
await Odim(obj).save()
return obj
self.add_api_route(path = path,
endpoint=create,
response_model=model,
status_code=201,
tags=tags,
dependencies = dependencies,
summary="Create new %s" % model.schema().get('title'),
description = "Create new instance of %s " % model.schema().get('title'),
methods = ["POST"],
include_in_schema = include_in_schema)
if 'get' in add_methods:
async def get(request : fastapi.Request, id : str):
return await Odim(model).get(id=id, extend_query=exec_extend_qeury(request,extend_query))
self.add_api_route(path = path+"{id}",
endpoint=get,
response_model=model,
tags=tags,
dependencies = dependencies,
summary="Get %s by id" % model.schema().get('title'),
description = "Return individual %s details " % model.schema().get('title'),
methods = ["GET"],
include_in_schema = include_in_schema)
if 'search' in add_methods:
async def search(request : fastapi.Request, search_params : dict = Depends(SearchParams)):
sp = {**search_params.q, **exec_extend_qeury(request,extend_query)}
rsp = { "results" : await Odim(model).find(sp, search_params),
"total" : await Odim(model).count(sp),
"search" : search_params.dict()}
return rsp
self.add_api_route(path = path,
endpoint=search,
response_model=SearchResponse[model],
tags=tags,
dependencies = dependencies,
summary="Search for %ss" % model.schema().get('title'),
description = "Performs a listing search for %s " % model.schema().get('title'),
methods = ["GET"],
include_in_schema = include_in_schema)
if 'save' in add_methods:
async def save(request : fastapi.Request, id : str, obj : model):
obj.id = id
await Odim(obj).save(extend_query=exec_extend_qeury(request,extend_query))
return obj
self.add_api_route(path = path+"{id}",
endpoint=save,
response_model=model,
tags=tags,
dependencies = dependencies,
summary="Replace %s by id" % model.schema().get('title'),
description = "PUT replaces the original %s as whole " % model.schema().get('title'),
methods = ["PUT"],
include_in_schema = include_in_schema)
if 'update' in add_methods:
async def update(request : fastapi.Request, id : str, obj : model):
obj.id = id
await Odim(obj).update(extend_query=exec_extend_qeury(request,extend_query))
return obj
self.add_api_route(path = path+"{id}",
endpoint=update,
response_model=model,
tags=tags,
dependencies = dependencies,
summary="Partial update %s by id" % model.schema().get('title'),
description = "Just updates individual fields of %s " % model.schema().get('title'),
methods = ["Patch"],
include_in_schema = include_in_schema)
if 'delete' in add_methods:
async def delete(request : fastapi.Request, id : str) -> None:
await Odim(model).delete(id, extend_query=exec_extend_qeury(request,extend_query))
return OkResponse()
self.add_api_route(path = path+"{id}",
endpoint=delete,
response_model=OkResponse,
status_code=200,
tags=tags,
dependencies = dependencies,
summary="Delete %s by id" % model.schema().get('title'),
description = "Deletes individual instance of %s " % model.schema().get('title'),
methods = ["DELETE"],
include_in_schema = include_in_schema)
def generate(self,
path: str,
*,
model : Type[BaseModel],
tags: Optional[List[str]] = None,
dependencies : Optional[Sequence[params.Depends]] = None,
include_in_schema: bool = True,
methods : Optional[Union[Set[str], List[str]]] = ('create','get','search','save','update','delete'),
methods_exclude : Optional[Union[Set[str], List[str]]] = []):
''' Generates the code for the endpoints
:param path: base_path, for the model resource location eg: /api/houses/
:param model: pydantic/Odim BaseModel, that is used for eg. Houses
:param tags: Starlette/FastAPI tags for endpoints
:param dependencies: Starlette/FastAPI dependencies for all endpoints
:param include_in_schema: whether to include in docs
:param methods: methods to automatically generate ('create','get','search','save','update','delete')
:param methods_exclude: methods to NOT automatically generate ('create','get','search','save','update','delete')
'''
add_methods = [ x for x in methods if x not in methods_exclude ]
model_name = model.__name__
other=""
if tags:
other+= ", tags="+str(tags)
if dependencies:
other+= ", dependencies="+str(dependencies)
if not include_in_schema:
other+= ", include_in_schema=False"
if 'get' in add_methods:
print(f'''
@router.get("{path}{{id}}", response_model={model_name}{other})
async def get_{model_name}(id : str):
\'\'\' Returns the individual {model_name} details\'\'\'
return await Odim({model_name}).get(id=id)
''')
if 'search' in add_methods:
print(f'''
@router.get("{path}", response_model=SearchResponse[{model_name}]{other})
async def search_{model_name}(search : dict = Depends(SearchParams)):
rsp = {{ "results" : await Odim({model_name}).find(search.q, search),
"total" : await Odim({model_name}).count(search.q),
"search" : search.dict()}}
return rsp
''')
if 'create' in add_methods:
print(f'''
@router.post("{path}", status_code=201, response_model={model_name}{other})
async def create_{model_name}(obj : {model_name}):
await Odim(obj).save()
return obj
''')
if 'save' in add_methods:
print(f'''
@router.put("{path}{{id}}", response_model={model_name}{other})
async def save_{model_name}(id : str, obj : {model_name}):
obj.id = id
await Odim(obj).save()
return obj
''')
if 'update' in add_methods:
print(f'''
@router.patch("{path}{{id}}", response_model={model_name}{other})
async def update_{model_name}(id : str, obj : {model_name}):
obj.id = id
await Odim(obj).update()
return obj
''')
if 'delete' in add_methods:
print(f'''
@router.delete("{path}{{id}}", status_code=200, response_model=OkResponse)
async def delete_{model_name}(id : str):
await Odim(obj).delete(id)
return OkResponse()
''')
def exec_extend_qeury(request : fastapi.Request, sl : dict = {}):
out = {}
for k, v in sl.items():
if callable(v):
out[k] = v(request)
else:
out[k] = v
return out | [
"odim.OkResponse",
"odim.Odim",
"fastapi.Depends"
] | [((3431, 3452), 'fastapi.Depends', 'Depends', (['SearchParams'], {}), '(SearchParams)\n', (3438, 3452), False, 'from fastapi import Depends, params\n'), ((5915, 5927), 'odim.OkResponse', 'OkResponse', ([], {}), '()\n', (5925, 5927), False, 'from odim import Odim, OkResponse, SearchResponse\n'), ((2045, 2054), 'odim.Odim', 'Odim', (['obj'], {}), '(obj)\n', (2049, 2054), False, 'from odim import Odim, OkResponse, SearchResponse\n'), ((2738, 2749), 'odim.Odim', 'Odim', (['model'], {}), '(model)\n', (2742, 2749), False, 'from odim import Odim, OkResponse, SearchResponse\n'), ((4391, 4400), 'odim.Odim', 'Odim', (['obj'], {}), '(obj)\n', (4395, 4400), False, 'from odim import Odim, OkResponse, SearchResponse\n'), ((5114, 5123), 'odim.Odim', 'Odim', (['obj'], {}), '(obj)\n', (5118, 5123), False, 'from odim import Odim, OkResponse, SearchResponse\n'), ((5823, 5834), 'odim.Odim', 'Odim', (['model'], {}), '(model)\n', (5827, 5834), False, 'from odim import Odim, OkResponse, SearchResponse\n'), ((3565, 3576), 'odim.Odim', 'Odim', (['model'], {}), '(model)\n', (3569, 3576), False, 'from odim import Odim, OkResponse, SearchResponse\n'), ((3634, 3645), 'odim.Odim', 'Odim', (['model'], {}), '(model)\n', (3638, 3645), False, 'from odim import Odim, OkResponse, SearchResponse\n')] |
from collections import namedtuple
RGB = namedtuple("RGB", "red, green, blue")
COLORS = {
"red": RGB(255, 0, 0),
"orange-deep": RGB(255, 40, 0),
"orange": RGB(255, 120, 0),
"yellow": RGB(255, 200, 0),
"yellow-acid": RGB(160, 255, 0),
"green": RGB(0, 255, 0),
"green-forest": RGB(34, 139, 34),
"green-spring": RGB(0, 255, 127),
"green-teal": RGB(0, 128, 128),
"green-turquoise": RGB(0, 199, 140),
"green-coral": RGB(0, 255, 50),
"cyan": RGB(0, 255, 255),
"blue": RGB(0, 0, 255),
"blue-light": RGB(65, 105, 225),
"blue-navy": RGB(0, 0, 128),
"blue-aqua": RGB(0, 255, 255),
"purple": RGB(128, 0, 128),
"pink": RGB(255, 0, 178),
"magenta": RGB(255, 0, 255),
"black": RGB(0, 0, 0),
"white": RGB(255, 255, 255),
"brown": RGB(139, 69, 19),
"gold": RGB(255, 215, 0),
"hotpink": RGB(255, 105, 180),
"lightblue": RGB(173, 216, 230),
"lightgreen": RGB(152, 251, 152),
"lightpink": RGB(255, 182, 193),
"lightyellow": RGB(255, 255, 224),
"maroon": RGB(128, 0, 0),
"mint": RGB(189, 252, 201),
"olive": RGB(85, 107, 47),
"peach": RGB(255, 100, 100),
"plum": RGB(221, 160, 221),
"sepia": RGB(94, 38, 18),
"skyblue": RGB(135, 206, 235),
"steelblue": RGB(70, 130, 180),
"tan": RGB(210, 180, 140),
"violetred": RGB(208, 32, 144),
}
GRADIENTS = {
"Rainbow": {
"colors": [
"red",
"orange",
"yellow",
"green",
"green-turquoise",
"blue",
"purple",
"pink",
]
},
"Dancefloor": {"colors": ["red", "pink", "blue"]},
"Plasma": {"colors": ["blue", "purple", "red", "orange-deep", "yellow"]},
"Ocean": {"colors": ["blue-aqua", "blue"]},
"Viridis": {"colors": ["purple", "blue", "green-teal", "green", "yellow"]},
"Jungle": {"colors": ["green", "green-forest", "orange"]},
"Spring": {"colors": ["pink", "orange-deep", "yellow"]},
"Winter": {"colors": ["green-turquoise", "green-coral"]},
"Frost": {"colors": ["blue", "blue-aqua", "purple", "pink"]},
"Sunset": {"colors": ["blue-navy", "orange", "red"]},
"Borealis": {
"colors": [
"orange-deep",
"purple",
"green-turquoise",
"green",
]
},
"Rust": {"colors": ["orange-deep", "red"]},
"Christmas": {
"colors": [
"red",
"red",
"red",
"red",
"red",
"green",
"green",
"green",
"green",
"green",
],
"method": "repeat",
},
"Winamp": {
"colors": [
"green",
"yellow",
"orange",
"orange-deep",
"red",
]
},
}
| [
"collections.namedtuple"
] | [((42, 79), 'collections.namedtuple', 'namedtuple', (['"""RGB"""', '"""red, green, blue"""'], {}), "('RGB', 'red, green, blue')\n", (52, 79), False, 'from collections import namedtuple\n')] |
#!/usr/bin/env python
from helpers import sjoin, cjoin
from random import shuffle
card_types = [
("tax",1,1), # tax everyone 2 coins => bank
("soldier",2,1),
("sergeant",3,1),
("captain",4,2),
("emperor",1,5),
("prince",1,1), # prince takes 1/3rd of bank
]
class Card:
def __init__(self, name, power=1, honor=1):
self.name = name
self.power, self.honor = power, honor
def __repr__(self):
return "<%s %s %s>" % (self.name, self.power, self.honor)
class Player:
coins = 4
out = False
def __init__(self, name, cards):
self.name = name
self.cards = cards
def __repr__(self):
return cjoin(self.name, self.cards, self.coins)
def get_card(self, name):
for c in self.cards:
if c.name == name:
return c
def score(self):
return sum(c.honor for c in self.cards)
deck = [Card(*c) for c in card_types]
deck += [Card(*c) for c in card_types]
for _ in range(15):
deck.append(Card(*randchoice(card_types)))
shuffle(deck)
def draw(lst, n):
items, lst = lst[:n], lst[n:]
return items
players = [Player('a', draw(deck,5)),
Player('b', draw(deck,5)),
Player('c', draw(deck,5))
]
class Play:
bank = 25
def play_prince(self, player, card):
amt = round(self.bank / 3)
self.bank -= amt
player.coins += amt
player.cards.remove(card)
def play_tax(self, player, card):
others = [p for p in players if p!=player]
for p in others:
p.coins -= 2
if p.coins < 0:
players.remove(p)
def check_end(self):
return len(players) == 1
def go(self):
for p in players:
prince = p.get_card("prince")
tax = p.get_card("tax")
if prince:
self.play_prince(p, prince)
elif tax:
self.play_tax()
| [
"helpers.cjoin",
"random.shuffle"
] | [((1135, 1148), 'random.shuffle', 'shuffle', (['deck'], {}), '(deck)\n', (1142, 1148), False, 'from random import shuffle\n'), ((762, 802), 'helpers.cjoin', 'cjoin', (['self.name', 'self.cards', 'self.coins'], {}), '(self.name, self.cards, self.coins)\n', (767, 802), False, 'from helpers import sjoin, cjoin\n')] |
import bson
import json
import swifty
#
# GET /tasks -- list tasks
# POST /tasks $BODY -- add new task
# GET /tasks/ID -- get info about task
# PUT /tasks/ID -- update task (except status)
# DELETE /tasks/ID -- remove task
# POST /tasks/ID/done -- mark task as done
#
def toTask(obj):
return { 'id': str(obj['_id']), 'task': obj['task'], 'status': obj['status'] }
def fromTask(body, q):
b = json.loads(body)
if 'task' in b:
q['task'] = b['task']
def Main(req):
db = swifty.MongoDatabase('tasks')
col = db['tasks']
p = req.path.split('/')
if p[0] != 'tasks':
return {}, { 'status': 404 }
q = { 'owner': req.claims['cookie'] }
if len(p) == 1:
if req.method == 'GET':
if 'status' in req.args:
q['status'] = req.args['status']
return [ toTask(x) for x in col.find(q) ], None
if req.method == 'POST':
q['status'] = 'new'
fromTask(req.body, q)
col.insert_one(q)
return {}, None
q['_id'] = bson.ObjectId(p[1])
if len(p) == 2:
if req.method == 'GET':
return toTask(col.find_one(q)), None
if req.method == 'PUT':
e = { }
fromTask(req.body, e)
col.update_one(q, { '$set': e })
return {}, None
if req.method == 'DELETE':
col.delete_one(q)
return {}, None
if len(p) == 3:
if p[2] == 'done' and req.method == 'POST':
col.update_one(q, { '$set': { 'status': 'done' } })
return {}, None
return {}, { 'status': 404 }
| [
"bson.ObjectId",
"json.loads",
"swifty.MongoDatabase"
] | [((440, 456), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (450, 456), False, 'import json\n'), ((532, 561), 'swifty.MongoDatabase', 'swifty.MongoDatabase', (['"""tasks"""'], {}), "('tasks')\n", (552, 561), False, 'import swifty\n'), ((1090, 1109), 'bson.ObjectId', 'bson.ObjectId', (['p[1]'], {}), '(p[1])\n', (1103, 1109), False, 'import bson\n')] |
# https://dmoj.ca/problem/tss17a
# https://dmoj.ca/submission/2226280
import sys
n = int(sys.stdin.readline()[:-1])
for i in range(n):
instruction = sys.stdin.readline()[:-1].split()
printed = False
for j in range(3):
if instruction.count(instruction[j]) >= 2:
print(instruction[j])
printed = True
break
if not printed:
print('???') | [
"sys.stdin.readline"
] | [((90, 110), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (108, 110), False, 'import sys\n'), ((154, 174), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (172, 174), False, 'import sys\n')] |
from todo.templatetags.todo_tags import is_management
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import HttpResponse
from django.shortcuts import render
from todo.models import Designer, Management, Writer, Editor
@login_required
@user_passes_test(is_management)
def users_detail(request, list_slug=None) -> HttpResponse:
# Which users to show on this list view?
if list_slug == "editors":
users = Editor.objects.all()
elif list_slug == "designers":
users = Designer.objects.all()
elif list_slug == "writers":
users = Writer.objects.all()
elif list_slug == "management":
users = Management.objects.all()
# Additional filtering
active_users = users.filter(user__is_active=True)
unactive_users = users.filter(user__is_active=False)
# ######################
# Add New User Form
# ######################
context = {
"list_slug": list_slug,
"active_users": active_users,
"unactive_users": unactive_users,
"users": users,
}
return render(request, "todo/users_detail.html", context)
| [
"django.shortcuts.render",
"todo.models.Writer.objects.all",
"todo.models.Editor.objects.all",
"todo.models.Designer.objects.all",
"django.contrib.auth.decorators.user_passes_test",
"todo.models.Management.objects.all"
] | [((283, 314), 'django.contrib.auth.decorators.user_passes_test', 'user_passes_test', (['is_management'], {}), '(is_management)\n', (299, 314), False, 'from django.contrib.auth.decorators import login_required, user_passes_test\n'), ((1108, 1158), 'django.shortcuts.render', 'render', (['request', '"""todo/users_detail.html"""', 'context'], {}), "(request, 'todo/users_detail.html', context)\n", (1114, 1158), False, 'from django.shortcuts import render\n'), ((471, 491), 'todo.models.Editor.objects.all', 'Editor.objects.all', ([], {}), '()\n', (489, 491), False, 'from todo.models import Designer, Management, Writer, Editor\n'), ((543, 565), 'todo.models.Designer.objects.all', 'Designer.objects.all', ([], {}), '()\n', (563, 565), False, 'from todo.models import Designer, Management, Writer, Editor\n'), ((615, 635), 'todo.models.Writer.objects.all', 'Writer.objects.all', ([], {}), '()\n', (633, 635), False, 'from todo.models import Designer, Management, Writer, Editor\n'), ((688, 712), 'todo.models.Management.objects.all', 'Management.objects.all', ([], {}), '()\n', (710, 712), False, 'from todo.models import Designer, Management, Writer, Editor\n')] |
from bluepy import btle
import concurrent
from concurrent import futures
import threading
import multiprocessing
import time
from time_sync import *
import eval_client
import dashBoardClient
from joblib import dump, load
import numpy # to count labels and store in dict
import operator # to get most predicted label
import json
import random # RNG in worst case
from sklearn.preprocessing import StandardScaler # to normalise data
class UUIDS:
SERIAL_COMMS = btle.UUID("0000dfb1-0000-1000-8000-00805f9b34fb")
class Delegate(btle.DefaultDelegate):
def __init__(self, params):
btle.DefaultDelegate.__init__(self)
def handleNotification(self, cHandle, data):
ultra96_receiving_timestamp = time.time() * 1000
for idx in range(len(beetle_addresses)):
if global_delegate_obj[idx] == self:
#print("receiving data from %s" % (beetle_addresses[idx]))
#print("data: " + data.decode('ISO-8859-1'))
if beetle_addresses[idx] == "50:F1:4A:CC:01:C4": # emg beetle data
emg_buffer[beetle_addresses[idx]
] += data.decode('ISO-8859-1')
if '>' in data.decode('ISO-8859-1'):
print("sending emg dataset to dashboard")
packet_count_dict[beetle_addresses[idx]] += 1
try:
arr = emg_buffer[beetle_addresses[idx]].split(">")[
0]
final_arr = arr.split(",")
board_client.send_data_to_DB(
beetle_addresses[idx], str(final_arr))
emg_buffer[beetle_addresses[idx]] = ""
except Exception as e:
print(e)
board_client.send_data_to_DB(
beetle_addresses[idx], str(["1", "1", "1", "1"]))
emg_buffer[beetle_addresses[idx]] = ""
else:
if incoming_data_flag[beetle_addresses[idx]] is True:
if handshake_flag_dict[beetle_addresses[idx]] is True:
buffer_dict[beetle_addresses[idx]
] += data.decode('ISO-8859-1')
if '>' not in data.decode('ISO-8859-1'):
pass
else:
if 'T' in buffer_dict[beetle_addresses[idx]]:
for char in buffer_dict[beetle_addresses[idx]]:
if char == 'T':
ultra96_receiving_timestamp = time.time() * 1000
continue
if char == '>': # end of packet
try:
timestamp_dict[beetle_addresses[idx]].append(
int(datastring_dict[beetle_addresses[idx]]))
except Exception:
timestamp_dict[beetle_addresses[idx]].append(
0)
timestamp_dict[beetle_addresses[idx]].append(
ultra96_receiving_timestamp)
handshake_flag_dict[beetle_addresses[idx]] = False
clocksync_flag_dict[beetle_addresses[idx]] = True
# clear serial input buffer to get ready for data packets
datastring_dict[beetle_addresses[idx]] = ""
buffer_dict[beetle_addresses[idx]] = ""
return
elif char != '>':
if char == '|': # signify start of next timestamp
try:
timestamp_dict[beetle_addresses[idx]].append(
int(datastring_dict[beetle_addresses[idx]]))
except Exception:
timestamp_dict[beetle_addresses[idx]].append(
0)
datastring_dict[beetle_addresses[idx]] = ""
else:
datastring_dict[beetle_addresses[idx]] += char
else:
pass
else:
if '>' in data.decode('ISO-8859-1'):
buffer_dict[beetle_addresses[idx]
] += data.decode('ISO-8859-1')
#print("storing dance dataset")
packet_count_dict[beetle_addresses[idx]] += 1
else:
buffer_dict[beetle_addresses[idx]
] += data.decode('ISO-8859-1')
# send data to dashboard once every 10 datasets
try:
if packet_count_dict[beetle_addresses[idx]] % 10 == 0 and '>' in data.decode('ISO-8859-1'):
print("sending data to dashboard")
first_string = buffer_dict[beetle_addresses[idx]].split("|")[
0]
final_arr = [first_string.split(",")[0], str(int(first_string.split(",")[1])/divide_get_float), str(int(first_string.split(",")[2])/divide_get_float),
str(int(first_string.split(",")[
3])/divide_get_float), str(int(first_string.split(",")[4])/divide_get_float),
str(int(first_string.split(",")[5])/divide_get_float), str(int(first_string.split(",")[6])/divide_get_float)]
board_client.send_data_to_DB(
beetle_addresses[idx], str(final_arr))
except Exception as e:
print(e)
"""
class EMGThread(object):
def __init__(self):
thread = threading.Thread(target=self.getEMGData, args=(beetle, ))
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def getEMGData(self, beetle):
while True:
try:
if beetle.waitForNotifications(2):
continue
except Exception as e:
reestablish_connection(beetle)
"""
def initHandshake(beetle):
retries = 0
if beetle.addr != "50:F1:4A:CC:01:C4":
ultra96_sending_timestamp = time.time() * 1000
incoming_data_flag[beetle.addr] = True
handshake_flag_dict[beetle.addr] = True
for characteristic in beetle.getCharacteristics():
if characteristic.uuid == UUIDS.SERIAL_COMMS:
ultra96_sending_timestamp = time.time() * 1000
timestamp_dict[beetle.addr].append(
ultra96_sending_timestamp)
print("Sending 'T' and 'H' and 'Z' packets to %s" %
(beetle.addr))
characteristic.write(
bytes('T', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('H', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('Z', 'UTF-8'), withResponse=False)
while True:
try:
if beetle.waitForNotifications(2):
if clocksync_flag_dict[beetle.addr] is True:
# function for time calibration
try:
clock_offset_tmp = calculate_clock_offset(timestamp_dict[beetle.addr])
tmp_value_list = []
if clock_offset_tmp is not None:
tmp_value_list.append(clock_offset_tmp)
clock_offset_dict[beetle.addr] = tmp_value_list
except Exception as e:
print(e)
timestamp_dict[beetle.addr].clear()
print("beetle %s clock offset: %i" %
(beetle.addr, clock_offset_dict[beetle.addr][-1]))
clocksync_flag_dict[beetle.addr] = False
incoming_data_flag[beetle.addr] = False
return
else:
continue
else:
while True:
if retries >= 5:
retries = 0
break
print(
"Failed to receive timestamp, sending 'Z', 'T', 'H', and 'R' packet to %s" % (beetle.addr))
characteristic.write(
bytes('R', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('T', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('H', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('Z', 'UTF-8'), withResponse=False)
retries += 1
except Exception as e:
reestablish_connection(beetle)
def establish_connection(address):
while True:
try:
for idx in range(len(beetle_addresses)):
# for initial connections or when any beetle is disconnected
if beetle_addresses[idx] == address:
if global_beetle[idx] != 0: # do not reconnect if already connected
return
else:
print("connecting with %s" % (address))
beetle = btle.Peripheral(address)
global_beetle[idx] = beetle
beetle_delegate = Delegate(address)
global_delegate_obj[idx] = beetle_delegate
beetle.withDelegate(beetle_delegate)
if address != "50:F1:4A:CC:01:C4":
initHandshake(beetle)
print("Connected to %s" % (address))
return
except Exception as e:
print(e)
for idx in range(len(beetle_addresses)):
# for initial connections or when any beetle is disconnected
if beetle_addresses[idx] == address:
if global_beetle[idx] != 0: # do not reconnect if already connected
return
time.sleep(3)
def reestablish_connection(beetle):
while True:
try:
print("reconnecting to %s" % (beetle.addr))
beetle.connect(beetle.addr)
print("re-connected to %s" % (beetle.addr))
return
except:
time.sleep(1)
continue
def getDanceData(beetle):
if beetle.addr != "50:F1:4A:CC:01:C4":
timeout_count = 0
retries = 0
incoming_data_flag[beetle.addr] = True
for characteristic in beetle.getCharacteristics():
if characteristic.uuid == UUIDS.SERIAL_COMMS:
while True:
if retries >= 10:
retries = 0
break
print(
"sending 'A' to beetle %s to collect dancing data", (beetle.addr))
characteristic.write(
bytes('A', 'UTF-8'), withResponse=False)
retries += 1
while True:
try:
if beetle.waitForNotifications(2):
#print("getting data...")
# print(packet_count_dict[beetle.addr])
# if number of datasets received from all beetles exceed expectation
if packet_count_dict[beetle.addr] >= num_datasets:
print("sufficient datasets received from %s. Processing data now" % (
beetle.addr))
# reset for next dance move
packet_count_dict[beetle.addr] = 0
incoming_data_flag[beetle.addr] = False
while True:
if retries >= 10:
break
characteristic.write(
bytes('Z', 'UTF-8'), withResponse=False)
retries += 1
return
continue
# beetle finish transmitting, but got packet losses
elif (packet_count_dict[beetle.addr] < num_datasets) and (packet_count_dict[beetle.addr] >= 1):
print(packet_count_dict[beetle.addr])
print("sufficient datasets received from %s with packet losses. Processing data now" % (
beetle.addr))
# reset for next dance move
packet_count_dict[beetle.addr] = 0
incoming_data_flag[beetle.addr] = False
while True:
if retries >= 10:
break
characteristic.write(
bytes('Z', 'UTF-8'), withResponse=False)
retries += 1
return
elif timeout_count >= 3:
incoming_data_flag[beetle.addr] = False
packet_count_dict[beetle.addr] = 0
timeout_count = 0
return
else: # beetle did not start transmitting despite ultra96 sending 'A' previously
timeout_count += 1
packet_count_dict[beetle.addr] = 0
retries = 0
while True:
if retries >= 10:
retries = 0
break
print(
"Failed to receive data, resending 'A' and 'B' packet to %s" % (beetle.addr))
characteristic.write(
bytes('A', 'UTF-8'), withResponse=False)
characteristic.write(
bytes('B', 'UTF-8'), withResponse=False)
retries += 1
except Exception as e:
reestablish_connection(beetle)
def getEMGData(beetle):
retries = 0
for characteristic in beetle.getCharacteristics():
if characteristic.uuid == UUIDS.SERIAL_COMMS:
while True:
if retries >= 5:
retries = 0
break
print(
"sending 'E' to beetle %s to collect emg data", (beetle.addr))
characteristic.write(
bytes('E', 'UTF-8'), withResponse=False)
retries += 1
while True:
try:
if beetle.waitForNotifications(2):
if packet_count_dict[beetle.addr] >= 1:
packet_count_dict[beetle.addr] = 0
retries = 0
while True:
if retries >= 8:
break
characteristic.write(
bytes('X', 'UTF-8'), withResponse=False)
retries += 1
return
continue
else:
print("failed to collect emg data, resending 'E'")
characteristic.write(
bytes('E', 'UTF-8'), withResponse=False)
except Exception as e:
reestablish_connection(beetle)
def processData(address):
    """Deserialize the buffered characters received from the beetle at
    *address* into numbered datasets of numeric values.

    Stream protocol (as implemented by the parser below): 'D' starts a
    dataset, ',' separates fields, '|' introduces the checksum, '>' ends a
    dataset.  Field 0 is an integer timestamp/flag, fields 1-4 are parsed
    as scaled floats, the rest as scaled integers.  Datasets whose XOR
    checksum fails or that end up with fewer than 7 values are discarded.
    Returns ``{address: {dataset_num: [values...]}}``.
    """
    # NOTE(review): data_dict is only bound when this is not the EMG beetle;
    # calling this with "50:F1:4A:CC:01:C4" would raise NameError at the
    # deserialize() call below -- confirm callers handle/skip that address.
    if address != "50:F1:4A:CC:01:C4":
        data_dict = {address: {}}
    def deserialize(buffer_dict, result_dict, address):
        # Character-by-character state machine; all parser state lives in
        # the module-level *_dict globals keyed by beetle address, so state
        # survives across calls for the same beetle.
        for char in buffer_dict[address]:
            # start of new dataset
            if char == 'D' or end_flag[address] is True:
                # 2nd part of dataset lost or '>' lost in transmission
                if start_flag[address] is True:
                    try:
                        # if only '>' lost in transmission, can keep dataset. Else delete
                        if checksum_dict[address] != int(datastring_dict[address]):
                            del result_dict[address][dataset_count_dict[address]]
                    except Exception:  # 2nd part of dataset lost
                        try:
                            del result_dict[address][dataset_count_dict[address]]
                        except Exception:
                            pass
                # reset datastring to prepare for next dataset
                datastring_dict[address] = ""
                # reset checksum to prepare for next dataset
                checksum_dict[address] = 0
                comma_count_dict[address] = 0
                dataset_count_dict[address] += 1
                timestamp_flag_dict[address] = True
                checksum_dict[address] ^= ord(char)
                start_flag[address] = True
                end_flag[address] = False
            if char != 'D' and char != ',' and char != '|' and char != '>' and (char == '-' or char == '.' or float_flag_dict[address] is True or timestamp_flag_dict[address] is True):
                # Ordinary payload character: accumulate and fold into checksum.
                datastring_dict[address] += char
                checksum_dict[address] ^= ord(char)
            elif char == ' ':
                datastring_dict[address] += char
                checksum_dict[address] ^= ord(char)
            elif char == ',':  # next value
                comma_count_dict[address] += 1
                checksum_dict[address] ^= ord(char)
                # already past timestamp value
                if comma_count_dict[address] == 1:
                    timestamp_flag_dict[address] = False
                    try:
                        result_dict[address].setdefault(
                            dataset_count_dict[address], []).append(int(datastring_dict[address]))
                    except Exception:
                        try:
                            del result_dict[address][dataset_count_dict[address]]
                        except Exception:
                            pass
                    float_flag_dict[address] = True
                elif comma_count_dict[address] < 5:  # yaw, pitch, roll floats
                    try:
                        result_dict[address][dataset_count_dict[address]].append(
                            float("{0:.2f}".format((int(datastring_dict[address]) / divide_get_float))))
                    except Exception:
                        try:
                            del result_dict[address][dataset_count_dict[address]]
                        except Exception:
                            pass
                else:  # accelerometer integers
                    try:
                        result_dict[address][dataset_count_dict[address]].append(
                            int(int(datastring_dict[address]) / divide_get_float))
                    except Exception:
                        try:
                            del result_dict[address][dataset_count_dict[address]]
                        except Exception:
                            pass
                datastring_dict[address] = ""
            elif char == '>':  # end of current dataset
                # print("ultra96 checksum: %i" % (checksum_dict[address]))
                # print("beetle checksum: %i" % (int(datastring_dict[address])))
                # received dataset is invalid; drop the dataset from data dictionary
                try:
                    if checksum_dict[address] != int(datastring_dict[address]):
                        del result_dict[address][dataset_count_dict[address]]
                except Exception:
                    try:
                        del result_dict[address][dataset_count_dict[address]]
                    except Exception:
                        pass
                # reset datastring to prepare for next dataset
                datastring_dict[address] = ""
                # reset checksum to prepare for next dataset
                checksum_dict[address] = 0
                comma_count_dict[address] = 0
                start_flag[address] = False
                end_flag[address] = True
                # missing data in previous dataset
                try:
                    if len(result_dict[address][list(result_dict[address].keys())[-1]]) < 7:
                        del result_dict[address][list(
                            result_dict[address].keys())[-1]]
                except Exception as e:
                    print(e)
                    print("error in processData in line 379")
            elif char == '|' or (float_flag_dict[address] is False and timestamp_flag_dict[address] is False):
                # '|' ends the payload: flush the pending value, then start
                # collecting the checksum digits.
                if float_flag_dict[address] is True:
                    try:
                        result_dict[address][dataset_count_dict[address]].append(
                            int(int(datastring_dict[address]) / divide_get_float))
                    except Exception:
                        try:
                            del result_dict[address][dataset_count_dict[address]]
                        except Exception:
                            pass
                # clear datastring to prepare take in checksum from beetle
                datastring_dict[address] = ""
                float_flag_dict[address] = False
            elif char != '|' and char != '>':
                # Checksum digits accumulate here (not folded into checksum).
                datastring_dict[address] += char
        # After the whole buffer is consumed, drop a trailing short dataset.
        try:
            if len(result_dict[address][list(result_dict[address].keys())[-1]]) < 7:
                del result_dict[address][list(
                    result_dict[address].keys())[-1]]
        except Exception as e:
            print(e)
            print("error in processData in line 478")
    # Strip CR/LF before parsing.
    for character in "\r\n":
        buffer_dict[address] = buffer_dict[address].replace(character, "")
    deserialize(buffer_dict, data_dict, address)
    dataset_count_dict[address] = 0
    return data_dict
def parse_data(dic_data, beetle):
    """Collect the six sensor readings from every dataset recorded for
    *beetle*, skipping each dataset's leading field (index 0).

    Returns a list of 6-element lists (yaw, pitch, roll + accelerometer).
    """
    return [[dataset[i] for i in range(1, 7)]
            for dataset in dic_data[beetle].values()]
def predict_beetle(beetle_data, model):
    """Run *model* over every sample in *beetle_data* and return the label
    predicted most often (majority vote across per-sample predictions)."""
    labels, votes = numpy.unique(model.predict(beetle_data), return_counts=True)
    winner, _ = max(zip(labels, votes), key=operator.itemgetter(1))
    return winner
# Majority vote helper over a list of predictions.
def most_frequent_prediction(pred_list):
    """Return the element that appears most often in *pred_list*.

    Raises ValueError on an empty list (same as the underlying max()).
    """
    candidates = set(pred_list)
    votes = {candidate: pred_list.count(candidate) for candidate in candidates}
    return max(candidates, key=votes.__getitem__)
def find_new_position(ground_truth, b1_move, b2_move, b3_move):
    """Compute the dancers' new left-to-right order after their moves.

    *ground_truth* is the current order of dancer ids (e.g. [3, 2, 1]);
    b1/b2/b3_move are 'L', 'R' or 'S' for dancers 1, 2 and 3.  Returns the
    new order as a space-separated string, e.g. "3 2 1".
    """
    moves_by_dancer = {1: b1_move, 2: b2_move, 3: b3_move}
    # Movement pattern in position order (leftmost dancer first).
    pattern = (moves_by_dancer[ground_truth[0]],
               moves_by_dancer[ground_truth[1]],
               moves_by_dancer[ground_truth[2]])
    # Recognised patterns -> permutation of the current position indices.
    permutations = {
        ('R', 'S', 'L'): (2, 1, 0),
        ('R', 'L', 'S'): (1, 0, 2),
        ('R', 'L', 'L'): (1, 2, 0),
        ('S', 'R', 'L'): (0, 2, 1),
        ('S', 'L', 'S'): (1, 0, 2),
    }
    order = permutations.get(pattern)
    if order is None:
        # Unrecognised pattern: positions unchanged.
        output = ground_truth
    else:
        output = [ground_truth[i] for i in order]
    return str(output[0]) + " " + str(output[1]) + " " + str(output[2])
def eval_1beetle(beetle_dict_1, beetle_1):
    """Predict the dance move using data from a single beetle.

    Parses the raw dataset dictionary for *beetle_1*, then majority-votes
    the dance classifier's per-sample predictions.
    """
    # Get beetle data from dictionaries
    beetle1_data = parse_data(beetle_dict_1, beetle_1)
    # FIX: the original re-implemented the numpy.unique/argmax majority vote
    # inline; delegate to the shared predict_beetle() helper instead.
    return predict_beetle(beetle1_data, mlp_dance)
def normalise_data(data):
    """Standardise *data* column-wise (zero mean, unit variance).

    Best-effort: if scaling fails for any reason (e.g. empty or malformed
    input) the data is returned unchanged rather than raising.
    """
    try:
        return StandardScaler().fit_transform(data)
    except Exception:
        return data
# Main orchestration: connect to the beetles over BLE, repeatedly collect
# movement/dance data, classify each dancer's move and position, and push
# the results to the evaluation and dashboard servers.
if __name__ == '__main__':
    # 50:F1:4A:CB:FE:EE: position 1, 1C:BA:8C:1D:30:22: position 2, 78:DB:2F:BF:2C:E2: position 3
    start_time = time.time()
    # global variables
    """
    beetle_addresses = ["50:F1:4A:CC:01:C4", "50:F1:4A:CB:FE:EE", "78:DB:2F:BF:2C:E2",
                        "1C:BA:8C:1D:30:22"]
    """
    # First address is the EMG beetle; the other three are the dancers.
    beetle_addresses = ["50:F1:4A:CC:01:C4", "50:F1:4A:CB:FE:EE", "78:DB:2F:BF:2C:E2",
                        "1C:BA:8C:1D:30:22"]
    # Divisor used when converting transmitted scaled ints back to floats.
    divide_get_float = 100.0
    global_delegate_obj = []
    global_beetle = []
    handshake_flag_dict = {"50:F1:4A:CB:FE:EE": True,
                           "78:DB:2F:BF:2C:E2": True, "1C:BA:8C:1D:30:22": True}
    emg_buffer = {"50:F1:4A:CC:01:C4": ""}
    # NOTE(review): buffer_dict has no entry for the EMG beetle, yet
    # processData is later dispatched over all of beetle_addresses --
    # confirm the resulting KeyError path is intentional / handled.
    buffer_dict = {"50:F1:4A:CB:FE:EE": "",
                   "78:DB:2F:BF:2C:E2": "", "1C:BA:8C:1D:30:22": ""}
    incoming_data_flag = {"50:F1:4A:CB:FE:EE": False,
                          "78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
    # Current dancer order; updated from the eval server each round.
    ground_truth = [1, 2, 3]
    ACTIONS = ['muscle', 'weightlifting', 'shoutout']
    POSITIONS = ['1 2 3', '3 2 1', '2 3 1', '3 1 2', '1 3 2', '2 1 3']
    beetle1 = "50:F1:4A:CB:FE:EE"
    beetle2 = "78:DB:2F:BF:2C:E2"
    beetle3 = "1C:BA:8C:1D:30:22"
    # Fallback outputs if no prediction can be made.
    dance = "shoutout"
    new_pos = "1 2 3"
    # data global variables
    num_datasets = 200
    beetle1_data_dict = {"50:F1:4A:CB:FE:EE": {}}
    beetle2_data_dict = {"78:DB:2F:BF:2C:E2": {}}
    beetle3_data_dict = {"1C:BA:8C:1D:30:22": {}}
    beetle1_moving_dict = {"50:F1:4A:CB:FE:EE": {}}
    beetle2_moving_dict = {"78:DB:2F:BF:2C:E2": {}}
    beetle3_moving_dict = {"1C:BA:8C:1D:30:22": {}}
    beetle1_dancing_dict = {"50:F1:4A:CB:FE:EE": {}}
    beetle2_dancing_dict = {"78:DB:2F:BF:2C:E2": {}}
    beetle3_dancing_dict = {"1C:BA:8C:1D:30:22": {}}
    # Per-beetle parser state shared with processData/deserialize.
    datastring_dict = {"50:F1:4A:CB:FE:EE": "",
                       "78:DB:2F:BF:2C:E2": "", "1C:BA:8C:1D:30:22": ""}
    packet_count_dict = {"50:F1:4A:CC:01:C4": 0, "50:F1:4A:CB:FE:EE": 0,
                         "78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
    dataset_count_dict = {"50:F1:4A:CB:FE:EE": 0,
                          "78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
    float_flag_dict = {"50:F1:4A:CB:FE:EE": False,
                       "78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
    timestamp_flag_dict = {"50:F1:4A:CB:FE:EE": False,
                           "78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
    comma_count_dict = {"50:F1:4A:CB:FE:EE": 0,
                        "78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
    checksum_dict = {"50:F1:4A:CB:FE:EE": 0,
                     "78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
    start_flag = {"50:F1:4A:CB:FE:EE": False,
                  "78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
    end_flag = {"50:F1:4A:CB:FE:EE": False,
                "78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
    # clock synchronization global variables
    dance_count = 0
    clocksync_flag_dict = {"50:F1:4A:CB:FE:EE": False,
                           "78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
    timestamp_dict = {"50:F1:4A:CB:FE:EE": [],
                      "78:DB:2F:BF:2C:E2": [], "1C:BA:8C:1D:30:22": []}
    clock_offset_dict = {"50:F1:4A:CB:FE:EE": [],
                         "78:DB:2F:BF:2C:E2": [], "1C:BA:8C:1D:30:22": []}
    # Pre-size the per-beetle slots (one entry per address).
    [global_delegate_obj.append(0) for idx in range(len(beetle_addresses))]
    [global_beetle.append(0) for idx in range(len(beetle_addresses))]
    try:
        # NOTE(review): this rebinds the eval_client *module* name to a
        # Client instance -- it works only because it runs exactly once.
        eval_client = eval_client.Client("192.168.43.6", 8080, 6, "cg40024002group6")
    except Exception as e:
        print(e)
    try:
        board_client = dashBoardClient.Client("192.168.43.248", 8080, 6, "cg40024002group6")
    except Exception as e:
        print(e)
    # Connect to each beetle, pausing between connections; the two ML
    # models are loaded during the idle gaps.
    establish_connection("50:F1:4A:CC:01:C4")
    time.sleep(2)
    establish_connection("78:DB:2F:BF:2C:E2")
    time.sleep(3)
    # Load MLP NN model
    mlp_dance = load('mlp_dance_LATEST.joblib')
    establish_connection("50:F1:4A:CB:FE:EE")
    time.sleep(3)
    # Load Movement ML
    mlp_move = load('mlp_movement_LATEST.joblib')
    establish_connection("1C:BA:8C:1D:30:22")
    # One-off EMG read from the dedicated EMG beetle.
    with concurrent.futures.ThreadPoolExecutor(max_workers=7) as data_executor:
        for beetle in global_beetle:
            if beetle.addr == "50:F1:4A:CC:01:C4":
                data_executor.submit(getEMGData, beetle)
    data_executor.shutdown(wait=True)
    # start collecting data only after 1 min passed
    while True:
        elapsed_time = time.time() - start_time
        if int(elapsed_time) >= 60:
            break
        else:
            print(elapsed_time)
            time.sleep(1)
    """
    for beetle in global_beetle:
        print(beetle.addr)
    emg_thread = EMGThread(global_beetle[3])
    """
    # Main round loop: one iteration per dance move.
    while True:
        # Collect one round of dance data from every beetle in parallel.
        with concurrent.futures.ThreadPoolExecutor(max_workers=7) as data_executor:
            {data_executor.submit(getDanceData, beetle): beetle for beetle in global_beetle}
            data_executor.shutdown(wait=True)
        """
        with concurrent.futures.ThreadPoolExecutor(max_workers=7) as data_executor:
            data_executor.submit(getEMGData, global_beetle[0])
            data_executor.shutdown(wait=True)
        """
        # do calibration once every 4 moves; change 4 to other values according to time calibration needs
        if dance_count == 1:
            print("Proceed to do time calibration...")
            # clear clock_offset_dict for next time calibration
            for beetle in global_beetle:
                if beetle.addr != "50:F1:4A:CC:01:C4":
                    initHandshake(beetle)
        if dance_count == 1:
            dance_count = 0
        dance_count += 1
        # Parse each beetle's buffered bytes in parallel worker processes.
        pool = multiprocessing.Pool()
        workers = [pool.apply_async(processData, args=(address, ))
                   for address in beetle_addresses]
        result = [worker.get() for worker in workers]
        pool.close()
        try:
            # change to 1 if using emg beetle, 0 if not using
            for idx in range(1, len(result)):
                for address in result[idx].keys():
                    if address == "50:F1:4A:CB:FE:EE":
                        beetle1_data_dict[address] = result[idx][address]
                    elif address == "78:DB:2F:BF:2C:E2":
                        beetle2_data_dict[address] = result[idx][address]
                    elif address == "1C:BA:8C:1D:30:22":
                        beetle3_data_dict[address] = result[idx][address]
        except Exception as e:
            pass
        # Split each beetle's datasets into "moving" (leading field 0) and
        # "dancing" (anything else) groups.
        try:
            for dataset_num, dataset_list in beetle1_data_dict["50:F1:4A:CB:FE:EE"].items():
                if dataset_list[0] == 0:  # moving data
                    beetle1_moving_dict["50:F1:4A:CB:FE:EE"].update(
                        {dataset_num: dataset_list})
                else:  # dancing data
                    beetle1_dancing_dict["50:F1:4A:CB:FE:EE"].update(
                        {dataset_num: dataset_list})
        except Exception as e:
            pass
        try:
            for dataset_num, dataset_list in beetle2_data_dict["78:DB:2F:BF:2C:E2"].items():
                if dataset_list[0] == 0:  # moving data
                    beetle2_moving_dict["78:DB:2F:BF:2C:E2"].update(
                        {dataset_num: dataset_list})
                else:  # dancing data
                    beetle2_dancing_dict["78:DB:2F:BF:2C:E2"].update(
                        {dataset_num: dataset_list})
        except Exception as e:
            pass
        try:
            for dataset_num, dataset_list in beetle3_data_dict["1C:BA:8C:1D:30:22"].items():
                if dataset_list[0] == 0:  # moving data
                    beetle3_moving_dict["1C:BA:8C:1D:30:22"].update(
                        {dataset_num: dataset_list})
                else:  # dancing data
                    beetle3_dancing_dict["1C:BA:8C:1D:30:22"].update(
                        {dataset_num: dataset_list})
        except Exception as e:
            pass
        # clear buffer for next move
        for address in buffer_dict.keys():
            buffer_dict[address] = ""
        # print(beetle1_data_dict)
        # print(beetle2_data_dict)
        # print(beetle3_data_dict)
        # Append the raw round data to log files for later training.
        with open(r'position.txt', 'a') as file:
            file.write(json.dumps(beetle1_moving_dict) + "\n")
            file.write(json.dumps(beetle2_moving_dict) + "\n")
            file.write(json.dumps(beetle3_moving_dict) + "\n")
            file.close()
        with open(r'dance.txt', 'a') as file:
            file.write(json.dumps(beetle1_dancing_dict) + "\n")
            file.write(json.dumps(beetle2_dancing_dict) + "\n")
            file.write(json.dumps(beetle3_dancing_dict) + "\n")
            file.close()
        # synchronization delay
        try:
            beetle1_time_ultra96 = calculate_ultra96_time(
                beetle1_dancing_dict, clock_offset_dict["50:F1:4A:CB:FE:EE"][0])
            beetle2_time_ultra96 = calculate_ultra96_time(
                beetle2_dancing_dict, clock_offset_dict["78:DB:2F:BF:2C:E2"][0])
            beetle3_time_ultra96 = calculate_ultra96_time(
                beetle3_dancing_dict, clock_offset_dict["1C:BA:8C:1D:30:22"][0])
            sync_delay = calculate_sync_delay(beetle1_time_ultra96, beetle2_time_ultra96, beetle3_time_ultra96)
        except Exception as e:
            print(e)
            print("use default sync delay")
            sync_delay = 950
        # print("Beetle 1 ultra 96 time: ", beetle1_time_ultra96)
        # print("Beetle 2 ultra 96 time: ", beetle2_time_ultra96)
        # print("Beetle 3 ultra 96 time: ", beetle3_time_ultra96)
        print("Synchronization delay is: ", sync_delay)
        # machine learning
        # ml_result = get_prediction(beetle1_data_dict)
        """
        """
        # Positions: parse + normalise the movement data for each beetle.
        beetle1_moving_dict = parse_data(beetle1_moving_dict, beetle1)
        beetle2_moving_dict = parse_data(beetle2_moving_dict, beetle2)
        beetle3_moving_dict = parse_data(beetle3_moving_dict, beetle3)
        beetle1_moving_dict = normalise_data(beetle1_moving_dict)
        beetle2_moving_dict = normalise_data(beetle2_moving_dict)
        beetle3_moving_dict = normalise_data(beetle3_moving_dict)
        # Predict movement direction of each beetle
        try:
            beetle1_move = predict_beetle(beetle1_moving_dict, mlp_move)
        except Exception as e:
            beetle1_move = 'S'
        try:
            beetle2_move = predict_beetle(beetle2_moving_dict, mlp_move)
        except Exception as e:
            beetle2_move = 'S'
        try:
            beetle3_move = predict_beetle(beetle3_moving_dict, mlp_move)
        except Exception as e:
            beetle3_move = 'S'
        # Find new position
        new_pos = find_new_position(
            ground_truth, beetle1_move, beetle2_move, beetle3_move)
        # PREDICT DANCE
        # Use whichever beetles actually produced dance data; fall back to
        # a random action if none did.
        if beetle1_dancing_dict[beetle1] and beetle2_dancing_dict[beetle2] and beetle3_dancing_dict[beetle3]:
            # Get DANCE data from dictionaries in arguments
            beetle1_dance_data = parse_data(beetle1_dancing_dict, beetle1)
            beetle2_dance_data = parse_data(beetle2_dancing_dict, beetle2)
            beetle3_dance_data = parse_data(beetle3_dancing_dict, beetle3)
            # print(beetle1_data)
            # Normalise DANCE data
            beetle1_dance_data_norm = normalise_data(beetle1_dance_data)
            beetle2_dance_data_norm = normalise_data(beetle2_dance_data)
            beetle3_dance_data_norm = normalise_data(beetle3_dance_data)
            # print(beetle1_data_norm)
            # Predict DANCE of each beetle
            beetle1_dance = predict_beetle(beetle1_dance_data_norm, mlp_dance)
            beetle2_dance = predict_beetle(beetle2_dance_data_norm, mlp_dance)
            beetle3_dance = predict_beetle(beetle3_dance_data_norm, mlp_dance)
            # print(beetle1_dance)
            dance_predictions = [beetle1_dance, beetle2_dance, beetle3_dance]
            dance = most_frequent_prediction(dance_predictions)
        elif beetle2_dancing_dict[beetle2] and beetle3_dancing_dict[beetle3]:
            dance = eval_1beetle(beetle2_dancing_dict, beetle2)
        elif beetle1_dancing_dict[beetle1] and beetle3_dancing_dict[beetle3]:
            dance = eval_1beetle(beetle1_dancing_dict, beetle1)
        elif beetle1_dancing_dict[beetle1] and beetle2_dancing_dict[beetle2]:
            dance = eval_1beetle(beetle1_dancing_dict, beetle1)
        elif beetle1_dancing_dict[beetle1]:
            dance = eval_1beetle(beetle1_dancing_dict, beetle1)
        elif beetle2_dancing_dict[beetle2]:
            dance = eval_1beetle(beetle2_dancing_dict, beetle2)
        elif beetle3_dancing_dict[beetle3]:
            dance = eval_1beetle(beetle3_dancing_dict, beetle3)
        else:
            # RNG
            dance = random.choice(ACTIONS)
        print(dance)
        print(new_pos)
        # send data to eval and dashboard server
        eval_client.send_data(new_pos, dance, str(sync_delay))
        # The eval server replies with the true dancer order for next round.
        ground_truth = eval_client.receive_dancer_position().split(' ')
        ground_truth = [int(ground_truth[0]), int(
            ground_truth[1]), int(ground_truth[2])]
        final_string = dance + " " + new_pos
        board_client.send_data_to_DB("MLDancer1", final_string)
        # Reset the per-round data dictionaries for the next move.
        beetle1_moving_dict = {"50:F1:4A:CB:FE:EE": {}}
        beetle2_moving_dict = {"78:DB:2F:BF:2C:E2": {}}
        beetle3_moving_dict = {"1C:BA:8C:1D:30:22": {}}
        beetle1_dancing_dict = {"50:F1:4A:CB:FE:EE": {}}
        beetle2_dancing_dict = {"78:DB:2F:BF:2C:E2": {}}
        beetle3_dancing_dict = {"1C:BA:8C:1D:30:22": {}}
| [
"random.choice",
"numpy.unique",
"bluepy.btle.DefaultDelegate.__init__",
"concurrent.futures.ThreadPoolExecutor",
"bluepy.btle.Peripheral",
"dashBoardClient.Client",
"json.dumps",
"time.sleep",
"sklearn.preprocessing.StandardScaler",
"multiprocessing.Pool",
"joblib.load",
"operator.itemgetter"... | [((469, 518), 'bluepy.btle.UUID', 'btle.UUID', (['"""0000dfb1-0000-1000-8000-00805f9b34fb"""'], {}), "('0000dfb1-0000-1000-8000-00805f9b34fb')\n", (478, 518), False, 'from bluepy import btle\n'), ((25115, 25157), 'numpy.unique', 'numpy.unique', (['pred_arr'], {'return_counts': '(True)'}), '(pred_arr, return_counts=True)\n', (25127, 25157), False, 'import numpy\n'), ((27115, 27157), 'numpy.unique', 'numpy.unique', (['pred_arr'], {'return_counts': '(True)'}), '(pred_arr, return_counts=True)\n', (27127, 27157), False, 'import numpy\n'), ((27648, 27659), 'time.time', 'time.time', ([], {}), '()\n', (27657, 27659), False, 'import time\n'), ((31358, 31371), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (31368, 31371), False, 'import time\n'), ((31423, 31436), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (31433, 31436), False, 'import time\n'), ((31477, 31508), 'joblib.load', 'load', (['"""mlp_dance_LATEST.joblib"""'], {}), "('mlp_dance_LATEST.joblib')\n", (31481, 31508), False, 'from joblib import dump, load\n'), ((31560, 31573), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (31570, 31573), False, 'import time\n'), ((31613, 31647), 'joblib.load', 'load', (['"""mlp_movement_LATEST.joblib"""'], {}), "('mlp_movement_LATEST.joblib')\n", (31617, 31647), False, 'from joblib import dump, load\n'), ((600, 635), 'bluepy.btle.DefaultDelegate.__init__', 'btle.DefaultDelegate.__init__', (['self'], {}), '(self)\n', (629, 635), False, 'from bluepy import btle\n'), ((27356, 27372), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (27370, 27372), False, 'from sklearn.preprocessing import StandardScaler\n'), ((31043, 31106), 'eval_client.Client', 'eval_client.Client', (['"""192.168.43.6"""', '(8080)', '(6)', '"""cg40024002group6"""'], {}), "('192.168.43.6', 8080, 6, 'cg40024002group6')\n", (31061, 31106), False, 'import eval_client\n'), ((31189, 31258), 'dashBoardClient.Client', 'dashBoardClient.Client', 
(['"""192.168.43.248"""', '(8080)', '(6)', '"""cg40024002group6"""'], {}), "('192.168.43.248', 8080, 6, 'cg40024002group6')\n", (31211, 31258), False, 'import dashBoardClient\n'), ((31709, 31761), 'concurrent.futures.ThreadPoolExecutor', 'concurrent.futures.ThreadPoolExecutor', ([], {'max_workers': '(7)'}), '(max_workers=7)\n', (31746, 31761), False, 'import concurrent\n'), ((33307, 33329), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (33327, 33329), False, 'import multiprocessing\n'), ((724, 735), 'time.time', 'time.time', ([], {}), '()\n', (733, 735), False, 'import time\n'), ((7564, 7575), 'time.time', 'time.time', ([], {}), '()\n', (7573, 7575), False, 'import time\n'), ((32071, 32082), 'time.time', 'time.time', ([], {}), '()\n', (32080, 32082), False, 'import time\n'), ((32208, 32221), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (32218, 32221), False, 'import time\n'), ((32373, 32425), 'concurrent.futures.ThreadPoolExecutor', 'concurrent.futures.ThreadPoolExecutor', ([], {'max_workers': '(7)'}), '(max_workers=7)\n', (32410, 32425), False, 'import concurrent\n'), ((11998, 12011), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (12008, 12011), False, 'import time\n'), ((12278, 12291), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12288, 12291), False, 'import time\n'), ((25246, 25268), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (25265, 25268), False, 'import operator\n'), ((27249, 27271), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (27268, 27271), False, 'import operator\n'), ((40711, 40748), 'eval_client.receive_dancer_position', 'eval_client.receive_dancer_position', ([], {}), '()\n', (40746, 40748), False, 'import eval_client\n'), ((7839, 7850), 'time.time', 'time.time', ([], {}), '()\n', (7848, 7850), False, 'import time\n'), ((35911, 35942), 'json.dumps', 'json.dumps', (['beetle1_moving_dict'], {}), '(beetle1_moving_dict)\n', (35921, 35942), False, 'import json\n'), 
((35974, 36005), 'json.dumps', 'json.dumps', (['beetle2_moving_dict'], {}), '(beetle2_moving_dict)\n', (35984, 36005), False, 'import json\n'), ((36037, 36068), 'json.dumps', 'json.dumps', (['beetle3_moving_dict'], {}), '(beetle3_moving_dict)\n', (36047, 36068), False, 'import json\n'), ((36171, 36203), 'json.dumps', 'json.dumps', (['beetle1_dancing_dict'], {}), '(beetle1_dancing_dict)\n', (36181, 36203), False, 'import json\n'), ((36235, 36267), 'json.dumps', 'json.dumps', (['beetle2_dancing_dict'], {}), '(beetle2_dancing_dict)\n', (36245, 36267), False, 'import json\n'), ((36299, 36331), 'json.dumps', 'json.dumps', (['beetle3_dancing_dict'], {}), '(beetle3_dancing_dict)\n', (36309, 36331), False, 'import json\n'), ((11165, 11189), 'bluepy.btle.Peripheral', 'btle.Peripheral', (['address'], {}), '(address)\n', (11180, 11189), False, 'from bluepy import btle\n'), ((40508, 40530), 'random.choice', 'random.choice', (['ACTIONS'], {}), '(ACTIONS)\n', (40521, 40530), False, 'import random\n'), ((2846, 2857), 'time.time', 'time.time', ([], {}), '()\n', (2855, 2857), False, 'import time\n')] |
import os
import keras
import skimage.io
import keras_contrib.applications
from metrics import *
from mrcnn import utils
from mrcnn import config
from imgaug import augmenters as iaa
from dataset import Dataset, PoseEstimationDataset
import numpy as np
import keras.backend as K
import mrcnn.model as modellib
class Config(config.Config):
    """Mask R-CNN training configuration for the LEGO brick detector."""
    NAME = 'LEGOVNO'
    # One image per GPU on a single GPU -> effective batch size of 1.
    IMAGES_PER_GPU = 1
    GPU_COUNT = 1
    # NOTE(review): presumably background + 3 brick classes (cf. the three
    # meshes used by PoseEstimationConfig) -- confirm.
    NUM_CLASSES = 4
    STEPS_PER_EPOCH = 1000
    # Discard detections below 90% confidence.
    DETECTION_MIN_CONFIDENCE = 0.9
    BACKBONE = 'resnet101'
    # Input images are resized/padded to the 1024x1024 range.
    IMAGE_MIN_DIM = 1024
    IMAGE_MAX_DIM = 1024
class InferenceConfig(Config):
    """Inference-time configuration; currently identical to training Config."""
    pass
class Model:
    """Convenience wrapper around mrcnn's MaskRCNN handling weight loading,
    training on a Dataset, and detection on single images."""
    # Modes accepted by __init__.
    TRAIN = 0
    INFERENCE = 1
    COCO_WEIGHTS_PATH = './mask_rcnn_coco.h5'
    # Symbolic weight names -> loader returning a weights file path.
    WEIGHT_LOADERS = {
        'coco': lambda self: self.__load_coco(),
        'last': lambda self: self.model.find_last()[1],
        'imagenet': lambda self: self.model.get_imagenet_weights()
    }
    def __init__(self, weights, mode, logs='./logs'):
        """Build the MaskRCNN graph in the requested *mode* and load
        *weights* ('coco', 'last', 'imagenet', or an explicit path)."""
        assert mode in (self.TRAIN, self.INFERENCE), 'Unrecognised mode'
        self.config = Config() if mode == self.TRAIN else InferenceConfig()
        self.model = modellib.MaskRCNN(mode='training' if mode == self.TRAIN else 'inference',
                                       config=self.config, model_dir=logs)
        lweights = weights.lower()
        weights_path = self.WEIGHT_LOADERS[lweights](self) if lweights in self.WEIGHT_LOADERS else weights
        # COCO's head layers have a different class count, so skip them.
        self.model.load_weights(weights_path, by_name=True,
                exclude=['mrcnn_class_logits', 'mrcnn_bbox_fc', 'mrcnn_bbox', 'mrcnn_mask'] if lweights == 'coco' else [])
    def train(self, data, epochs=30, learning_rate=1e-3):
        """Train all layers on the train/test splits described by *data*."""
        train_dataset = Dataset.load_and_prepare(data.root.train[:], data)
        test_dataset = Dataset.load_and_prepare(data.root.test[:], data)
        self.model.train(train_dataset, test_dataset, learning_rate=learning_rate, epochs=epochs, layers='all')
    def detect(self, image, verbose=1):
        """Run detection on one image; return the single result dict."""
        return self.model.detect([image], verbose=verbose)[0]
    def detect_file(self, path, verbose=1):
        """Load an image from *path* and run detection on it."""
        return self.detect(skimage.io.imread(path), verbose)
    def __load_coco(self):
        # Download the pretrained COCO weights on first use.
        if not os.path.exists(self.COCO_WEIGHTS_PATH):
            utils.download_trained_weights(self.COCO_WEIGHTS_PATH)
        return self.COCO_WEIGHTS_PATH
class ActivationLayer(keras.engine.topology.Layer):
    """Keras layer that L2-normalises its input along the last axis so the
    4-vector output can be read as a unit quaternion."""
    def __init__(self, **kwargs):
        super(ActivationLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        # No trainable weights; just defer to the base class.
        super(ActivationLayer, self).build(input_shape)
    def call(self, x):
        # x / ||x|| along the last axis (no epsilon: an all-zero input
        # would divide by zero).
        return x / K.expand_dims(K.sqrt(K.sum(K.square(x), axis=-1)))
    def compute_output_shape(self, input_shape):
        # Output is a quaternion per batch element.
        return (input_shape[0], 4)
class PoseEstimationConfig:
    """Hyper-parameters for the pose-estimation network."""
    BACKBONE = 'resnet18'
    # Single-channel 300x400 input images.
    INPUT_SHAPE = (300, 400, 1)
    # Fully-connected head sizes: shared trunk, then separate position
    # and orientation branches.
    SHARED_LAYERS = 0
    SHARED_UNITS = 1024
    POSITION_LAYERS = 0
    POSITION_UNITS = 1024
    ORIENTATION_LAYERS = 0
    ORIENTATION_UNITS = 1024
    BATCH_SIZE = 32
    VALIDATION_BATCH_SIZE = 1
    OPTIMIZER = keras.optimizers.Adam(lr=1e-3)
    # Mesh-based loss: rotate (quaternion) then translate (offset) the
    # reference brick meshes and compare via DiffMean.
    LOSSES = [
        MeshLoss(
            ['../models/1x1.obj', '../models/1x2.obj', '../models/1x3.obj'],
            SequentialLoss(
                [
                    RotationTransform(extract_quaternion),
                    OffsetTransform(extract_offset)
                ],
                DiffMean
            )
        )
    ]
    METRICS = [
        QuaternionDistanceMetric(extract_quaternion),
        QuaternionAngleMetric(extract_quaternion),
        DistanceMetric(extract_offset)
    ]
    # Checkpoint every SAVE_PERIOD epochs.
    SAVE_PERIOD = 10
    STEPS_PER_EPOCH = None
    VALIDATION_STEPS = None
    # Train-time image augmentation: occasional Gaussian blur plus a
    # random brightness multiplier, applied in random order.
    AUGMENTER = iaa.Sequential(
        [
            iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 3))),
            iaa.Multiply((0.5, 1.5))
        ],
        random_order=True
    )
class PoseEstimationModel():
    """CNN backbone + fully-connected heads predicting a 3-vector position
    and a unit-quaternion orientation (7 outputs, concatenated)."""
    # Backbone name -> builder returning an uncompiled feature extractor.
    BACKBONES = {
        'resnet18': lambda input_shape:
            PoseEstimationModel.__resnet(input_shape, 'basic', [2, 2, 2, 2]),
        'resnet34': lambda input_shape:
            PoseEstimationModel.__resnet(input_shape, 'basic', [3, 4, 6, 3]),
        'resnet50': lambda input_shape:
            PoseEstimationModel.__resnet(input_shape, 'bottleneck', [3, 4, 6, 3]),
        'xception': lambda input_shape:
            keras.applications.xception.Xception(include_top=False, weights=None, input_shape=input_shape, classes=None)
    }
    def __init__(self, config=None, weights=None, logs='./logs'):
        """Assemble and compile the model; optionally load *weights*."""
        if not config:
            config = PoseEstimationConfig()
        if not os.path.exists(logs):
            os.makedirs(logs)
        backbone = PoseEstimationModel.BACKBONES[config.BACKBONE](config.INPUT_SHAPE)
        output = backbone.output
        output = keras.layers.Flatten()(output)
        # Optional shared trunk before the two branches.
        for i in range(config.SHARED_LAYERS):
            output = keras.layers.Dense(config.SHARED_UNITS, activation='relu')(output)
        # Position branch (3 outputs) + orientation branch (4 outputs,
        # L2-normalised by ActivationLayer into a unit quaternion).
        model = keras.models.Model(inputs=backbone.input, outputs=keras.layers.concatenate([
            PoseEstimationModel.__make_fc_layers(output, config.POSITION_LAYERS, config.POSITION_UNITS, 3),
            ActivationLayer()(PoseEstimationModel.__make_fc_layers(output, config.ORIENTATION_LAYERS, config.ORIENTATION_UNITS, 4))
        ]))
        model.compile(
            optimizer=config.OPTIMIZER,
            loss=config.LOSSES,
            metrics=config.METRICS
        )
        if weights:
            model.load_weights(weights)
        self.model, self.config, self.logs = model, config, logs
    def train(self, data, epochs, initial_epoch=0):
        """Fit on the train/test splits described by *data*, with periodic
        checkpoints, LR reduction on plateau, and TensorBoard logging."""
        train_dataset = PoseEstimationDataset(data.root.train[:], data, self.config.BATCH_SIZE, self.config.AUGMENTER)
        # NOTE(review): this picks BATCH_SIZE whenever it is truthy, so
        # VALIDATION_BATCH_SIZE is only used when BATCH_SIZE is falsy --
        # confirm that is the intended precedence.
        test_dataset = PoseEstimationDataset(data.root.test[:], data,
            self.config.BATCH_SIZE if self.config.BATCH_SIZE else self.config.VALIDATION_BATCH_SIZE)
        save_best = keras.callbacks.ModelCheckpoint(
            os.path.join(self.logs, 'weights.{epoch:04d}.hdf5'),
            verbose=0,
            save_weights_only=True,
            period=self.config.SAVE_PERIOD
        )
        reduce_lr = keras.callbacks.ReduceLROnPlateau(
            monitor='loss', factor=0.2,
            patience=5, min_lr=0.00001)
        tensorboard = keras.callbacks.TensorBoard(log_dir=self.logs)
        self.model.fit_generator(
            train_dataset,
            validation_data=test_dataset,
            steps_per_epoch=self.config.STEPS_PER_EPOCH,
            epochs=epochs,
            callbacks=[save_best, reduce_lr, tensorboard],
            shuffle=True,
            workers=0,
            validation_steps=self.config.VALIDATION_STEPS,
            initial_epoch=initial_epoch
        )
    def predict(self, images, batch_size=1, verbose=0):
        """Return the model's 7-value predictions for *images*."""
        return self.model.predict(images, batch_size=batch_size, verbose=verbose)
    def evaluate(self, images, y, batch_size=1, verbose=0):
        """Evaluate loss/metrics of the model on (*images*, *y*)."""
        return self.model.evaluate(images, y, batch_size=batch_size, verbose=verbose)
    @staticmethod
    def __make_fc_layers(inputs, count, units, last_units):
        # (count - 1) hidden relu layers followed by a linear output layer.
        for i in range(count - 1):
            inputs = keras.layers.Dense(units, activation='relu')(inputs)
        return keras.layers.Dense(last_units)(inputs)
    @staticmethod
    def __resnet(input_shape, block, repetitions):
        # Headless keras-contrib ResNet used as the feature extractor.
        return keras_contrib.applications.resnet.ResNet(input_shape, None, block, repetitions=repetitions, include_top=False)
| [
"keras.optimizers.Adam",
"mrcnn.model.MaskRCNN",
"os.path.exists",
"keras.layers.Flatten",
"mrcnn.utils.download_trained_weights",
"os.makedirs",
"keras.callbacks.ReduceLROnPlateau",
"imgaug.augmenters.GaussianBlur",
"keras.backend.square",
"os.path.join",
"dataset.PoseEstimationDataset",
"ker... | [((2767, 2798), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (2788, 2798), False, 'import keras\n'), ((1051, 1164), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': "('training' if mode == self.TRAIN else 'inference')", 'config': 'self.config', 'model_dir': 'logs'}), "(mode='training' if mode == self.TRAIN else 'inference',\n config=self.config, model_dir=logs)\n", (1068, 1164), True, 'import mrcnn.model as modellib\n'), ((1536, 1586), 'dataset.Dataset.load_and_prepare', 'Dataset.load_and_prepare', (['data.root.train[:]', 'data'], {}), '(data.root.train[:], data)\n', (1560, 1586), False, 'from dataset import Dataset, PoseEstimationDataset\n'), ((1604, 1653), 'dataset.Dataset.load_and_prepare', 'Dataset.load_and_prepare', (['data.root.test[:]', 'data'], {}), '(data.root.test[:], data)\n', (1628, 1653), False, 'from dataset import Dataset, PoseEstimationDataset\n'), ((4926, 5024), 'dataset.PoseEstimationDataset', 'PoseEstimationDataset', (['data.root.train[:]', 'data', 'self.config.BATCH_SIZE', 'self.config.AUGMENTER'], {}), '(data.root.train[:], data, self.config.BATCH_SIZE,\n self.config.AUGMENTER)\n', (4947, 5024), False, 'from dataset import Dataset, PoseEstimationDataset\n'), ((5038, 5177), 'dataset.PoseEstimationDataset', 'PoseEstimationDataset', (['data.root.test[:]', 'data', '(self.config.BATCH_SIZE if self.config.BATCH_SIZE else self.config.\n VALIDATION_BATCH_SIZE)'], {}), '(data.root.test[:], data, self.config.BATCH_SIZE if\n self.config.BATCH_SIZE else self.config.VALIDATION_BATCH_SIZE)\n', (5059, 5177), False, 'from dataset import Dataset, PoseEstimationDataset\n'), ((5379, 5470), 'keras.callbacks.ReduceLROnPlateau', 'keras.callbacks.ReduceLROnPlateau', ([], {'monitor': '"""loss"""', 'factor': '(0.2)', 'patience': '(5)', 'min_lr': '(1e-05)'}), "(monitor='loss', factor=0.2, patience=5,\n min_lr=1e-05)\n", (5412, 5470), False, 'import keras\n'), ((5502, 5548), 'keras.callbacks.TensorBoard', 
'keras.callbacks.TensorBoard', ([], {'log_dir': 'self.logs'}), '(log_dir=self.logs)\n', (5529, 5548), False, 'import keras\n'), ((1985, 2023), 'os.path.exists', 'os.path.exists', (['self.COCO_WEIGHTS_PATH'], {}), '(self.COCO_WEIGHTS_PATH)\n', (1999, 2023), False, 'import os\n'), ((2028, 2082), 'mrcnn.utils.download_trained_weights', 'utils.download_trained_weights', (['self.COCO_WEIGHTS_PATH'], {}), '(self.COCO_WEIGHTS_PATH)\n', (2058, 2082), False, 'from mrcnn import utils\n'), ((3328, 3352), 'imgaug.augmenters.Multiply', 'iaa.Multiply', (['(0.5, 1.5)'], {}), '((0.5, 1.5))\n', (3340, 3352), True, 'from imgaug import augmenters as iaa\n'), ((3778, 3890), 'keras.applications.xception.Xception', 'keras.applications.xception.Xception', ([], {'include_top': '(False)', 'weights': 'None', 'input_shape': 'input_shape', 'classes': 'None'}), '(include_top=False, weights=None,\n input_shape=input_shape, classes=None)\n', (3814, 3890), False, 'import keras\n'), ((4017, 4037), 'os.path.exists', 'os.path.exists', (['logs'], {}), '(logs)\n', (4031, 4037), False, 'import os\n'), ((4042, 4059), 'os.makedirs', 'os.makedirs', (['logs'], {}), '(logs)\n', (4053, 4059), False, 'import os\n'), ((4179, 4201), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (4199, 4201), False, 'import keras\n'), ((5229, 5280), 'os.path.join', 'os.path.join', (['self.logs', '"""weights.{epoch:04d}.hdf5"""'], {}), "(self.logs, 'weights.{epoch:04d}.hdf5')\n", (5241, 5280), False, 'import os\n'), ((6310, 6340), 'keras.layers.Dense', 'keras.layers.Dense', (['last_units'], {}), '(last_units)\n', (6328, 6340), False, 'import keras\n'), ((3292, 3322), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', ([], {'sigma': '(0, 3)'}), '(sigma=(0, 3))\n', (3308, 3322), True, 'from imgaug import augmenters as iaa\n'), ((4263, 4321), 'keras.layers.Dense', 'keras.layers.Dense', (['config.SHARED_UNITS'], {'activation': '"""relu"""'}), "(config.SHARED_UNITS, activation='relu')\n", (4281, 4321), False, 
'import keras\n'), ((6248, 6292), 'keras.layers.Dense', 'keras.layers.Dense', (['units'], {'activation': '"""relu"""'}), "(units, activation='relu')\n", (6266, 6292), False, 'import keras\n'), ((2393, 2404), 'keras.backend.square', 'K.square', (['x'], {}), '(x)\n', (2401, 2404), True, 'import keras.backend as K\n')] |
from django.conf.urls import include, url
from donations.views import DonateAPI, VerifyAPI
app_name = 'donations'
api_urls = ([
url(r'^donate/$', DonateAPI.as_view(), name="donate"),
url(r'^verify/(?P<pk>[0-9]+)$', VerifyAPI.as_view(), name="verify"),
], "donations")
donations = ([
url(r'^api/', include(api_urls, namespace="api")),
], "donations")
urlpatterns = [
url(r'^', include(donations, namespace="donations"))
]
| [
"django.conf.urls.include",
"donations.views.VerifyAPI.as_view",
"donations.views.DonateAPI.as_view"
] | [((397, 438), 'django.conf.urls.include', 'include', (['donations'], {'namespace': '"""donations"""'}), "(donations, namespace='donations')\n", (404, 438), False, 'from django.conf.urls import include, url\n'), ((153, 172), 'donations.views.DonateAPI.as_view', 'DonateAPI.as_view', ([], {}), '()\n', (170, 172), False, 'from donations.views import DonateAPI, VerifyAPI\n'), ((226, 245), 'donations.views.VerifyAPI.as_view', 'VerifyAPI.as_view', ([], {}), '()\n', (243, 245), False, 'from donations.views import DonateAPI, VerifyAPI\n'), ((313, 347), 'django.conf.urls.include', 'include', (['api_urls'], {'namespace': '"""api"""'}), "(api_urls, namespace='api')\n", (320, 347), False, 'from django.conf.urls import include, url\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 19 23:38:25 2022
@author: goran
"""
from ..general_cp import GeneralCP
from ..cp_utils import to_degrees, dihedral_angle, normal
from math import sqrt, pi, tan, atan2
box_packing_cell_nodes = {'A': (2, 0),
'B': (4, 0),
'C': (2, 2),
'D': (4, 2),
'E': (4, 3),
'F': (3.5, 3.5),
'G': (0, 0)}
angle1 = to_degrees(atan2(sqrt(2)/2, 2))
folded_wall_coords = [ (2, 2, 0),
(2, 1, 2),
(2, 2, 2)]
folded_top_coords = [ (2, 1, 2),
(2, 2, 2),
(1, 2, 2)]
folded_slanted_coords = [(1, 2, 2),
(2, 2, 0),
(2, 1, 2)]
angle1_check = to_degrees(dihedral_angle(normal(folded_top_coords),
normal(folded_slanted_coords)))
print('angle1 = ', angle1)
angle2 = to_degrees(dihedral_angle(normal(folded_wall_coords),
normal(folded_slanted_coords)))
print('angle2 = ', angle2)
box_packing_cell_edges = {'AC': -90,
'BD': 180,
'CD': -180,
'CE': 90 + angle1,
'DE': -180,
'EF': 90 + angle2,
'BG': 0}
def generate_box_packing():
l1 = ((0, 0), (2, 2))
l2 = ((4, 0), (4, 1))
l3 = ((0, 4), (1, 4))
#l4 = ((0, 8), (1, 8))
min_cell = GeneralCP(namednodes = box_packing_cell_nodes,
namededges = box_packing_cell_edges)
#min_cell.save_cp('test0')
c1 = min_cell.add_reflection(l1).add_reflection(l2).add_reflection(l3)#.add_reflection(l4)
c1.save_cp('box_packing_cell')
grid = c1.make_grid(grid_size = (5, 5), overlap_frac = 0.25)
grid.save_cp('box_packing_5x5')
| [
"math.sqrt"
] | [((532, 539), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (536, 539), False, 'from math import sqrt, pi, tan, atan2\n')] |
from .modules.common import *
import numpy as np
import os
from .modules.rs_structs import getRSformat
class RockstarFile(object):
def __init__(self,binfile,data,galaxies,debug):
self.galaxies = galaxies
self.binfile = binfile
self.debug = debug
self.header()
self.halos()
if data == 'particles':
self.particles()
self.f.close()
def header(self):
f = open(self.binfile,'rb')
f.seek(8*3 + 4*10,1)
self.num_halos = np.fromfile(f,dtype=np.int64,count=1)[0]
self.num_particles = np.fromfile(f,dtype=np.int64,count=1)[0]
#print self.num_halos
f.seek(4 + 4 + 8,1)
self.format_revision = np.fromfile(f,dtype=np.int32,count=1)[0]
if self.debug: print('found HALO_FORMAT_REVISION %d (header)' % self.format_revision)
bytes_left = 256 - f.tell()
f.seek(bytes_left,1)
self.f = f
self.halostruct = getRSformat(self)
def halos(self):
#print 'reading %d halos (%d)' % (self.num_halos,self.galaxies)
self.halodata = np.fromfile(self.f,dtype=self.halostruct,count=self.num_halos)
def particles(self):
self.particle_IDs = np.zeros(self.num_particles,dtype=np.int64)
self.particle_IDs.fill(-1)
self.particle_haloIDs = np.zeros(self.num_particles,dtype=np.int64)
self.particle_haloIDs.fill(-1)
nparts = 0
for i in range(0,self.num_halos):
hid = self.halodata[i]['id']
num_p = self.halodata[i]['num_p']
#print '%d %d' % (i,num_p)
pids = np.fromfile(self.f,dtype=np.int64,count=num_p)
self.particle_IDs[nparts:nparts+num_p] = pids
self.particle_haloIDs[nparts:nparts+num_p] = hid
nparts += num_p
#print 'complete'
def compileReturnArray(RS,data):
"""compile data from RS binary and return requested value"""
arr = []
singleval = False
## return particle ID data
if data == 'particles':
npart = 0
for i in range(0,len(RS)):
npart += len(RS[i].particle_IDs)
arr = np.zeros((npart,2),dtype=np.int64)
npart = 0
for i in range(0,len(RS)):
n = len(RS[i].particle_IDs)
arr[npart:npart+n,0] = RS[i].particle_IDs
arr[npart:npart+n,1] = RS[i].particle_haloIDs
npart += n
return arr
## return halo struct data
if data in RS[0].halostruct.names:
singleval = True
if RS[0].debug: print('%s found in halodata' % data)
nhalos = 0
for i in range(0,len(RS)):
nhalos += RS[i].num_halos
if singleval:
arr.extend(RS[i].halodata[data])
else:
arr.extend(RS[i].halodata)
#print nhalos,len(arr)
return np.asarray(arr)
def readrockstargalaxies(binfile,data,**kwargs):
if 'galaxies' in kwargs: del kwargs['galaxies']
arr = readrockstar(binfile,data,galaxies=1,**kwargs)
return arr
def readrockstar(binfile,data,**kwargs):
"""read rockstar binary file
Parameters
----------
binfile : string
path to rockstar binary file. Do NOT include file extention or leading number
data : string
requested data, see readme for details
Examples
--------
>>> halo_mass = readrockstar('/Users/bob/halos_020','m')
>>> halo_mass
array([ 7.25643648e+08, 5.70148608e+08, 3.97376288e+08,
3.66277274e+09, 1.99379231e+10, 5.01039648e+08,
...,
1.58950515e+09, 2.10782208e+09, 8.41401088e+09,
4.14653504e+08], dtype=float32)
"""
galaxies = 0
if 'galaxies' in kwargs and kwargs['galaxies']==1:
galaxies = 1
debug = 0
if 'debug' in kwargs and kwargs['debug']==1:
debug = 1
RS_DATA = []
for j in range(0,5000):
b = '%s.%d.bin' % (binfile,j)
if os.path.isfile(b):
if debug: print('reading %s' % b)
RS_DATA.append(RockstarFile(b,data,galaxies,debug))
else:
break
arr = compileReturnArray(RS_DATA,data)
return arr
| [
"os.path.isfile",
"numpy.fromfile",
"numpy.asarray",
"numpy.zeros"
] | [((2883, 2898), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (2893, 2898), True, 'import numpy as np\n'), ((1123, 1187), 'numpy.fromfile', 'np.fromfile', (['self.f'], {'dtype': 'self.halostruct', 'count': 'self.num_halos'}), '(self.f, dtype=self.halostruct, count=self.num_halos)\n', (1134, 1187), True, 'import numpy as np\n'), ((1252, 1296), 'numpy.zeros', 'np.zeros', (['self.num_particles'], {'dtype': 'np.int64'}), '(self.num_particles, dtype=np.int64)\n', (1260, 1296), True, 'import numpy as np\n'), ((1364, 1408), 'numpy.zeros', 'np.zeros', (['self.num_particles'], {'dtype': 'np.int64'}), '(self.num_particles, dtype=np.int64)\n', (1372, 1408), True, 'import numpy as np\n'), ((2203, 2239), 'numpy.zeros', 'np.zeros', (['(npart, 2)'], {'dtype': 'np.int64'}), '((npart, 2), dtype=np.int64)\n', (2211, 2239), True, 'import numpy as np\n'), ((3983, 4000), 'os.path.isfile', 'os.path.isfile', (['b'], {}), '(b)\n', (3997, 4000), False, 'import os\n'), ((537, 576), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int64', 'count': '(1)'}), '(f, dtype=np.int64, count=1)\n', (548, 576), True, 'import numpy as np\n'), ((607, 646), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int64', 'count': '(1)'}), '(f, dtype=np.int64, count=1)\n', (618, 646), True, 'import numpy as np\n'), ((737, 776), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int32', 'count': '(1)'}), '(f, dtype=np.int32, count=1)\n', (748, 776), True, 'import numpy as np\n'), ((1657, 1705), 'numpy.fromfile', 'np.fromfile', (['self.f'], {'dtype': 'np.int64', 'count': 'num_p'}), '(self.f, dtype=np.int64, count=num_p)\n', (1668, 1705), True, 'import numpy as np\n')] |
import boto3
from datetime import datetime, date
import re
import string
import pandas as pd
from spellchecker import SpellChecker
import uuid
import psycopg2
from psycopg2 import sql
import sys
sys.path.append('.')
from rule_processing import postgresql
def queryTable(conn, table):
cmd = """
SELECT * FROM {}
"""
with conn.cursor() as cur:
cur.execute(sql.SQL(cmd).format(sql.Identifier(table)))
return cur.fetchall()
compr = boto3.client(service_name='comprehend')
compr_m = boto3.client(service_name='comprehendmedical')
spell = SpellChecker()
conn = postgresql.connect()
spelling_list = [x[0] for x in queryTable(conn, 'spellchecker')]
conn.close()
# Add words to spell list
spell.word_frequency.load_words(spelling_list)
def findId(val):
if val == '-1':
return str(uuid.uuid4())
return val
def findUnidentified(val):
if val.lower() == 'unidentified':
return 'U/I'
return val
def convert2CM(height):
if not isinstance(height, str):
return 0
try:
parts = height.split(' ')
unit = parts[1]
if unit == 'CM':
return float(parts[0])
elif unit == 'IN':
quantity_parts = parts[0].replace("'", ' ').replace('"', ' ').split()
foot = quantity_parts[0]
inch = 0
if len(quantity_parts) == 2:
inch = quantity_parts[1]
return float(foot) * 30.48 + float(inch) * 2.54
except:
return 0
def convert2KG(weight):
if not isinstance(weight, str):
return 0
try:
parts = weight.split(' ')
unit = parts[1]
if unit == 'KG':
return float(parts[0])
elif unit == 'LBS':
return 0.453592 * float(parts[0])
except:
return 0
def dob2age(dob):
try:
birthdate = datetime.strptime(dob, '%Y-%m-%d')
today = date.today()
age = today.year - birthdate.year - ((today.month, today.day) < (birthdate.month, birthdate.day))
return age
except:
return 0
def contains_word(sample, text):
return f' {sample} ' in f' {text} '
def preProcessText(col):
"""
Takes in a pandas.Series and preprocesses the text
"""
reponct = string.punctuation.replace("?","").replace("/","")
rehtml = re.compile('<.*>')
extr = col.str.strip()
extr = extr.str.replace(rehtml, '', regex=True)
extr = extr.str.translate(str.maketrans('','',reponct))
extr = extr.str.replace('[^0-9a-zA-Z?/ ]+', ' ', regex=True)
extr = extr.str.replace('\s+', ' ', regex=True)
extr = extr.str.lower()
return extr
def checkSpelling(text: str):
words = text.split()
return ' '.join([spell.correction(word) for word in words])
def replace_conjunctions(conj_list, text: str, info_list):
temp_text = f' {text} '
for conj in conj_list:
if contains_word(conj[0],text):
info_list.append(conj[1])
temp_text = temp_text.replace(f' {conj[0]} ', f' {conj[1]} ')
return temp_text[1:len(temp_text)-1]
def find_all_entities(data: str):
if not data:
return []
try:
result = compr_m.detect_entities_v2(Text=data)
return result['Entities']
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def infer_icd10_cm(data: str, med_cond, diagnosis, symptoms):
"""
:data type: string to pass through Comprehend Medical icd10_cm
:med_cond type: List[]
:diagnosis type: List[]
:symptoms type: List[]
"""
if not data:
return
try:
icd10_result = compr_m.infer_icd10_cm(Text=data)
for resp in icd10_result['Entities']:
if resp['Score'] > 0.4:
resp_str = resp['Text']
category = ''
# first check Attributes
for attr in resp['Attributes']:
if attr['Score'] > 0.4:
if attr['Type'] == 'ACUITY' or attr['Type'] == 'DIRECTION':
resp_str = f'{attr["Text"]}' + ' ' + resp_str
elif attr['Type'] == 'SYSTEM_ORGAN_SITE':
resp_str = resp_str + ' ' + f'{attr["Text"]}'
for trait in resp['Traits']:
if trait['Score'] > 0.4:
if trait['Name'] == 'NEGATION':
category = 'NEG'
break #don't save anything for negation
elif trait['Name'] == 'SYMPTOM':
category = 'SYMP'
elif trait['Name'] == 'DIAGNOSIS':
category = 'DIAGN'
# add our response string to corresponding list
if not category:
resp_str = checkSpelling(resp_str)
med_cond.append(resp_str)
elif category == 'SYMP':
resp_str = checkSpelling(resp_str)
symptoms.append(resp_str)
elif category == 'DIAGN':
resp_str = checkSpelling(resp_str)
diagnosis.append(resp_str)
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def find_key_phrases(data:str, key_phrases, icd10cm_list, anatomy_list):
"""
:data type: string to pass through Comprehend Detect Key Phrases
:key_phrases type: List[]
:icd10cm_list type: List[]
:anatomy_list type: List[]
"""
if not data:
return
try:
kp_result = compr.detect_key_phrases(Text=data, LanguageCode='en')
for resp in kp_result['KeyPhrases']:
placed = False
if resp['Score'] > 0.4:
for icd10cm in icd10cm_list:
if contains_word(icd10cm, resp['Text']):
resp_str = checkSpelling(resp['Text'])
key_phrases.append(resp_str)
placed = True
break
elif contains_word(resp['Text'], icd10cm):
resp_str = checkSpelling(resp['Text'])
key_phrases.append(resp_str)
placed = True
break
if not placed:
for anatomy in anatomy_list:
if contains_word(anatomy, resp['Text']):
resp_str = checkSpelling(resp['Text'])
key_phrases.append(resp_str)
break
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
| [
"rule_processing.postgresql.connect",
"boto3.client",
"re.compile",
"datetime.datetime.strptime",
"spellchecker.SpellChecker",
"uuid.uuid4",
"string.punctuation.replace",
"datetime.date.today",
"sys.path.append",
"psycopg2.sql.Identifier",
"psycopg2.sql.SQL"
] | [((198, 218), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (213, 218), False, 'import sys\n'), ((466, 505), 'boto3.client', 'boto3.client', ([], {'service_name': '"""comprehend"""'}), "(service_name='comprehend')\n", (478, 505), False, 'import boto3\n'), ((516, 562), 'boto3.client', 'boto3.client', ([], {'service_name': '"""comprehendmedical"""'}), "(service_name='comprehendmedical')\n", (528, 562), False, 'import boto3\n'), ((571, 585), 'spellchecker.SpellChecker', 'SpellChecker', ([], {}), '()\n', (583, 585), False, 'from spellchecker import SpellChecker\n'), ((594, 614), 'rule_processing.postgresql.connect', 'postgresql.connect', ([], {}), '()\n', (612, 614), False, 'from rule_processing import postgresql\n'), ((2325, 2343), 're.compile', 're.compile', (['"""<.*>"""'], {}), "('<.*>')\n", (2335, 2343), False, 'import re\n'), ((1857, 1891), 'datetime.datetime.strptime', 'datetime.strptime', (['dob', '"""%Y-%m-%d"""'], {}), "(dob, '%Y-%m-%d')\n", (1874, 1891), False, 'from datetime import datetime, date\n'), ((1908, 1920), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1918, 1920), False, 'from datetime import datetime, date\n'), ((825, 837), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (835, 837), False, 'import uuid\n'), ((2261, 2296), 'string.punctuation.replace', 'string.punctuation.replace', (['"""?"""', '""""""'], {}), "('?', '')\n", (2287, 2296), False, 'import string\n'), ((403, 424), 'psycopg2.sql.Identifier', 'sql.Identifier', (['table'], {}), '(table)\n', (417, 424), False, 'from psycopg2 import sql\n'), ((383, 395), 'psycopg2.sql.SQL', 'sql.SQL', (['cmd'], {}), '(cmd)\n', (390, 395), False, 'from psycopg2 import sql\n')] |
import datetime
from bgmi.script import ScriptBase
from bgmi.utils import parse_episode
class Script(ScriptBase):
class Model(ScriptBase.Model):
bangumi_name = "TEST_BANGUMI"
cover = ""
update_time = "Mon"
due_date = datetime.datetime(2017, 9, 30)
def get_download_url(self):
# fetch and return dict
# ignore they are not same bangumi.
resp = [
{
"title": "[c.c动漫][4月新番][影之诗][ShadowVerse][01][简日][HEVC][1080P][MP4]",
"link": "http://example.com/Bangumi/1/1.torrent",
},
{
"title": "[YMDR][慕留人 -火影忍者新时代-][2017][2][AVC][JAP][BIG5][MP4][1080P]",
"link": "http://example.com/Bangumi/1/2.torrent",
},
{
"title": "[ZXSUB仲夏动漫字幕组][博人传-火影忍者次世代][03][720P繁体][MP4]",
"link": "magnet:?xt=urn:btih:233",
},
]
ret = {}
for item in resp:
e = parse_episode(item["title"])
if e:
ret[e] = item["link"]
return ret
if __name__ == "__main__":
s = Script()
print(s.get_download_url())
| [
"datetime.datetime",
"bgmi.utils.parse_episode"
] | [((256, 286), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(9)', '(30)'], {}), '(2017, 9, 30)\n', (273, 286), False, 'import datetime\n'), ((999, 1027), 'bgmi.utils.parse_episode', 'parse_episode', (["item['title']"], {}), "(item['title'])\n", (1012, 1027), False, 'from bgmi.utils import parse_episode\n')] |
from collections import deque
def fill_the_box(*args):
box_size = args[0] * args[1] * args[2]
args = deque(args[3:])
while args:
curr_arg = args.popleft()
if curr_arg == "Finish":
break
box_size -= curr_arg
if box_size < 0:
args.remove("Finish")
return f"No more free space! You have {sum(args) + abs(box_size)} more cubes."
return f"There is free space in the box. You could put {abs(box_size // 1)} more cubes."
print(fill_the_box(2, 8, 2, 2, 1, 7, 3, 1, 5, "Finish"))
print(fill_the_box(5, 5, 2, 40, 11, 7, 3, 1, 5, "Finish"))
print(fill_the_box(10, 10, 10, 40, "Finish", 2, 15, 30))
| [
"collections.deque"
] | [((111, 126), 'collections.deque', 'deque', (['args[3:]'], {}), '(args[3:])\n', (116, 126), False, 'from collections import deque\n')] |
"""Implementation of allocation API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from treadmill import discovery
from treadmill import context
_LOGGER = logging.getLogger(__name__)
class API(object):
"""Treadmill Local REST api."""
def __init__(self):
def _get(hostname):
"""Get hostname nodeinfo endpoint info."""
_LOGGER.info('Redirect: %s', hostname)
discovery_iter = discovery.iterator(
context.GLOBAL.zk.conn,
'root.%s' % hostname, 'nodeinfo', False
)
for (_app, hostport) in discovery_iter:
if not hostport:
continue
_LOGGER.info('Found: %s - %s', hostname, hostport)
return hostport
_LOGGER.info('nodeinfo not found: %s', hostname)
return None
self.get = _get
| [
"logging.getLogger",
"treadmill.discovery.iterator"
] | [((282, 309), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (299, 309), False, 'import logging\n'), ((556, 643), 'treadmill.discovery.iterator', 'discovery.iterator', (['context.GLOBAL.zk.conn', "('root.%s' % hostname)", '"""nodeinfo"""', '(False)'], {}), "(context.GLOBAL.zk.conn, 'root.%s' % hostname, 'nodeinfo',\n False)\n", (574, 643), False, 'from treadmill import discovery\n')] |
from django.shortcuts import render
from django.shortcuts import HttpResponse
# Create your views here.
def index(request):
return HttpResponse('Hello World</en>')
| [
"django.shortcuts.HttpResponse"
] | [((134, 166), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""Hello World</en>"""'], {}), "('Hello World</en>')\n", (146, 166), False, 'from django.shortcuts import HttpResponse\n')] |
#!/usr/bin/python3
# I don't believe in license.
# You can do whatever you want with this program.
import os
import sys
import re
import time
import requests
import random
import argparse
from urllib.parse import urlparse
from functools import partial
from colored import fg, bg, attr
from multiprocessing.dummy import Pool
TOKENS_FILE = os.path.dirname(os.path.realpath(__file__))+'/.tokens'
MIN_LENGTH = 5
_url_chars = '[a-zA-Z0-9\-\.\?\#\$&@%=_:/\]\[]'
_not_url_chars = '[^a-zA-Z0-9\-\.\?\#\$&@%=_:/\]\[]'
t_endpoints = []
t_exclude = [
r'^http://$',
r'^https://$',
r'^javascript:$',
r'^tel:$',
r'^mailto:$',
r'^text/javascript$',
r'^application/json$',
r'^application/javascript$',
r'^text/plain$',
r'^text/html$',
r'^text/x-python$',
r'^text/css$',
r'^image/png$',
r'^image/jpeg$',
r'^image/x-icon$',
r'^img/favicon.ico$',
r'^application/x-www-form-urlencoded$',
r'/Users/[0-9a-zA-Z\-\_]/Desktop',
r'www.w3.org',
r'schemas.android.com',
r'www.apple.com',
# r'^#',
# r'^\?',
# r'^javascript:',
# r'^mailto:',
]
t_regexp = [
r'[\'"\(].*(http[s]?://'+_url_chars+'*?)[\'"\)]',
r'[\'"\(](http[s]?://'+_url_chars+'+)',
r'[\'"\(]('+_url_chars+'+\.sdirect'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.htm'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.php'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.asp'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.js'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.xml'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.ini'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.conf'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.cfm'+_url_chars+'*)',
r'href\s*[.=]\s*[\'"]('+_url_chars+'+)',
r'src\s*[.=]\s*[\'"]('+_url_chars+'+)',
r'url\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'urlRoot\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'endpoint[s]\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'script[s]\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'\.ajax\s*\(\s*[\'"]('+_url_chars+'+)',
r'\.get\s*\(\s*[\'"]('+_url_chars+'+)',
r'\.post\s*\(\s*[\'"]('+_url_chars+'+)',
r'\.load\s*\(\s*[\'"]('+_url_chars+'+)',
### a bit noisy
# r'[\'"](' + _url_chars + '+/' + _url_chars + '+)?[\'"]',
# r'content\s*[.=]\s*[\'"]('+_url_chars+'+)',
]
def githubApiSearchCode( token, search, page, sort, order ):
headers = { "Authorization":"token "+token }
url = 'https://api.github.com/search/code?per_page=100&s=' + sort + '&type=Code&o=' + order + '&q=' + search + '&page=' + str(page)
# print(">>> "+url)
try:
r = requests.get( url, headers=headers, timeout=5 )
json = r.json()
# print(r.json)
# print(r.text)
return json
except Exception as e:
print( "%s[-] error occurred: %s%s" % (fg('red'),e,attr(0)) )
return False
def getRawUrl( result ):
raw_url = result['html_url']
raw_url = raw_url.replace( 'https://github.com/', 'https://raw.githubusercontent.com/' )
raw_url = raw_url.replace( '/blob/', '/' )
return raw_url
def readCode( regexp, confirm, display_source, display_relative, display_alldomains, result ):
time.sleep( random.random() )
url = getRawUrl( result )
if url in t_history_urls:
return
str = ''
t_local_endpoints = []
t_history_urls.append( url )
code = doGetCode( url )
# print( code )
# print( regexp )
# print( confirm )
# print( display_source )
# print( display_relative )
# print( display_alldomains )
if code:
if display_source:
str = "\n%s>>> %s%s\n\n" % (fg('yellow'),result['html_url'],attr(0))
matches = re.findall( regexp, code, re.IGNORECASE )
if matches:
# domain found in the code
for r in t_regexp:
# looking for endpoints
edpt = re.findall( r, code, re.IGNORECASE )
if edpt:
# endpoints found
for endpoint in edpt:
endpoint = endpoint.strip()
if len(endpoint) >= MIN_LENGTH:
# sys.stdout.write("%s\n" % endpoint)
# continue
goodbye = False
for exclude in t_exclude:
if re.match(exclude,endpoint,re.IGNORECASE):
goodbye = True
break
if goodbye:
continue
if endpoint.lower().startswith('http'):
is_relative = False
else:
is_relative = True
if is_relative and not display_relative:
continue
if endpoint in t_local_endpoints:
continue
# ???
# if not display_source and endpoint in t_endpoints:
# continue
if not display_alldomains and not is_relative:
try:
t_url_parse = urlparse( endpoint )
t_host_parse = tldextract.extract( t_url_parse.netloc )
domain = t_host_parse.domain
# print(domain)
sss = re.findall( regexp, t_url_parse.netloc, re.IGNORECASE )
if not sss:
continue
except Exception as e:
sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
t_endpoints.append( endpoint )
t_local_endpoints.append( endpoint )
str = str + ("%s\n" % endpoint)
# if display_source:
# str = str + ("%s\n" % endpoint)
# else:
# sys.stdout.write( "%s\n" % endpoint )
# if display_source and len(t_local_endpoints):
if len(t_local_endpoints):
sys.stdout.write( str )
def doGetCode( url ):
try:
r = requests.get( url, timeout=5 )
except Exception as e:
sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
return False
return r.text
parser = argparse.ArgumentParser()
parser.add_argument( "-t","--token",help="your github token (required)" )
parser.add_argument( "-d","--domain",help="domain you are looking for (required)" )
parser.add_argument( "-e","--extend",help="also look for <dummy>example.com", action="store_true" )
parser.add_argument( "-a","--all",help="displays urls of all other domains", action="store_true" )
parser.add_argument( "-r","--relative",help="also displays relative urls", action="store_true" )
parser.add_argument( "-s","--source",help="display urls where endpoints are found", action="store_true" )
parser.add_argument( "-v","--verbose",help="verbose mode, for debugging purpose", action="store_true" )
parser.parse_args()
args = parser.parse_args()
t_tokens = []
if args.token:
t_tokens = args.token.split(',')
else:
if os.path.isfile(TOKENS_FILE):
fp = open(TOKENS_FILE,'r')
t_tokens = fp.read().split("\n")
fp.close()
if not len(t_tokens):
parser.error( 'auth token is missing' )
if args.source:
_source = True
else:
_source = False
if args.domain:
_domain = args.domain
else:
parser.error( 'domain is missing' )
if args.relative:
_relative = True
else:
_relative = False
if args.all:
_alldomains = True
else:
_alldomains = False
t_sort_order = [
{ 'sort':'indexed', 'order':'desc', },
{ 'sort':'indexed', 'order':'asc', },
{ 'sort':'', 'order':'desc', }
]
t_history = []
t_history_urls = []
_search = '"' + _domain + '"'
### this is a test, looks like we got more result that way
import tldextract
t_host_parse = tldextract.extract( _domain )
if args.extend:
# which one is
_search = '"' + t_host_parse.domain + '"'
else:
# the most effective ?
_search = '"' + t_host_parse.domain + '.' + t_host_parse.suffix + '"'
# or simply ?
# _search = '"' + _domain + '"'
# print(_search)
# exit()
###
if args.extend:
_regexp = r'(([0-9a-z_\-\.]+\.)?([0-9a-z_\-]+)?'+t_host_parse.domain+'([0-9a-z_\-\.]+)?\.[a-z]{1,5})'
_confirm = t_host_parse.domain
else:
_regexp = r'((([0-9a-z_\-\.]+)\.)?' + _domain.replace('.','\.')+')'
_confirm = _domain
if args.verbose:
print( "Search: %s" % _search )
print( "Regexp: %s" % _regexp)
print( "Confirm: %s" % _confirm)
print( "Relative urls: %s" % _relative)
print( "All domains: %s" % _alldomains)
for so in t_sort_order:
page = 1
if args.verbose:
print( '\n----- %s %s\n' % (so['sort'],so['order']) )
while True:
if args.verbose:
print("page %d" % page)
time.sleep( random.random() )
token = random.choice( t_tokens )
t_json = githubApiSearchCode( token, _search, page, so['sort'], so['order'] )
# print(t_json)
if not t_json or 'documentation_url' in t_json:
if args.verbose:
print(t_json)
t_tokens.remove(token)
if len(t_tokens) == 0:
exit()
continue
page = page + 1
if 'items' in t_json and len(t_json['items']):
pool = Pool( 30 )
pool.map( partial(readCode,_regexp,_confirm,_source,_relative,_alldomains), t_json['items'] )
pool.close()
pool.join()
else:
break
exit()
| [
"random.choice",
"colored.fg",
"argparse.ArgumentParser",
"urllib.parse.urlparse",
"re.match",
"requests.get",
"os.path.isfile",
"tldextract.extract",
"os.path.realpath",
"re.findall",
"functools.partial",
"colored.attr",
"random.random",
"multiprocessing.dummy.Pool",
"sys.stdout.write"
... | [((6684, 6709), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6707, 6709), False, 'import argparse\n'), ((8280, 8307), 'tldextract.extract', 'tldextract.extract', (['_domain'], {}), '(_domain)\n', (8298, 8307), False, 'import tldextract\n'), ((7501, 7528), 'os.path.isfile', 'os.path.isfile', (['TOKENS_FILE'], {}), '(TOKENS_FILE)\n', (7515, 7528), False, 'import os\n'), ((357, 383), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (373, 383), False, 'import os\n'), ((2605, 2650), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'timeout': '(5)'}), '(url, headers=headers, timeout=5)\n', (2617, 2650), False, 'import requests\n'), ((3197, 3212), 'random.random', 'random.random', ([], {}), '()\n', (3210, 3212), False, 'import random\n'), ((3694, 3733), 're.findall', 're.findall', (['regexp', 'code', 're.IGNORECASE'], {}), '(regexp, code, re.IGNORECASE)\n', (3704, 3733), False, 'import re\n'), ((6422, 6443), 'sys.stdout.write', 'sys.stdout.write', (['str'], {}), '(str)\n', (6438, 6443), False, 'import sys\n'), ((6492, 6520), 'requests.get', 'requests.get', (['url'], {'timeout': '(5)'}), '(url, timeout=5)\n', (6504, 6520), False, 'import requests\n'), ((9308, 9331), 'random.choice', 'random.choice', (['t_tokens'], {}), '(t_tokens)\n', (9321, 9331), False, 'import random\n'), ((9274, 9289), 'random.random', 'random.random', ([], {}), '()\n', (9287, 9289), False, 'import random\n'), ((9774, 9782), 'multiprocessing.dummy.Pool', 'Pool', (['(30)'], {}), '(30)\n', (9778, 9782), False, 'from multiprocessing.dummy import Pool\n'), ((3889, 3923), 're.findall', 're.findall', (['r', 'code', 're.IGNORECASE'], {}), '(r, code, re.IGNORECASE)\n', (3899, 3923), False, 'import re\n'), ((9807, 9876), 'functools.partial', 'partial', (['readCode', '_regexp', '_confirm', '_source', '_relative', '_alldomains'], {}), '(readCode, _regexp, _confirm, _source, _relative, _alldomains)\n', (9814, 9876), False, 'from functools 
import partial\n'), ((3635, 3647), 'colored.fg', 'fg', (['"""yellow"""'], {}), "('yellow')\n", (3637, 3647), False, 'from colored import fg, bg, attr\n'), ((3667, 3674), 'colored.attr', 'attr', (['(0)'], {}), '(0)\n', (3671, 3674), False, 'from colored import fg, bg, attr\n'), ((2819, 2828), 'colored.fg', 'fg', (['"""red"""'], {}), "('red')\n", (2821, 2828), False, 'from colored import fg, bg, attr\n'), ((2831, 2838), 'colored.attr', 'attr', (['(0)'], {}), '(0)\n', (2835, 2838), False, 'from colored import fg, bg, attr\n'), ((6610, 6619), 'colored.fg', 'fg', (['"""red"""'], {}), "('red')\n", (6612, 6619), False, 'from colored import fg, bg, attr\n'), ((6622, 6629), 'colored.attr', 'attr', (['(0)'], {}), '(0)\n', (6626, 6629), False, 'from colored import fg, bg, attr\n'), ((4377, 4419), 're.match', 're.match', (['exclude', 'endpoint', 're.IGNORECASE'], {}), '(exclude, endpoint, re.IGNORECASE)\n', (4385, 4419), False, 'import re\n'), ((5331, 5349), 'urllib.parse.urlparse', 'urlparse', (['endpoint'], {}), '(endpoint)\n', (5339, 5349), False, 'from urllib.parse import urlparse\n'), ((5403, 5441), 'tldextract.extract', 'tldextract.extract', (['t_url_parse.netloc'], {}), '(t_url_parse.netloc)\n', (5421, 5441), False, 'import tldextract\n'), ((5603, 5656), 're.findall', 're.findall', (['regexp', 't_url_parse.netloc', 're.IGNORECASE'], {}), '(regexp, t_url_parse.netloc, re.IGNORECASE)\n', (5613, 5656), False, 'import re\n'), ((5899, 5908), 'colored.fg', 'fg', (['"""red"""'], {}), "('red')\n", (5901, 5908), False, 'from colored import fg, bg, attr\n'), ((5911, 5918), 'colored.attr', 'attr', (['(0)'], {}), '(0)\n', (5915, 5918), False, 'from colored import fg, bg, attr\n')] |
"""
Solves a 3x3 square programmatically.
It is not meant to be a full blown solution for magic squares, but rather a writeup
of my thoughts on how it can be solved.
"""
import statistics
def make_pairs(I, mid):
"""
We take pairs as [ [9, 1], [8, 2], [7, 3], [6, 4]]
:param I:
:param mid:
:return:
"""
h = 0
t = len(I) - 1
pairs = []
while h < mid-1:
pairs.append([I[h], I[t]])
h += 1
t -= 1
return pairs
def squares(n):
I = [x for x in range(1, n * n + 1)]
cols = n
mid = statistics.median(I)
print(f"I: {I}")
print(f"cols: {cols}")
print(f"mid: {mid}")
pairs = make_pairs(I, mid)
print(f"pairs: {pairs}")
# the pairs are taken from the left and rigt of mid
# so that the length is mid-1
assert len(pairs) == mid-1, f"len(pairs) = {len(pairs)} mid-1 = {mid-1}"
assert len(pairs[0]) == cols-1
if __name__ == '__main__':
squares(3)
| [
"statistics.median"
] | [((559, 579), 'statistics.median', 'statistics.median', (['I'], {}), '(I)\n', (576, 579), False, 'import statistics\n')] |
import napari
from pathlib import Path
from magicgui import magicgui
from typing import List
from cellfinder_napari.utils import brainglobe_logo
# TODO:
# how to store & fetch pre-trained models?
# TODO: params to add
NETWORK_VOXEL_SIZES = [5, 1, 1]
CUBE_WIDTH = 50
CUBE_HEIGHT = 20
CUBE_DEPTH = 20
# If using ROI, how many extra planes to analyse
MIN_PLANES_ANALYSE = 0
def detect():
    """Build and return the cellfinder cell-detection widget for napari.

    Heavy imports are deferred into this function so plugin discovery stays
    fast. The returned magicgui widget runs cellfinder detection in a
    background worker thread and adds the results to the viewer as
    "Detected" and "Rejected" point layers.
    """
    from math import ceil
    # from fancylog import fancylog
    # import cellfinder_napari as program_for_log
    from napari.qt.threading import thread_worker
    from cellfinder_core.main import main as cellfinder_run
    from cellfinder_core.classify.cube_generator import get_cube_depth_min_max
    from imlib.cells.cells import Cell
    from .utils import cells_to_array
    # GUI defaults; restore_defaults() below resets every control to these.
    # Trained_model defaults to Path.home() as a "no custom model" sentinel.
    DEFAULT_PARAMETERS = dict(
        voxel_size_z=5,
        voxel_size_y=2,
        voxel_size_x=2,
        Soma_diameter=16.0,
        ball_xy_size=6,
        ball_z_size=15,
        Ball_overlap=0.6,
        Filter_width=0.2,
        Threshold=10,
        Cell_spread=1.4,
        Max_cluster=100000,
        Trained_model=Path.home(),
        Start_plane=0,
        End_plane=0,
        Number_of_free_cpus=2,
        Analyse_local=False,
        Debug=False,
    )
    # Each keyword below configures the GUI control for the widget()
    # parameter of the same name; "Label" entries are cosmetic headers only.
    @magicgui(
        header=dict(
            widget_type="Label",
            label=f'<h1><img src="{brainglobe_logo}"width="100">cellfinder</h1>',
        ),
        detection_label=dict(
            widget_type="Label",
            label="<h3>Cell detection</h3>",
        ),
        data_options=dict(
            widget_type="Label",
            label="<b>Data:</b>",
        ),
        detection_options=dict(
            widget_type="Label",
            label="<b>Detection:</b>",
        ),
        classification_options=dict(
            widget_type="Label",
            label="<b>Classification:</b>",
        ),
        misc_options=dict(
            widget_type="Label",
            label="<b>Misc:</b>",
        ),
        voxel_size_z=dict(
            value=DEFAULT_PARAMETERS["voxel_size_z"],
            label="Voxel size (z)",
            step=0.1,
        ),
        voxel_size_y=dict(
            value=DEFAULT_PARAMETERS["voxel_size_y"],
            label="Voxel size (y)",
            step=0.1,
        ),
        voxel_size_x=dict(
            value=DEFAULT_PARAMETERS["voxel_size_x"],
            label="Voxel size (x)",
            step=0.1,
        ),
        Soma_diameter=dict(
            value=DEFAULT_PARAMETERS["Soma_diameter"], step=0.1
        ),
        ball_xy_size=dict(
            value=DEFAULT_PARAMETERS["ball_xy_size"], label="Ball filter (xy)"
        ),
        ball_z_size=dict(
            value=DEFAULT_PARAMETERS["ball_z_size"], label="Ball filter (z)"
        ),
        Ball_overlap=dict(value=DEFAULT_PARAMETERS["Ball_overlap"], step=0.1),
        Filter_width=dict(value=DEFAULT_PARAMETERS["Filter_width"], step=0.1),
        Threshold=dict(value=DEFAULT_PARAMETERS["Threshold"], step=0.1),
        Cell_spread=dict(value=DEFAULT_PARAMETERS["Cell_spread"], step=0.1),
        Max_cluster=dict(
            value=DEFAULT_PARAMETERS["Max_cluster"], min=0, max=10000000
        ),
        Trained_model=dict(value=DEFAULT_PARAMETERS["Trained_model"]),
        Start_plane=dict(
            value=DEFAULT_PARAMETERS["Start_plane"], min=0, max=100000
        ),
        End_plane=dict(
            value=DEFAULT_PARAMETERS["End_plane"], min=0, max=100000
        ),
        Number_of_free_cpus=dict(
            value=DEFAULT_PARAMETERS["Number_of_free_cpus"]
        ),
        Analyse_local=dict(
            value=DEFAULT_PARAMETERS["Analyse_local"], label="Analyse local"
        ),
        Debug=dict(value=DEFAULT_PARAMETERS["Debug"]),
        # Classification_batch_size=dict(max=4096),
        call_button=True,
        persist=True,
        reset_button=dict(widget_type="PushButton", text="Reset defaults"),
    )
    def widget(
        header,
        detection_label,
        data_options,
        viewer: napari.Viewer,
        Signal_image: napari.layers.Image,
        Background_image: napari.layers.Image,
        voxel_size_z: float,
        voxel_size_y: float,
        voxel_size_x: float,
        detection_options,
        Soma_diameter: float,
        ball_xy_size: float,
        ball_z_size: float,
        Ball_overlap: float,
        Filter_width: float,
        Threshold: int,
        Cell_spread: float,
        Max_cluster: int,
        classification_options,
        Trained_model: Path,
        misc_options,
        Start_plane: int,
        End_plane: int,
        Number_of_free_cpus: int,
        Analyse_local: bool,
        Debug: bool,
        reset_button,
    ) -> List[napari.types.LayerDataTuple]:
        """
        Parameters
        ----------
        Signal_image : napari.layers.Image
            Image layer containing the labelled cells
        Background_image : napari.layers.Image
            Image layer without labelled cells
        voxel_size_z : float
            Size of your voxels in the axial dimension
        voxel_size_y : float
            Size of your voxels in the y direction (top to bottom)
        voxel_size_x : float
            Size of your voxels in the x direction (left to right)
        Soma_diameter : float
            The expected in-plane soma diameter (microns)
        ball_xy_size : float
            Elliptical morphological in-plane filter size (microns)
        ball_z_size : float
            Elliptical morphological axial filter size (microns)
        Ball_overlap : float
            Fraction of the morphological filter needed to be filled
            to retain a voxel
        Filter_width : float
            Laplacian of Gaussian filter width (as a fraction of soma diameter)
        Threshold : int
            Cell intensity threshold (as a multiple of noise above the mean)
        Cell_spread : float
            Cell spread factor (for splitting up cell clusters)
        Max_cluster : int
            Largest putative cell cluster (in cubic um) where splitting
            should be attempted
        Trained_model : Path
            Trained model file path
        Start_plane : int
            First plane to process (to process a subset of the data)
        End_plane : int
            Last plane to process (to process a subset of the data)
        Number_of_free_cpus : int
            How many CPU cores to leave free
        Analyse_local : bool
            Only analyse planes around the current position
        Debug : bool
            Increase logging
        reset_button :
            Reset parameters to default
        """
        # Split the worker's output into accepted/rejected cells and add
        # them to the viewer as two point layers.
        def add_layers(points):
            points, rejected = cells_to_array(points)
            viewer.add_points(
                rejected,
                name="Rejected",
                size=15,
                n_dimensional=True,
                opacity=0.6,
                symbol="ring",
                face_color="lightskyblue",
                visible=False,
                metadata=dict(point_type=Cell.UNKNOWN),
            )
            viewer.add_points(
                points,
                name="Detected",
                size=15,
                n_dimensional=True,
                opacity=0.6,
                symbol="ring",
                face_color="lightgoldenrodyellow",
                metadata=dict(point_type=Cell.CELL),
            )
        # The actual detection runs off the GUI thread via napari's
        # thread_worker; results are delivered through worker.returned.
        @thread_worker
        def run(
            signal,
            background,
            voxel_sizes,
            Soma_diameter,
            ball_xy_size,
            ball_z_size,
            Start_plane,
            End_plane,
            Ball_overlap,
            Filter_width,
            Threshold,
            Cell_spread,
            Max_cluster,
            Trained_model,
            Number_of_free_cpus,
            # Classification_batch_size,
        ):
            points = cellfinder_run(
                signal,
                background,
                voxel_sizes,
                soma_diameter=Soma_diameter,
                ball_xy_size=ball_xy_size,
                ball_z_size=ball_z_size,
                start_plane=Start_plane,
                end_plane=End_plane,
                ball_overlap_fraction=Ball_overlap,
                log_sigma_size=Filter_width,
                n_sds_above_mean_thresh=Threshold,
                soma_spread_factor=Cell_spread,
                max_cluster_size=Max_cluster,
                trained_model=Trained_model,
                n_free_cpus=Number_of_free_cpus,
                # batch_size=Classification_batch_size,
            )
            return points
        # End_plane == 0 is the GUI default meaning "process to the end".
        if End_plane == 0:
            End_plane = len(Signal_image.data)
        voxel_sizes = (voxel_size_z, voxel_size_y, voxel_size_x)
        # Path.home() is the GUI default, i.e. no model file was chosen.
        if Trained_model == Path.home():
            Trained_model = None
        if Analyse_local:
            current_plane = viewer.dims.current_step[0]
            # so a reasonable number of cells in the plane are detected
            planes_needed = MIN_PLANES_ANALYSE + int(
                ceil((CUBE_DEPTH * NETWORK_VOXEL_SIZES[0]) / voxel_size_z)
            )
            Start_plane, End_plane = get_cube_depth_min_max(
                current_plane, planes_needed
            )
            Start_plane = max(0, Start_plane)
            End_plane = min(len(Signal_image.data), End_plane)
        worker = run(
            Signal_image.data,
            Background_image.data,
            voxel_sizes,
            Soma_diameter,
            ball_xy_size,
            ball_z_size,
            Start_plane,
            End_plane,
            Ball_overlap,
            Filter_width,
            Threshold,
            Cell_spread,
            Max_cluster,
            Trained_model,
            Number_of_free_cpus,
            # Classification_batch_size,
        )
        worker.returned.connect(add_layers)
        worker.start()
    widget.header.value = (
        "<p>Efficient cell detection in large images.</p>"
        '<p><a href="https://cellfinder.info" style="color:gray;">Website</a></p>'
        '<p><a href="https://docs.brainglobe.info/cellfinder/napari-plugin" style="color:gray;">Documentation</a></p>'
        '<p><a href="https://github.com/brainglobe/cellfinder-napari" style="color:gray;">Source</a></p>'
        '<p><a href="https://www.biorxiv.org/content/10.1101/2020.10.21.348771v2" style="color:gray;">Citation</a></p>'
        "<p><small>For help, hover the cursor over each parameter.</small>"
    )
    widget.header.native.setOpenExternalLinks(True)
    # Wire the "Reset defaults" button to restore every control's value.
    @widget.reset_button.changed.connect
    def restore_defaults(event=None):
        for name, value in DEFAULT_PARAMETERS.items():
            getattr(widget, name).value = value
    return widget
| [
"pathlib.Path.home",
"math.ceil",
"cellfinder_core.classify.cube_generator.get_cube_depth_min_max",
"cellfinder_core.main.main"
] | [((1099, 1110), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (1108, 1110), False, 'from pathlib import Path\n'), ((7906, 8325), 'cellfinder_core.main.main', 'cellfinder_run', (['signal', 'background', 'voxel_sizes'], {'soma_diameter': 'Soma_diameter', 'ball_xy_size': 'ball_xy_size', 'ball_z_size': 'ball_z_size', 'start_plane': 'Start_plane', 'end_plane': 'End_plane', 'ball_overlap_fraction': 'Ball_overlap', 'log_sigma_size': 'Filter_width', 'n_sds_above_mean_thresh': 'Threshold', 'soma_spread_factor': 'Cell_spread', 'max_cluster_size': 'Max_cluster', 'trained_model': 'Trained_model', 'n_free_cpus': 'Number_of_free_cpus'}), '(signal, background, voxel_sizes, soma_diameter=Soma_diameter,\n ball_xy_size=ball_xy_size, ball_z_size=ball_z_size, start_plane=\n Start_plane, end_plane=End_plane, ball_overlap_fraction=Ball_overlap,\n log_sigma_size=Filter_width, n_sds_above_mean_thresh=Threshold,\n soma_spread_factor=Cell_spread, max_cluster_size=Max_cluster,\n trained_model=Trained_model, n_free_cpus=Number_of_free_cpus)\n', (7920, 8325), True, 'from cellfinder_core.main import main as cellfinder_run\n'), ((8811, 8822), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (8820, 8822), False, 'from pathlib import Path\n'), ((9194, 9246), 'cellfinder_core.classify.cube_generator.get_cube_depth_min_max', 'get_cube_depth_min_max', (['current_plane', 'planes_needed'], {}), '(current_plane, planes_needed)\n', (9216, 9246), False, 'from cellfinder_core.classify.cube_generator import get_cube_depth_min_max\n'), ((9083, 9139), 'math.ceil', 'ceil', (['(CUBE_DEPTH * NETWORK_VOXEL_SIZES[0] / voxel_size_z)'], {}), '(CUBE_DEPTH * NETWORK_VOXEL_SIZES[0] / voxel_size_z)\n', (9087, 9139), False, 'from math import ceil\n')] |
from setuptools import setup, find_packages
# Load the package version without importing the package itself.
version = {}
with open("nltools/version.py") as f:
    exec(f.read(), version)
# Runtime dependencies are maintained in requirements.txt, one per line.
with open("requirements.txt") as f:
    requirements = f.read().splitlines()
extra_setuptools_args = dict(tests_require=["pytest"])
setup(
    name="nltools",
    version=version["__version__"],
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://cosanlab.github.io/nltools",
    python_requires=">=3.6",
    install_requires=requirements,
    extras_require={"interactive_plots": ["ipywidgets>=5.2.2"]},
    packages=find_packages(exclude=["nltools/tests"]),
    package_data={"nltools": ["resources/*"]},
    include_package_data=True,
    license="LICENSE.txt",
    description="A Python package to analyze neuroimaging data",
    long_description="nltools is a collection of python tools to perform "
    "preprocessing, univariate GLMs, and predictive "
    "multivariate modeling of neuroimaging data. It is the "
    "analysis engine powering www.neuro-learn.org.",
    keywords=["neuroimaging", "preprocessing", "analysis", "machine-learning"],
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
    ],
    **extra_setuptools_args
)
| [
"setuptools.find_packages"
] | [((559, 599), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['nltools/tests']"}), "(exclude=['nltools/tests'])\n", (572, 599), False, 'from setuptools import setup, find_packages\n')] |
from responsum.utils.fits_file import FITSFile, FITSExtension as FE
import pkg_resources
class FITSExtension(FE):
    """FITS extension that records the COSMOGRB version as its creator."""

    # Uses __init__ (not fits.BinTableHDU's constructor) so the extension
    # can be built via the .from_columns classmethod in the parent class.
    def __init__(self, data_tuple, header_tuple):
        version = pkg_resources.get_distribution("cosmogrb").version
        creator = "COSMOGRB v.%s" % version
        super(FITSExtension, self).__init__(data_tuple, header_tuple, creator=creator)
| [
"pkg_resources.get_distribution"
] | [((351, 393), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""cosmogrb"""'], {}), "('cosmogrb')\n", (381, 393), False, 'import pkg_resources\n')] |
#! -*- coding:utf-8
from typing import Callable, List, Optional
import numpy as np
import torch
import torchvision
__all__ = ["CIFAR10", "FashionMNIST"]
class CIFAR10(torch.utils.data.Dataset):
    """In-memory CIFAR10 wrapper.

    Samples are pulled from torchvision (and cached) at construction time.
    Supports an index subset, a virtual dataset length (indices wrap around)
    and optional re-shuffling of the access order once per virtual epoch.
    """

    def __init__(self,
                 root: str,
                 train: bool = True,
                 transform: Optional[Callable] = None,
                 target_transform: Optional[Callable] = None,
                 download: bool = False,
                 indices: List[int] = None,
                 data_length: int = None,
                 shuffle: bool = False):
        super(CIFAR10, self).__init__()
        source = torchvision.datasets.CIFAR10(root,
                                              train=train,
                                              transform=transform,
                                              target_transform=target_transform,
                                              download=download)
        self.__classes__ = source.classes
        if indices is None:
            indices = list(range(len(source)))
        # Materialise the selected samples up front (caching).
        self.__datas__ = []
        self.__labels__ = []
        for i in indices:
            sample, target = source[i]
            self.__datas__.append(sample)
            self.__labels__.append(target)
        self.__length__ = data_length if data_length is not None else len(self.data)
        self.__indices__ = np.arange(len(self.data))
        self.__shuffle__ = shuffle
        if self.shuffle:
            np.random.shuffle(self.__indices__)
        self.__call_count__ = 0

    @property
    def data(self):
        return self.__datas__

    @property
    def label(self):
        return self.__labels__

    @property
    def classes(self):
        return self.__classes__

    @property
    def indices(self):
        return self.__indices__

    @property
    def shuffle(self):
        return self.__shuffle__

    def __len__(self):
        return self.__length__

    def __getitem__(self, idx):
        pos = self.indices[idx % len(self.data)]
        sample = self.data[pos]
        target = self.label[pos]
        self.__call_count__ += 1
        # Re-shuffle the access order once a full (virtual) epoch was served.
        if self.shuffle and self.__call_count__ >= len(self):
            np.random.shuffle(self.__indices__)
            self.__call_count__ = 0
        return sample, target
class FashionMNIST(torch.utils.data.Dataset):
    """In-memory FashionMNIST wrapper.

    Samples are pulled from torchvision (and cached) at construction time.
    Supports an index subset, a virtual dataset length (indices wrap around)
    and optional re-shuffling of the access order once per virtual epoch.
    """

    def __init__(self,
                 root: str,
                 train: bool = True,
                 transform: Optional[Callable] = None,
                 target_transform: Optional[Callable] = None,
                 download: bool = False,
                 indices: List[int] = None,
                 data_length: int = None,
                 shuffle: bool = False):
        super(FashionMNIST, self).__init__()
        source = torchvision.datasets.FashionMNIST(root,
                                                  train=train,
                                                  transform=transform,
                                                  target_transform=target_transform,
                                                  download=download)
        self.__classes__ = source.classes
        if indices is None:
            indices = list(range(len(source)))
        # Materialise the selected samples up front (caching).
        cached = [source[i] for i in indices]
        self.__datas__ = [pair[0] for pair in cached]
        self.__labels__ = [pair[1] for pair in cached]
        self.__length__ = data_length if data_length is not None else len(self.data)
        self.__indices__ = np.arange(len(self.data))
        self.__shuffle__ = shuffle
        if self.shuffle:
            np.random.shuffle(self.__indices__)
        self.__call_count__ = 0

    @property
    def data(self):
        return self.__datas__

    @property
    def label(self):
        return self.__labels__

    @property
    def classes(self):
        return self.__classes__

    @property
    def indices(self):
        return self.__indices__

    @property
    def shuffle(self):
        return self.__shuffle__

    def __len__(self):
        return self.__length__

    def __getitem__(self, idx):
        pos = self.indices[idx % len(self.data)]
        sample = self.data[pos]
        target = self.label[pos]
        self.__call_count__ += 1
        # Re-shuffle the access order once a full (virtual) epoch was served.
        if self.shuffle and self.__call_count__ >= len(self):
            np.random.shuffle(self.__indices__)
            self.__call_count__ = 0
        return sample, target
| [
"numpy.random.shuffle",
"torchvision.datasets.FashionMNIST",
"torchvision.datasets.CIFAR10"
] | [((709, 835), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', (['root'], {'train': 'train', 'transform': 'transform', 'target_transform': 'target_transform', 'download': 'download'}), '(root, train=train, transform=transform,\n target_transform=target_transform, download=download)\n', (737, 835), False, 'import torchvision\n'), ((2883, 3014), 'torchvision.datasets.FashionMNIST', 'torchvision.datasets.FashionMNIST', (['root'], {'train': 'train', 'transform': 'transform', 'target_transform': 'target_transform', 'download': 'download'}), '(root, train=train, transform=transform,\n target_transform=target_transform, download=download)\n', (2916, 3014), False, 'import torchvision\n'), ((1553, 1588), 'numpy.random.shuffle', 'np.random.shuffle', (['self.__indices__'], {}), '(self.__indices__)\n', (1570, 1588), True, 'import numpy as np\n'), ((2230, 2265), 'numpy.random.shuffle', 'np.random.shuffle', (['self.__indices__'], {}), '(self.__indices__)\n', (2247, 2265), True, 'import numpy as np\n'), ((3752, 3787), 'numpy.random.shuffle', 'np.random.shuffle', (['self.__indices__'], {}), '(self.__indices__)\n', (3769, 3787), True, 'import numpy as np\n'), ((4429, 4464), 'numpy.random.shuffle', 'np.random.shuffle', (['self.__indices__'], {}), '(self.__indices__)\n', (4446, 4464), True, 'import numpy as np\n')] |
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
import torch
import numpy as np
import cv2
class Model:
    """Wrapper around a Detectron2 Mask R-CNN predictor that keeps only
    person instances (COCO class 0) from its segmentation output."""

    def __init__(self, confidence_thresh=0.6):
        config = get_cfg()
        config.merge_from_file(model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
        config.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_thresh  # set threshold for this model
        config.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
        self.model = DefaultPredictor(config)

    def get_seg_output(self, image: np.array):
        instances = self.model(image)['instances']
        person_instances = []
        for i in range(len(instances.pred_classes)):
            if instances.pred_classes[i] == 0:  # COCO class 0 == person
                person_instances.append(
                    (instances.pred_masks[i], instances.pred_classes[i]))
        return person_instances
class Preprocessing:
    """Turns a list of binary instance masks into a 2-channel trimap via
    morphological erosion (sure foreground) and dilation (possible region)."""

    def __init__(self, kernel, dilate_iter=5, erode_iter=1):
        self.kernel = kernel
        self.dilate_iter = dilate_iter
        self.erode_iter = erode_iter

    def get_target_mask(self, masks):
        # Union of all masks, clipped back to {0, 1}.
        merged = np.zeros(masks[0].shape)
        for mask in masks:
            merged = merged + mask
        return np.clip(merged, 0, 1)

    def get_trimap(self, masks):
        target = self.get_target_mask(masks)
        sure_fg = cv2.erode(target.astype('uint8'), self.kernel, iterations=self.erode_iter)
        possible = cv2.dilate(target.astype('uint8'), self.kernel, iterations=self.dilate_iter)
        h, w = target.shape
        trimap = np.zeros((h, w, 2))
        trimap[sure_fg == 1, 1] = 1
        trimap[possible == 0, 0] = 1
        return trimap
| [
"numpy.clip",
"detectron2.config.get_cfg",
"detectron2.model_zoo.get_checkpoint_url",
"numpy.zeros",
"detectron2.model_zoo.get_config_file",
"detectron2.engine.DefaultPredictor"
] | [((333, 342), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (340, 342), False, 'from detectron2.config import get_cfg\n'), ((580, 669), 'detectron2.model_zoo.get_checkpoint_url', 'model_zoo.get_checkpoint_url', (['"""COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"""'], {}), "(\n 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')\n", (608, 669), False, 'from detectron2 import model_zoo\n'), ((686, 707), 'detectron2.engine.DefaultPredictor', 'DefaultPredictor', (['cfg'], {}), '(cfg)\n', (702, 707), False, 'from detectron2.engine import DefaultPredictor\n'), ((1221, 1245), 'numpy.zeros', 'np.zeros', (['masks[0].shape'], {}), '(masks[0].shape)\n', (1229, 1245), True, 'import numpy as np\n'), ((1311, 1329), 'numpy.clip', 'np.clip', (['out', '(0)', '(1)'], {}), '(out, 0, 1)\n', (1318, 1329), True, 'import numpy as np\n'), ((1681, 1700), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (1689, 1700), True, 'import numpy as np\n'), ((371, 457), 'detectron2.model_zoo.get_config_file', 'model_zoo.get_config_file', (['"""COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"""'], {}), "(\n 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')\n", (396, 457), False, 'from detectron2 import model_zoo\n')] |
from setuptools import setup
# Minimal placeholder package definition; ships no modules (packages=[]).
setup(
    name="example-advanced-package", version="0.0.0", packages=[],
)
| [
"setuptools.setup"
] | [((30, 98), 'setuptools.setup', 'setup', ([], {'name': '"""example-advanced-package"""', 'version': '"""0.0.0"""', 'packages': '[]'}), "(name='example-advanced-package', version='0.0.0', packages=[])\n", (35, 98), False, 'from setuptools import setup\n')] |
from distutils.core import setup
# Packaging metadata for pyASA, a wrapper for the Cisco ASA REST API.
setup(
    name='pyASA',
    packages=['pyASA'],
    version='0.1.0',
    description='Wrapper for the Cisco ASA REST API',
    author='xpac',
    author_email='<EMAIL>',
    url='https://github.com/xpac1985/pyASA',
    download_url='https://github.com/xpac1985/pyASA/tarball/0.1.0',
    keywords=['cisco', 'asa', 'rest-api', 'wrapper', 'alpha'],
    classifiers=[],
)
| [
"distutils.core.setup"
] | [((34, 378), 'distutils.core.setup', 'setup', ([], {'name': '"""pyASA"""', 'packages': "['pyASA']", 'version': '"""0.1.0"""', 'description': '"""Wrapper for the Cisco ASA REST API"""', 'author': '"""xpac"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/xpac1985/pyASA"""', 'download_url': '"""https://github.com/xpac1985/pyASA/tarball/0.1.0"""', 'keywords': "['cisco', 'asa', 'rest-api', 'wrapper', 'alpha']", 'classifiers': '[]'}), "(name='pyASA', packages=['pyASA'], version='0.1.0', description=\n 'Wrapper for the Cisco ASA REST API', author='xpac', author_email=\n '<EMAIL>', url='https://github.com/xpac1985/pyASA', download_url=\n 'https://github.com/xpac1985/pyASA/tarball/0.1.0', keywords=['cisco',\n 'asa', 'rest-api', 'wrapper', 'alpha'], classifiers=[])\n", (39, 378), False, 'from distutils.core import setup\n')] |
import numpy as np
from example_functions import target_function_dict
from line_search_methods import line_search_dict
from main_methods import main_method_dict
from config import best_params
from helpers import generate_x0
def run_one(_theta, _main_method, _ls_method, params, ls_params):
    """Run a single (target function, main method, line search) combination.

    Instantiates the target function, draws a random starting point inside
    its bounds and returns the optimizer's result dict.
    """
    theta = _theta()
    starting_point = generate_x0(theta.n, *theta.bounds)
    line_search = _ls_method(ls_params)
    optimizer = _main_method(params, line_search)
    return optimizer(theta, np.array(starting_point))
def result_to_string(result):
    """Format a result dict as "status, iters, dur ms, ls iters, ls dur ms"."""
    perf = result['performance']
    ls = perf['line_search']
    fields = [
        result['status'],
        perf['iterations'],
        "{} ms".format(perf['duration']),
        ls['iterations'],
        "{} ms".format(round(ls['duration'], 2)),
    ]
    return ', '.join(str(field) for field in fields)
import warnings

# The np.warnings alias was deprecated and removed in NumPy 2.0; filter
# through the standard-library warnings module directly instead.
warnings.filterwarnings('ignore', category=RuntimeWarning)

# Sweep every configured (target function, main method, line search)
# combination and report each run's termination status.
for theta in best_params:
    for main_method in best_params[theta]:
        for line_search in best_params[theta][main_method]:
            result = run_one(
                target_function_dict[theta],
                main_method_dict[main_method],
                line_search_dict[line_search],
                best_params[theta][main_method][line_search]['params'],
                best_params[theta][main_method][line_search]['ls_params'],
            )
            status = result['status']
            print(f"{status}: {theta},{main_method},{line_search}")
| [
"numpy.array",
"helpers.generate_x0",
"numpy.warnings.filterwarnings"
] | [((946, 1007), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (972, 1007), True, 'import numpy as np\n'), ((323, 358), 'helpers.generate_x0', 'generate_x0', (['theta.n', '*theta.bounds'], {}), '(theta.n, *theta.bounds)\n', (334, 358), False, 'from helpers import generate_x0\n'), ((532, 544), 'numpy.array', 'np.array', (['x0'], {}), '(x0)\n', (540, 544), True, 'import numpy as np\n')] |
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
class GetTypeView(APIView):
    """Return the display name, account type and username of the caller.

    Account type is 'vendor' or 'buyer' if the user has the corresponding
    related object, otherwise 'admin'.
    """

    permission_classes = [IsAuthenticated]

    def get(self, request):
        user = request.user
        if hasattr(user, 'vendor'):
            account_type, name = 'vendor', user.vendor.name
        elif hasattr(user, 'buyer'):
            account_type, name = 'buyer', user.buyer.name
        else:
            account_type, name = 'admin', user.username
        return Response({
            'name': name,
            'type': account_type,
            'username': user.username,
        })
| [
"rest_framework.response.Response"
] | [((676, 690), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (684, 690), False, 'from rest_framework.response import Response\n')] |
'''
This is an abstract example of Extracting in an ETL pipeline.
Inspired from the "Introduction to Data Engineering" course on Datacamp.com
Author: <NAME>
'''
import requests
# Fetch the Hackernews post
resp = requests.get("https://hacker-news.firebaseio.com/v0/item/16222426.json")
# Print the response parsed as JSON
print(resp.json())
# Assign the score of the test to post_score
post_score = resp.json()['score']
print(post_score)
# Function to extract table to a pandas DataFrame
# NOTE(review): `pd` is never imported in this snippet -- as the module
# docstring says, this is an abstract example and will not run as-is.
def extract_table_to_pandas(tablename, db_engine):
    query = "SELECT * FROM {}".format(tablename)
    return pd.read_sql(query, db_engine)
# Connect to the database using the connection URI
# NOTE(review): `sqlalchemy` is likewise not imported here.
connection_uri = "postgresql://repl:password@localhost:5432/pagila"
db_engine = sqlalchemy.create_engine(connection_uri)
# Extract the film table into a pandas DataFrame
extract_table_to_pandas("film", db_engine)
# Extract the customer table into a pandas DataFrame
extract_table_to_pandas("customer", db_engine)
| [
"requests.get"
] | [((215, 287), 'requests.get', 'requests.get', (['"""https://hacker-news.firebaseio.com/v0/item/16222426.json"""'], {}), "('https://hacker-news.firebaseio.com/v0/item/16222426.json')\n", (227, 287), False, 'import requests\n')] |
"""
* 보석과 돌
J는 보석이며, S는 갖고 있는 돌이다. S에는 보석이 몇 개나 있을까? 대소문자는 구분한다.
- Example 1
Input : J = "aA", S = "aAAbbbb"
Output : 3
- Example 2
Input : J = "z", S = "ZZ"
Output : 0
"""
import collections
class Solution:
    """Count how many characters of S are jewels (characters of J)."""

    def numJewelsInStones(self, J: str, S: str) -> int:
        # Counter gives per-character frequencies of the stones in one pass.
        stone_counts = collections.Counter(S)
        return sum(stone_counts[jewel] for jewel in J)


if __name__ == '__main__':
    solution = Solution()
    print(solution.numJewelsInStones("aA", "aAAbbbb"))
"collections.Counter"
] | [((303, 325), 'collections.Counter', 'collections.Counter', (['S'], {}), '(S)\n', (322, 325), False, 'import collections\n')] |
from __future__ import print_function
import time
import uuid
import Adafruit_BluefruitLE
# 16-bit UUIDs 0xFEE0/0xFEE1 expanded onto the 128-bit Bluetooth base UUID.
CHARACTERISTIC_SERVICE_UUID = uuid.UUID('0000fee0-0000-1000-8000-00805f9b34fb')
CHARACTERISTIC_DATA_UUID = uuid.UUID('0000fee1-0000-1000-8000-00805f9b34fb')
# Platform-specific BLE backend supplied by Adafruit_BluefruitLE.
provider = Adafruit_BluefruitLE.get_provider()
def main():
    """Scan for the target BLE device, connect, and stream 16-byte frames.

    The device is located by its service UUID; after connecting, each data
    frame is written to the data characteristic with a short pause between
    writes, and a longer pause after the final frame.
    """
    provider.clear_cached_data()
    adapter = provider.get_default_adapter()
    if not adapter.is_powered:
        adapter.power_on()
    print('Searching for device...')
    try:
        adapter.start_scan()
        device = provider.find_device(service_uuids=[CHARACTERISTIC_SERVICE_UUID])
        if device is None:
            raise RuntimeError('Failed to find device!')
        else:
            print(device)
        print('device: {0}'.format(device.name))
        print('id: {0}'.format(device.id))
    finally:
        # Always stop scanning, even if no device was found.
        adapter.stop_scan()
    print('Connecting to device...')
    device.connect()
    try:
        print('Discovering services...')
        device.discover([CHARACTERISTIC_SERVICE_UUID], [CHARACTERISTIC_DATA_UUID])
        service = device.find_service(CHARACTERISTIC_SERVICE_UUID)
        print('service uuid: {0}'.format(service.uuid))
        data = service.find_characteristic(CHARACTERISTIC_DATA_UUID)
        print('characteristic uuid: {0}'.format(data.uuid))
        print('Writing Data..')
        # The 16-byte frames, in transmission order. (The original code
        # repeated the write/sleep stanza once per frame and also contained
        # a dead `bs = bytes(range(16))` assignment, removed here.)
        frames = [
            b'\x77\x61\x6E\x67\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00',
            b'\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
            b'\x00\x00\x00\x00\x00\x00\xE1\x0C\x06\x17\x2D\x23\x00\x00\x00\x00',
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
            b'\x00\x00\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\xfe\xc6\xc6',
            b'\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\xfe\xc6\xc0\xc0\xc6\xc6\xc6\xfe',
            b'\x00\x00\x00\xc6\xcc\xd8\xf0\xd8\xcc\xc6\xc6\x00\x00\x00\x00\x00',
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7c\x6c\x6c',
            b'\x7c\x00\x00\x00\x00\x00\x00\x00\x6c\x78\x70\x60\x00\x00\x00\x00',
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xce\xc6\xc6\xc6\xc6',
            b'\xce\xf8\x00\x00\x00\x30\x30\x30\x30\x30\x30\x30\x30\x00\x00\x00',
            b'\xfe\xc0\xc0\xfe\xc0\xc0\xc0\xfe\x00\x00\x00\x00\x00\x00\x00\x00',
        ]
        for bs in frames[:-1]:
            data.write_value(bs)
            time.sleep(0.1)
        # Final frame gets a longer settle time before disconnecting.
        data.write_value(frames[-1])
        time.sleep(3)
        print('Writing done.')
    finally:
        device.disconnect()
# Initialize the BLE system, then hand control to the provider's main loop,
# which invokes main() (Adafruit_BluefruitLE run_mainloop_with pattern).
provider.initialize()
provider.run_mainloop_with(main)
| [
"Adafruit_BluefruitLE.get_provider",
"uuid.UUID",
"time.sleep"
] | [((123, 172), 'uuid.UUID', 'uuid.UUID', (['"""0000fee0-0000-1000-8000-00805f9b34fb"""'], {}), "('0000fee0-0000-1000-8000-00805f9b34fb')\n", (132, 172), False, 'import uuid\n'), ((200, 249), 'uuid.UUID', 'uuid.UUID', (['"""0000fee1-0000-1000-8000-00805f9b34fb"""'], {}), "('0000fee1-0000-1000-8000-00805f9b34fb')\n", (209, 249), False, 'import uuid\n'), ((262, 297), 'Adafruit_BluefruitLE.get_provider', 'Adafruit_BluefruitLE.get_provider', ([], {}), '()\n', (295, 297), False, 'import Adafruit_BluefruitLE\n'), ((1493, 1508), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1503, 1508), False, 'import time\n'), ((1627, 1642), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1637, 1642), False, 'import time\n'), ((1761, 1776), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1771, 1776), False, 'import time\n'), ((1895, 1910), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1905, 1910), False, 'import time\n'), ((2029, 2044), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2039, 2044), False, 'import time\n'), ((2163, 2178), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2173, 2178), False, 'import time\n'), ((2297, 2312), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2307, 2312), False, 'import time\n'), ((2431, 2446), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2441, 2446), False, 'import time\n'), ((2565, 2580), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2575, 2580), False, 'import time\n'), ((2699, 2714), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2709, 2714), False, 'import time\n'), ((2833, 2848), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2843, 2848), False, 'import time\n'), ((2967, 2980), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2977, 2980), False, 'import time\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for turkish_morphology.validate."""
import os
from turkish_morphology import analysis_pb2
from turkish_morphology import validate
from absl.testing import absltest
from absl.testing import parameterized
from google.protobuf import text_format
# Directory holding the text-format analysis protos used by these tests.
_TESTDATA_DIR = "turkish_morphology/testdata"
def _read_file(path):
with open(path, "r") as f:
read = f.read()
return read
def _read_analysis(basename):
  """Parses the text-format Analysis proto testdata file for *basename*."""
  path = os.path.join(_TESTDATA_DIR, f"{basename}.pbtxt")
  analysis = analysis_pb2.Analysis()
  return text_format.Parse(_read_file(path), analysis)
class AnalysisTest(parameterized.TestCase):
  """Tests for validate.analysis on well-formed and ill-formed protos."""

  @parameterized.named_parameters([
      {
          "testcase_name": "SingleInflectionalGroupsWithProperFeature",
          "basename": "araba_with_proper",
      },
      {
          "testcase_name": "SingleInflectionalGroupsWithoutProperFeature",
          "basename": "araba_without_proper",
      },
      {
          "testcase_name": "MultipleInflectionalGroupsWithProperFeature",
          "basename": "yasa_with_proper",
      },
      {
          "testcase_name": "MultipleInflectionalGroupsWithoutProperFeature",
          "basename": "yasa_without_proper",
      },
  ])
  def test_success(self, basename):
    # Well-formed analyses must validate cleanly; validate.analysis signals
    # success by returning None (it raises on failure).
    analysis = _read_analysis(basename)
    actual = validate.analysis(analysis)
    self.assertIsNone(actual)

  @parameterized.named_parameters([
      {
          "testcase_name": "AnalysisMissingInflectionalGroups",
          "basename": "invalid_empty_analysis",
          "message": "Analysis is missing inflectional groups",
      },
      {
          "testcase_name": "InflectionalGroupMissingPartOfSpeechTag",
          "basename": "invalid_ig_missing_pos",
          "message": "Inflectional group 2 is missing part-of-speech tag",
      },
      {
          "testcase_name": "InflectionalGroupEmptyPartOfSpeechTag",
          "basename": "invalid_ig_empty_pos",
          "message": "Inflectional group 2 part-of-speech tag is empty",
      },
      {
          "testcase_name": "FirstInflectionalGroupMissingRoot",
          "basename": "invalid_first_ig_missing_root",
          "message": "Inflectional group 1 is missing root",
      },
      {
          "testcase_name": "DerivedInflectionalGroupMissingDerivation",
          "basename": "invalid_derived_ig_missing_derivation",
          "message": "Inflectional group 2 is missing derivational affix",
      },
      {
          "testcase_name": "AffixMissingFeature",
          "basename": "invalid_affix_missing_feature",
          "message": "Affix is missing feature",
      },
      {
          "testcase_name": "DerivationalAffixMissingMetaMorpheme",
          "basename": "invalid_derivational_affix_missing_meta_morpheme",
          "message": "Derivational affix is missing meta-morpheme",
      },
      {
          "testcase_name": "DerivationalAffixEmptyMetaMorpheme",
          "basename": "invalid_derivational_affix_empty_meta_morpheme",
          "message": "Derivational affix meta-morpheme is empty",
      },
      {
          "testcase_name": "FeatureMissingCategory",
          "basename": "invalid_feature_missing_category",
          "message": "Feature is missing category",
      },
      {
          "testcase_name": "FeatureEmptyCategory",
          "basename": "invalid_feature_empty_category",
          "message": "Feature category is empty",
      },
      {
          "testcase_name": "FeatureMissingValue",
          "basename": "invalid_feature_missing_value",
          "message": "Feature is missing value",
      },
      {
          "testcase_name": "FeatureEmptyValue",
          "basename": "invalid_feature_empty_value",
          "message": "Feature value is empty",
      },
      {
          "testcase_name": "RootMissingMorpheme",
          "basename": "invalid_root_missing_morpheme",
          "message": "Root is missing morpheme",
      },
      {
          "testcase_name": "RootEmptyMorpheme",
          "basename": "invalid_root_empty_morpheme",
          "message": "Root morpheme is empty",
      },
  ])
  def test_raises_exception(self, basename, message):
    # Each ill-formed testdata proto must raise IllformedAnalysisError whose
    # message matches the expected regex.
    # NOTE(review): assertRaisesRegexp is the deprecated alias of
    # assertRaisesRegex; consider migrating when touching this code.
    analysis = _read_analysis(basename)
    with self.assertRaisesRegexp(validate.IllformedAnalysisError, message):
      validate.analysis(analysis)
# Allow running this test module directly.
if __name__ == "__main__":
  absltest.main()
| [
"turkish_morphology.validate.analysis",
"os.path.join",
"absl.testing.parameterized.named_parameters",
"absl.testing.absltest.main",
"turkish_morphology.analysis_pb2.Analysis"
] | [((1038, 1086), 'os.path.join', 'os.path.join', (['_TESTDATA_DIR', 'f"""{basename}.pbtxt"""'], {}), "(_TESTDATA_DIR, f'{basename}.pbtxt')\n", (1050, 1086), False, 'import os\n'), ((1207, 1673), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["[{'testcase_name': 'SingleInflectionalGroupsWithProperFeature', 'basename':\n 'araba_with_proper'}, {'testcase_name':\n 'SingleInflectionalGroupsWithoutProperFeature', 'basename':\n 'araba_without_proper'}, {'testcase_name':\n 'MultipleInflectionalGroupsWithProperFeature', 'basename':\n 'yasa_with_proper'}, {'testcase_name':\n 'MultipleInflectionalGroupsWithoutProperFeature', 'basename':\n 'yasa_without_proper'}]"], {}), "([{'testcase_name':\n 'SingleInflectionalGroupsWithProperFeature', 'basename':\n 'araba_with_proper'}, {'testcase_name':\n 'SingleInflectionalGroupsWithoutProperFeature', 'basename':\n 'araba_without_proper'}, {'testcase_name':\n 'MultipleInflectionalGroupsWithProperFeature', 'basename':\n 'yasa_with_proper'}, {'testcase_name':\n 'MultipleInflectionalGroupsWithoutProperFeature', 'basename':\n 'yasa_without_proper'}])\n", (1237, 1673), False, 'from absl.testing import parameterized\n'), ((1938, 4158), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["[{'testcase_name': 'AnalysisMissingInflectionalGroups', 'basename':\n 'invalid_empty_analysis', 'message':\n 'Analysis is missing inflectional groups'}, {'testcase_name':\n 'InflectionalGroupMissingPartOfSpeechTag', 'basename':\n 'invalid_ig_missing_pos', 'message':\n 'Inflectional group 2 is missing part-of-speech tag'}, {'testcase_name':\n 'InflectionalGroupEmptyPartOfSpeechTag', 'basename':\n 'invalid_ig_empty_pos', 'message':\n 'Inflectional group 2 part-of-speech tag is empty'}, {'testcase_name':\n 'FirstInflectionalGroupMissingRoot', 'basename':\n 'invalid_first_ig_missing_root', 'message':\n 'Inflectional group 1 is missing root'}, {'testcase_name':\n 
'DerivedInflectionalGroupMissingDerivation', 'basename':\n 'invalid_derived_ig_missing_derivation', 'message':\n 'Inflectional group 2 is missing derivational affix'}, {'testcase_name':\n 'AffixMissingFeature', 'basename': 'invalid_affix_missing_feature',\n 'message': 'Affix is missing feature'}, {'testcase_name':\n 'DerivationalAffixMissingMetaMorpheme', 'basename':\n 'invalid_derivational_affix_missing_meta_morpheme', 'message':\n 'Derivational affix is missing meta-morpheme'}, {'testcase_name':\n 'DerivationalAffixEmptyMetaMorpheme', 'basename':\n 'invalid_derivational_affix_empty_meta_morpheme', 'message':\n 'Derivational affix meta-morpheme is empty'}, {'testcase_name':\n 'FeatureMissingCategory', 'basename':\n 'invalid_feature_missing_category', 'message':\n 'Feature is missing category'}, {'testcase_name':\n 'FeatureEmptyCategory', 'basename': 'invalid_feature_empty_category',\n 'message': 'Feature category is empty'}, {'testcase_name':\n 'FeatureMissingValue', 'basename': 'invalid_feature_missing_value',\n 'message': 'Feature is missing value'}, {'testcase_name':\n 'FeatureEmptyValue', 'basename': 'invalid_feature_empty_value',\n 'message': 'Feature value is empty'}, {'testcase_name':\n 'RootMissingMorpheme', 'basename': 'invalid_root_missing_morpheme',\n 'message': 'Root is missing morpheme'}, {'testcase_name':\n 'RootEmptyMorpheme', 'basename': 'invalid_root_empty_morpheme',\n 'message': 'Root morpheme is empty'}]"], {}), "([{'testcase_name':\n 'AnalysisMissingInflectionalGroups', 'basename':\n 'invalid_empty_analysis', 'message':\n 'Analysis is missing inflectional groups'}, {'testcase_name':\n 'InflectionalGroupMissingPartOfSpeechTag', 'basename':\n 'invalid_ig_missing_pos', 'message':\n 'Inflectional group 2 is missing part-of-speech tag'}, {'testcase_name':\n 'InflectionalGroupEmptyPartOfSpeechTag', 'basename':\n 'invalid_ig_empty_pos', 'message':\n 'Inflectional group 2 part-of-speech tag is empty'}, {'testcase_name':\n 
'FirstInflectionalGroupMissingRoot', 'basename':\n 'invalid_first_ig_missing_root', 'message':\n 'Inflectional group 1 is missing root'}, {'testcase_name':\n 'DerivedInflectionalGroupMissingDerivation', 'basename':\n 'invalid_derived_ig_missing_derivation', 'message':\n 'Inflectional group 2 is missing derivational affix'}, {'testcase_name':\n 'AffixMissingFeature', 'basename': 'invalid_affix_missing_feature',\n 'message': 'Affix is missing feature'}, {'testcase_name':\n 'DerivationalAffixMissingMetaMorpheme', 'basename':\n 'invalid_derivational_affix_missing_meta_morpheme', 'message':\n 'Derivational affix is missing meta-morpheme'}, {'testcase_name':\n 'DerivationalAffixEmptyMetaMorpheme', 'basename':\n 'invalid_derivational_affix_empty_meta_morpheme', 'message':\n 'Derivational affix meta-morpheme is empty'}, {'testcase_name':\n 'FeatureMissingCategory', 'basename':\n 'invalid_feature_missing_category', 'message':\n 'Feature is missing category'}, {'testcase_name':\n 'FeatureEmptyCategory', 'basename': 'invalid_feature_empty_category',\n 'message': 'Feature category is empty'}, {'testcase_name':\n 'FeatureMissingValue', 'basename': 'invalid_feature_missing_value',\n 'message': 'Feature is missing value'}, {'testcase_name':\n 'FeatureEmptyValue', 'basename': 'invalid_feature_empty_value',\n 'message': 'Feature value is empty'}, {'testcase_name':\n 'RootMissingMorpheme', 'basename': 'invalid_root_missing_morpheme',\n 'message': 'Root is missing morpheme'}, {'testcase_name':\n 'RootEmptyMorpheme', 'basename': 'invalid_root_empty_morpheme',\n 'message': 'Root morpheme is empty'}])\n", (1968, 4158), False, 'from absl.testing import parameterized\n'), ((4886, 4901), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4899, 4901), False, 'from absl.testing import absltest\n'), ((1132, 1155), 'turkish_morphology.analysis_pb2.Analysis', 'analysis_pb2.Analysis', ([], {}), '()\n', (1153, 1155), False, 'from turkish_morphology import analysis_pb2\n'), ((1876, 
1903), 'turkish_morphology.validate.analysis', 'validate.analysis', (['analysis'], {}), '(analysis)\n', (1893, 1903), False, 'from turkish_morphology import validate\n'), ((4827, 4854), 'turkish_morphology.validate.analysis', 'validate.analysis', (['analysis'], {}), '(analysis)\n', (4844, 4854), False, 'from turkish_morphology import validate\n')] |
import skimage.color
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import imghdr
import time
"""
Duplicate Image Finder (DIF): function that searches a given directory for images and finds duplicate/similar images among them.
Outputs the number of found duplicate/similar image pairs with a list of the filenames having lower resolution.
"""
class dif:
    """Duplicate Image Finder: scans a directory for images and reports
    duplicate/similar pairs, optionally deleting the lower-resolution copies.

    All methods are used as plain functions called through the class
    (e.g. ``dif.mse(a, b)``); none of them takes ``self``.
    """

    def compare_images(directory, show_imgs=False, similarity="normal", px_size=50, delete=False):
        """
        directory (str)......folder path to search for duplicate/similar images
        show_imgs (bool).....False = omits the output and doesn't show found images
                             True = shows duplicate/similar images found in output
        similarity (str)....."normal" = searches for duplicates, recommended setting, MSE < 200
                             "high" = searches for exact duplicates, extremely sensitive to details, MSE < 0.1
                             "low" = searches for similar images, MSE < 1000
        px_size (int)........recommended not to change default value
                             resize images to px_size height x width (in pixels) before being compared
                             the higher the pixel size, the more computational resources and time required
        delete (bool)........! please use with care, as this cannot be undone
                             lower resolution duplicate images that were found are automatically deleted
        OUTPUT (set).........a set of the filenames of the lower resolution duplicate images
        """
        # list where the found duplicate/similar images are stored
        duplicates = []
        lower_res = []

        imgs_matrix = dif.create_imgs_matrix(directory, px_size)

        # MSE threshold below which two images count as a match.
        # search for similar images, MSE < 1000
        if similarity == "low":
            ref = 1000
        # search for exact duplicate images, extremely sensitive, MSE < 0.1
        elif similarity == "high":
            ref = 0.1
        # normal, search for duplicates, recommended, MSE < 200
        else:
            ref = 200

        main_img = 0
        compared_img = 1
        nrows, ncols = px_size, px_size
        # imgs_matrix stacks all resized images vertically; each image occupies
        # a band of `nrows` rows, so the row offsets below address one image.
        srow_A = 0
        erow_A = nrows
        srow_B = erow_A
        erow_B = srow_B + nrows
        while erow_B <= imgs_matrix.shape[0]:
            while compared_img < (len(image_files)):
                # select two images from imgs_matrix
                imgA = imgs_matrix[srow_A: erow_A,  # rows
                       0: ncols]  # columns
                imgB = imgs_matrix[srow_B: erow_B,  # rows
                       0: ncols]  # columns
                # compare the images, also at 90/180/270 degree rotations of B
                rotations = 0
                while image_files[main_img] not in duplicates and rotations <= 3:
                    if rotations != 0:
                        imgB = dif.rotate_img(imgB)
                    err = dif.mse(imgA, imgB)
                    if err < ref:
                        if show_imgs:
                            dif.show_img_figs(imgA, imgB, err)
                            dif.show_file_info(compared_img, main_img)
                        dif.add_to_list(image_files[main_img], duplicates)
                        dif.check_img_quality(directory, image_files[main_img], image_files[compared_img], lower_res)
                    rotations += 1
                srow_B += nrows
                erow_B += nrows
                compared_img += 1
            # advance the reference image and restart comparisons just after it
            srow_A += nrows
            erow_A += nrows
            srow_B = erow_A
            erow_B = srow_B + nrows
            main_img += 1
            compared_img = main_img + 1

        msg = "\n***\nFound " + str(len(duplicates)) + " duplicate image pairs in " + str(
            len(image_files)) + " total images.\n\nThe following files have lower resolution:"
        print(msg)
        print(lower_res, "\n")
        time.sleep(0.5)
        if delete:
            usr = input("Are you sure you want to delete all lower resolution duplicate images? (y/n)")
            if str(usr) == "y":
                dif.delete_imgs(directory, set(lower_res))
            else:
                print("Image deletion canceled.")
            return set(lower_res)
        else:
            return set(lower_res)

    def _process_directory(directory):
        """Append a trailing separator and verify the directory exists."""
        directory += os.sep
        if not os.path.isdir(directory):
            raise FileNotFoundError(f"Directory: " + directory + " does not exist")
        return directory

    # Function that searches the folder for image files, converts them to a matrix
    def create_imgs_matrix(directory, px_size):
        """Load every readable image in directory, resize it to px_size x px_size,
        and stack all images vertically into one matrix.

        Side effect: (re)binds the module-level ``image_files`` list with the
        filenames in the same order as their bands in the returned matrix.
        """
        directory = dif._process_directory(directory)
        global image_files
        image_files = []
        # create list of all files in directory
        folder_files = [filename for filename in os.listdir(directory)]

        # create images matrix
        counter = 0
        for filename in folder_files:
            # imghdr.what filters out non-image files (returns None for them)
            if not os.path.isdir(directory + filename) and imghdr.what(directory + filename):
                # np.fromfile + imdecode handles non-ASCII paths that cv2.imread cannot
                img = cv2.imdecode(np.fromfile(directory + filename, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
                if type(img) == np.ndarray:
                    img = img[..., 0:3]  # drop any alpha channel
                    img = cv2.resize(img, dsize=(px_size, px_size), interpolation=cv2.INTER_CUBIC)
                    if len(img.shape) == 2:
                        img = skimage.color.gray2rgb(img)
                    if counter == 0:
                        imgs_matrix = img
                        image_files.append(filename)
                        counter += 1
                    else:
                        imgs_matrix = np.concatenate((imgs_matrix, img))
                        image_files.append(filename)
        return imgs_matrix

    # Function that calulates the mean squared error (mse) between two image matrices
    def mse(imageA, imageB):
        """Return the mean squared error between two equally-shaped images."""
        err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
        err /= float(imageA.shape[0] * imageA.shape[1])
        return err

    # Function that plots two compared image files and their mse
    def show_img_figs(imageA, imageB, err):
        """Display the two compared images side by side with their MSE."""
        fig = plt.figure()
        plt.suptitle("MSE: %.2f" % (err))
        # plot first image
        ax = fig.add_subplot(1, 2, 1)
        plt.imshow(imageA, cmap=plt.cm.gray)
        plt.axis("off")
        # plot second image
        ax = fig.add_subplot(1, 2, 2)
        plt.imshow(imageB, cmap=plt.cm.gray)
        plt.axis("off")
        # show the images
        plt.show()

    # Function for rotating an image matrix by a 90 degree angle
    def rotate_img(image):
        """Return the image rotated by 90 degrees in the image plane."""
        image = np.rot90(image, k=1, axes=(0, 1))
        return image

    # Function for printing filename info of plotted image files
    def show_file_info(compared_img, main_img):
        """Print the filenames of a detected duplicate pair."""
        print("Duplicate file: " + image_files[main_img] + " and " + image_files[compared_img])

    # Function for appending items to a list
    def add_to_list(filename, list):
        """Append filename to the given list (in place)."""
        list.append(filename)

    # Function for checking the quality of compared images, appends the lower quality image to the list
    def check_img_quality(directory, imageA, imageB, list):
        """Compare file sizes and record the smaller (lower quality) file."""
        directory = dif._process_directory(directory)
        size_imgA = os.stat(directory + imageA).st_size
        size_imgB = os.stat(directory + imageB).st_size
        if size_imgA > size_imgB:
            dif.add_to_list(imageB, list)
        else:
            dif.add_to_list(imageA, list)

    def delete_imgs(directory, filenames_set):
        """Delete the given filenames from directory, reporting progress."""
        directory = dif._process_directory(directory)
        print("\nDeletion in progress...")
        deleted = 0
        for filename in filenames_set:
            try:
                os.remove(directory + filename)
                print("Deleted file:", filename)
                deleted += 1
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
            # not swallowed; os.remove failures surface as OSError.
            except OSError:
                print("Could not delete file:", filename)
        print("\n***\nDeleted", deleted, "duplicates.")
| [
"matplotlib.pyplot.imshow",
"numpy.fromfile",
"os.listdir",
"os.stat",
"time.sleep",
"os.remove",
"matplotlib.pyplot.figure",
"os.path.isdir",
"imghdr.what",
"numpy.rot90",
"numpy.concatenate",
"matplotlib.pyplot.axis",
"cv2.resize",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show"... | [((3912, 3927), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3922, 3927), False, 'import time\n'), ((6198, 6210), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6208, 6210), True, 'import matplotlib.pyplot as plt\n'), ((6219, 6250), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('MSE: %.2f' % err)"], {}), "('MSE: %.2f' % err)\n", (6231, 6250), True, 'import matplotlib.pyplot as plt\n'), ((6326, 6362), 'matplotlib.pyplot.imshow', 'plt.imshow', (['imageA'], {'cmap': 'plt.cm.gray'}), '(imageA, cmap=plt.cm.gray)\n', (6336, 6362), True, 'import matplotlib.pyplot as plt\n'), ((6371, 6386), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6379, 6386), True, 'import matplotlib.pyplot as plt\n'), ((6461, 6497), 'matplotlib.pyplot.imshow', 'plt.imshow', (['imageB'], {'cmap': 'plt.cm.gray'}), '(imageB, cmap=plt.cm.gray)\n', (6471, 6497), True, 'import matplotlib.pyplot as plt\n'), ((6506, 6521), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6514, 6521), True, 'import matplotlib.pyplot as plt\n'), ((6556, 6566), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6564, 6566), True, 'import matplotlib.pyplot as plt\n'), ((6676, 6709), 'numpy.rot90', 'np.rot90', (['image'], {'k': '(1)', 'axes': '(0, 1)'}), '(image, k=1, axes=(0, 1))\n', (6684, 6709), True, 'import numpy as np\n'), ((4380, 4404), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (4393, 4404), False, 'import os\n'), ((7293, 7320), 'os.stat', 'os.stat', (['(directory + imageA)'], {}), '(directory + imageA)\n', (7300, 7320), False, 'import os\n'), ((7349, 7376), 'os.stat', 'os.stat', (['(directory + imageB)'], {}), '(directory + imageB)\n', (7356, 7376), False, 'import os\n'), ((4855, 4876), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (4865, 4876), False, 'import os\n'), ((5030, 5063), 'imghdr.what', 'imghdr.what', (['(directory + filename)'], {}), '(directory 
+ filename)\n', (5041, 5063), False, 'import imghdr\n'), ((7754, 7785), 'os.remove', 'os.remove', (['(directory + filename)'], {}), '(directory + filename)\n', (7763, 7785), False, 'import os\n'), ((4990, 5025), 'os.path.isdir', 'os.path.isdir', (['(directory + filename)'], {}), '(directory + filename)\n', (5003, 5025), False, 'import os\n'), ((5100, 5149), 'numpy.fromfile', 'np.fromfile', (['(directory + filename)'], {'dtype': 'np.uint8'}), '(directory + filename, dtype=np.uint8)\n', (5111, 5149), True, 'import numpy as np\n'), ((5283, 5355), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(px_size, px_size)', 'interpolation': 'cv2.INTER_CUBIC'}), '(img, dsize=(px_size, px_size), interpolation=cv2.INTER_CUBIC)\n', (5293, 5355), False, 'import cv2\n'), ((5691, 5725), 'numpy.concatenate', 'np.concatenate', (['(imgs_matrix, img)'], {}), '((imgs_matrix, img))\n', (5705, 5725), True, 'import numpy as np\n')] |
# A função min_max deverá rodar em O(n) e o código não pode usar nenhuma
# lib do Python (sort, min, max e etc)
# Não pode usar qualquer laço (while, for), a função deve ser recursiva
# Ou delegar a solução para uma função puramente recursiva
import unittest
def bora(cont, seq, min, max):
    """Recursively scan ``seq`` from index ``cont`` and return ``(min, max)``.

    :param cont: current index into seq (start the scan with 0)
    :param seq: the sequence being scanned
    :param min: smallest value seen so far (seed with seq[0])
    :param max: largest value seen so far (seed with seq[0])
    :return: tuple (minimum, maximum) of seq[cont:] merged with the seeds

    One recursive call per element, so the scan is O(n). The parameter names
    shadow the builtins ``min``/``max``; they are kept for backward
    compatibility with existing keyword callers.

    Fixes over the previous version: values are no longer forced through
    ``int()`` (which truncated floats, e.g. [0.5, 0.2] wrongly yielded a
    minimum of 0), and each element is compared directly against the running
    extremes instead of against its neighbour with a special case for the
    last element.
    """
    if cont == len(seq):
        return (min, max)
    value = seq[cont]
    if value < min:
        min = value
    if value > max:
        max = value
    return bora(cont + 1, seq, min, max)
def min_max(seq):
    '''
    :param seq: uma sequencia
    :return: (min, max)
    Retorna tupla cujo primeiro valor mínimo (min) é o valor
    mínimo da sequencia seq.
    O segundo é o valor máximo (max) da sequencia
    O(n)
    '''
    # Guard clauses for the trivial sizes; the recursive helper handles the rest.
    if not seq:
        return (None, None)
    if len(seq) == 1:
        return seq[0], seq[0]
    # Seed both extremes with the first element and delegate the O(n) scan.
    return bora(0, seq, seq[0], seq[0])
class MinMaxTestes(unittest.TestCase):
    # Unit tests for min_max covering empty, single-element and long inputs.
    def test_lista_vazia(self):
        # An empty sequence has no extremes: expect (None, None).
        self.assertTupleEqual((None, None), min_max([]))

    def test_lista_len_1(self):
        # A single element is both the minimum and the maximum.
        self.assertTupleEqual((1, 1), min_max([1]))

    def test_lista_consecutivos(self):
        # Ascending 0..500 exercises the O(n) recursive scan end to end.
        self.assertTupleEqual((0, 500), min_max(list(range(501))))
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main"
] | [((1556, 1571), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1569, 1571), False, 'import unittest\n')] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import boto3
import os
from aws_lambda_powertools import Logger
logger = Logger()  # aws_lambda_powertools structured logger
client = boto3.client('stepfunctions')  # created at module scope so the warm Lambda container reuses it
sfnArn = os.environ['SFN_ARN']  # target state machine ARN; raises KeyError at import if unset
def lambda_handler(event, context):
    """Start a Step Functions execution using the incoming event as input.

    Logs the event's 'Choice' field and the StartExecution response, then
    returns an HTTP-style dict with statusCode 200 and the response
    serialised (via str fallback, for the datetime fields) in the body.
    """
    logger.info(f"Received Choice: {event['Choice']}")
    response = client.start_execution(stateMachineArn=sfnArn, input=json.dumps(event))
    logger.info(f"Received Response: {response}")
    return {'statusCode': 200, 'body': json.dumps(response, default=str)}
| [
"json.dumps",
"aws_lambda_powertools.Logger",
"boto3.client"
] | [((189, 197), 'aws_lambda_powertools.Logger', 'Logger', ([], {}), '()\n', (195, 197), False, 'from aws_lambda_powertools import Logger\n'), ((207, 236), 'boto3.client', 'boto3.client', (['"""stepfunctions"""'], {}), "('stepfunctions')\n", (219, 236), False, 'import boto3\n'), ((611, 644), 'json.dumps', 'json.dumps', (['response'], {'default': 'str'}), '(response, default=str)\n', (621, 644), False, 'import json\n'), ((471, 488), 'json.dumps', 'json.dumps', (['event'], {}), '(event)\n', (481, 488), False, 'import json\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the AES decrypter object."""
import unittest
from dfvfs.encryption import aes_decrypter
from dfvfs.lib import definitions
from tests.encryption import test_lib
class AESDecrypterTestCase(test_lib.DecrypterTestCase):
  """Tests for the AES decrypter object."""

  # 16-byte values: AES-128 needs a 16-byte key, and CBC a 16-byte IV.
  _AES_INITIALIZATION_VECTOR = b'This is an IV456'
  _AES_KEY = b'This is a key123'

  def testInitialization(self):
    """Tests the initialization method."""
    # Test missing arguments.
    with self.assertRaises(ValueError):
      aes_decrypter.AESDecrypter()

    # Test unsupported block cipher mode.
    with self.assertRaises(ValueError):
      aes_decrypter.AESDecrypter(
          cipher_mode='bogus', key=self._AES_KEY)

    # Test missing initialization vector.
    with self.assertRaises(ValueError):
      aes_decrypter.AESDecrypter(
          cipher_mode=definitions.ENCRYPTION_MODE_CBC, key=self._AES_KEY)

    # Test missing initialization vector with valid block cipher mode.
    # ECB does not use an IV, so this construction must succeed.
    aes_decrypter.AESDecrypter(
        cipher_mode=definitions.ENCRYPTION_MODE_ECB, key=self._AES_KEY)

    # Test incorrect key size.
    with self.assertRaises(ValueError):
      aes_decrypter.AESDecrypter(
          cipher_mode=definitions.ENCRYPTION_MODE_ECB, key=b'Wrong key size')

    # Test incorrect initialization vector type.
    with self.assertRaises(TypeError):
      aes_decrypter.AESDecrypter(
          cipher_mode=definitions.ENCRYPTION_MODE_CBC,
          initialization_vector='Wrong IV type', key=self._AES_KEY)

    # Test incorrect initialization vector size.
    with self.assertRaises(ValueError):
      aes_decrypter.AESDecrypter(
          cipher_mode=definitions.ENCRYPTION_MODE_CBC,
          initialization_vector=b'Wrong IV size', key=self._AES_KEY)

  def testDecrypt(self):
    """Tests the Decrypt method."""
    decrypter = aes_decrypter.AESDecrypter(
        cipher_mode=definitions.ENCRYPTION_MODE_CBC,
        initialization_vector=self._AES_INITIALIZATION_VECTOR,
        key=self._AES_KEY)

    # Test full decryption.
    expected_decrypted_data = b'This is secret encrypted text!!!'
    decrypted_data, remaining_encrypted_data = decrypter.Decrypt(
        b'2|\x7f\xd7\xff\xbay\xf9\x95?\x81\xc7\xaafV\xceB\x01\xdb8E7\xfe'
        b'\x92j\xf0\x1d(\xb9\x9f\xad\x13', finalize=True)
    self.assertEqual(decrypted_data, expected_decrypted_data)
    self.assertEqual(remaining_encrypted_data, b'')

    # Reset decrypter.
    decrypter = aes_decrypter.AESDecrypter(
        cipher_mode=definitions.ENCRYPTION_MODE_CBC,
        initialization_vector=self._AES_INITIALIZATION_VECTOR,
        key=self._AES_KEY)

    # Test partial decryption.
    # Without finalize, the trailing (incomplete) block is held back, so no
    # plaintext is produced and the input is reported as remaining.
    partial_encrypted_data = (
        b'2|\x7f\xd7\xff\xbay\xf9\x95?\x81\xc7\xaafV\xceB\x01\xdb8E7\xfe')
    decrypted_data, remaining_encrypted_data = decrypter.Decrypt(
        partial_encrypted_data)
    self.assertEqual(decrypted_data, b'')
    self.assertEqual(remaining_encrypted_data, partial_encrypted_data)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| [
"unittest.main",
"dfvfs.encryption.aes_decrypter.AESDecrypter"
] | [((3052, 3067), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3065, 3067), False, 'import unittest\n'), ((1024, 1119), 'dfvfs.encryption.aes_decrypter.AESDecrypter', 'aes_decrypter.AESDecrypter', ([], {'cipher_mode': 'definitions.ENCRYPTION_MODE_ECB', 'key': 'self._AES_KEY'}), '(cipher_mode=definitions.ENCRYPTION_MODE_ECB, key\n =self._AES_KEY)\n', (1050, 1119), False, 'from dfvfs.encryption import aes_decrypter\n'), ((1880, 2029), 'dfvfs.encryption.aes_decrypter.AESDecrypter', 'aes_decrypter.AESDecrypter', ([], {'cipher_mode': 'definitions.ENCRYPTION_MODE_CBC', 'initialization_vector': 'self._AES_INITIALIZATION_VECTOR', 'key': 'self._AES_KEY'}), '(cipher_mode=definitions.ENCRYPTION_MODE_CBC,\n initialization_vector=self._AES_INITIALIZATION_VECTOR, key=self._AES_KEY)\n', (1906, 2029), False, 'from dfvfs.encryption import aes_decrypter\n'), ((2500, 2649), 'dfvfs.encryption.aes_decrypter.AESDecrypter', 'aes_decrypter.AESDecrypter', ([], {'cipher_mode': 'definitions.ENCRYPTION_MODE_CBC', 'initialization_vector': 'self._AES_INITIALIZATION_VECTOR', 'key': 'self._AES_KEY'}), '(cipher_mode=definitions.ENCRYPTION_MODE_CBC,\n initialization_vector=self._AES_INITIALIZATION_VECTOR, key=self._AES_KEY)\n', (2526, 2649), False, 'from dfvfs.encryption import aes_decrypter\n'), ((561, 589), 'dfvfs.encryption.aes_decrypter.AESDecrypter', 'aes_decrypter.AESDecrypter', ([], {}), '()\n', (587, 589), False, 'from dfvfs.encryption import aes_decrypter\n'), ((679, 745), 'dfvfs.encryption.aes_decrypter.AESDecrypter', 'aes_decrypter.AESDecrypter', ([], {'cipher_mode': '"""bogus"""', 'key': 'self._AES_KEY'}), "(cipher_mode='bogus', key=self._AES_KEY)\n", (705, 745), False, 'from dfvfs.encryption import aes_decrypter\n'), ((846, 941), 'dfvfs.encryption.aes_decrypter.AESDecrypter', 'aes_decrypter.AESDecrypter', ([], {'cipher_mode': 'definitions.ENCRYPTION_MODE_CBC', 'key': 'self._AES_KEY'}), '(cipher_mode=definitions.ENCRYPTION_MODE_CBC, key\n =self._AES_KEY)\n', (872, 941), False, 
'from dfvfs.encryption import aes_decrypter\n'), ((1202, 1301), 'dfvfs.encryption.aes_decrypter.AESDecrypter', 'aes_decrypter.AESDecrypter', ([], {'cipher_mode': 'definitions.ENCRYPTION_MODE_ECB', 'key': "b'Wrong key size'"}), "(cipher_mode=definitions.ENCRYPTION_MODE_ECB, key\n =b'Wrong key size')\n", (1228, 1301), False, 'from dfvfs.encryption import aes_decrypter\n'), ((1403, 1536), 'dfvfs.encryption.aes_decrypter.AESDecrypter', 'aes_decrypter.AESDecrypter', ([], {'cipher_mode': 'definitions.ENCRYPTION_MODE_CBC', 'initialization_vector': '"""Wrong IV type"""', 'key': 'self._AES_KEY'}), "(cipher_mode=definitions.ENCRYPTION_MODE_CBC,\n initialization_vector='Wrong IV type', key=self._AES_KEY)\n", (1429, 1536), False, 'from dfvfs.encryption import aes_decrypter\n'), ((1650, 1784), 'dfvfs.encryption.aes_decrypter.AESDecrypter', 'aes_decrypter.AESDecrypter', ([], {'cipher_mode': 'definitions.ENCRYPTION_MODE_CBC', 'initialization_vector': "b'Wrong IV size'", 'key': 'self._AES_KEY'}), "(cipher_mode=definitions.ENCRYPTION_MODE_CBC,\n initialization_vector=b'Wrong IV size', key=self._AES_KEY)\n", (1676, 1784), False, 'from dfvfs.encryption import aes_decrypter\n')] |
import torch
import numpy as np
# Token id used for padding positions.
PAD_TOKEN_INDEX = 0


def pad_masking(x, target_len):
    """Build a boolean padding mask broadcast over target positions.

    x: (batch_size, seq_len) tensor of token ids. Returns a
    (batch_size, target_len, seq_len) tensor that is True wherever the
    source token is the padding token.
    """
    batch_size, seq_len = x.size()
    is_padding = x.eq(PAD_TOKEN_INDEX)  # (batch_size, seq_len)
    return is_padding.unsqueeze(1).expand(batch_size, target_len, seq_len)
def subsequent_masking(x):
    """Upper-triangular mask hiding future positions for decoder attention.

    x: (batch_size, seq_len - 1) per the original annotation. Returns a
    uint8 tensor of shape (batch_size, seq_len, seq_len) on x.device with
    ones strictly above the main diagonal.
    """
    batch_size, seq_len = x.size()
    triangle = np.triu(np.ones((seq_len, seq_len), dtype='uint8'), k=1)
    future_mask = torch.tensor(triangle).to(x.device)
    return future_mask.unsqueeze(0).expand(batch_size, seq_len, seq_len)
"torch.tensor",
"numpy.ones"
] | [((534, 563), 'torch.tensor', 'torch.tensor', (['subsequent_mask'], {}), '(subsequent_mask)\n', (546, 563), False, 'import torch\n'), ((456, 489), 'numpy.ones', 'np.ones', ([], {'shape': '(seq_len, seq_len)'}), '(shape=(seq_len, seq_len))\n', (463, 489), True, 'import numpy as np\n')] |
# Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.
import ast
import os
import re
import shlex
from itertools import zip_longest
from string import Template
from typing import *
from .pithy.fs import *
from .pithy.io import *
from .pithy.types import * # type: ignore
from .ctx import Ctx
coverage_name = '_.coven'
class TestCaseError(Exception): pass
class IotParseError(TestCaseError): pass
class FileExpectation:
def __init__(self, path: str, info: Dict[str, str], expand_str_fn: Callable) -> None:
if path.find('..') != -1:
raise TestCaseError(f"file expectation {path}: cannot contain '..'")
self.path = path
self.mode = info.get('mode', 'equal')
validate_exp_mode(path, self.mode)
try:
exp_path = info['path']
except KeyError:
val = info.get('val', '')
else:
if 'val' in info:
raise TestCaseError(f'file expectation {path}: cannot specify both `path` and `val` properties')
exp_path_expanded = expand_str_fn(exp_path)
val = read_from_path(exp_path_expanded)
self.val = expand_str_fn(val)
if self.mode == 'match':
self.match_pattern_pairs = self.compile_match_lines(self.val)
else:
self.match_pattern_pairs = []
self.match_error: Optional[Tuple[int, Optional[Pattern], str]] = None
def compile_match_lines(self, text: str) -> List[Tuple[str, Pattern]]:
return [self.compile_match_line(i, line) for i, line in enumerate(text.splitlines(True), 1)]
def compile_match_line(self, i: int, line: str) -> Tuple[str, Pattern]:
prefix = line[:2]
contents = line[2:]
valid_prefixes = ('|', '|\n', '| ', '~', '~\n', '~ ')
if prefix not in valid_prefixes:
raise TestCaseError("test expectation: {!r};\nmatch line {}: must begin with one of: {}\n{!r}".format(
self.path, i, ', '.join(repr(p) for p in valid_prefixes), line))
if prefix.endswith('\n'):
# these two cases exist to be lenient about empty lines,
# where otherwise the pattern line would consist of the symbol and a single space.
# since trailing space is highlighted by `git diff` and often considered bad style,
# we allow it to be omitted, since there is no loss of generality for the patterns.
contents = '\n'
try:
return (line, re.compile(contents if prefix == '~ ' else re.escape(contents)))
except Exception as e:
raise TestCaseError('test expectation: {!r};\nmatch line {}: pattern is invalid regex:\n{!r}\n{}'.format(
self.path, i, contents, e)) from e
def __repr__(self) -> str:
return 'FileExpectation({!r}, {!r}, {!r})'.format(self.path, self.mode, self.val)
class ParConfig(NamedTuple):
'''
Parameterized case configuration data.
'''
stem: str
pattern: Pattern[str]
config: Dict
class Case:
  'Case represents a single test case, or a default.'
  def __init__(self, ctx:Ctx, proto: Optional['Case'], stem: str, config: Dict, par_configs: List[ParConfig],
   par_stems_used: Set[str]) -> None:
    '''Build a case from an optional prototype plus config dicts; a broken case prints an error and exits.'''
    self.stem: str = path_dir(stem) if path_name(stem) == '_' else stem # TODO: better naming for 'logical stem' (see code in main).
    self.name: str = path_name(self.stem)
    # derived properties.
    self.multi_index: Optional[int] = None
    self.test_info_paths: Set[str] = set() # the files that comprise the test case.
    self.dflt_src_paths: List[str] = []
    self.coverage_targets: List[str] = []
    self.test_dir: str = ''
    self.test_cmd: List[str] = []
    self.test_env: Dict[str, str] = {}
    self.test_in: Optional[str] = None
    self.test_expectations: List[FileExpectation] = []
    self.test_links: List[Tuple[str, str]] = [] # sequence of (orig-name, link-name) pairs.
    self.test_par_args: Dict[str, Tuple[str, ...]] = {} # the match groups that resulted from applying the regex for the given parameterized stem.
    # configurable properties.
    self.args: Optional[List[str]] = None # arguments to follow the file under test.
    self.cmd: Optional[List[str]] = None # command string/list with which to invoke the test.
    self.coverage: Optional[List[str]] = None # list of names to include in code coverage analysis.
    self.code: Optional[int] = None # the expected exit code.
    self.compile: Optional[List[Any]] = None # the optional list of compile commands, each a string or list of strings.
    self.compile_timeout: Optional[int] = None # timeout for compile commands; validated as positive int (presumably seconds — confirm).
    self.desc: Optional[str] = None # description.
    self.env: Optional[Dict[str, str]] = None # environment variables.
    self.err_mode: Optional[str] = None # comparison mode for stderr expectation.
    self.err_path: Optional[str] = None # file path for stderr expectation.
    self.err_val: Optional[str] = None # stderr expectation value (mutually exclusive with err_path).
    self.files: Optional[Dict[str, Dict[str, str]]] = None # additional file expectations.
    self.in_: Optional[str] = None # stdin as text.
    self.interpreter: Optional[str] = None # interpreter to prepend to cmd.
    self.interpreter_args: Optional[List[str]] = None # interpreter args.
    self.links: Union[None, Set[str], Dict[str, str]] = None # symlinks to be made into the test directory; written as a str, set or dict.
    self.out_mode: Optional[str] = None # comparison mode for stdout expectation.
    self.out_path: Optional[str] = None # file path for stdout expectation.
    self.out_val: Optional[str] = None # stdout expectation value (mutually exclusive with out_path).
    self.skip: Optional[str] = None # NOTE(review): annotated Optional[str] but validated as bool ('skip': is_bool) — confirm intended type.
    self.timeout: Optional[int] = None # timeout for the test command; validated as positive int.
    try:
      if proto is not None:
        # copy every configurable property from the prototype (default) case.
        for key in case_key_validators:
          setattr(self, key, getattr(proto, key))
      for par_stem, par_re, par_config in par_configs:
        m = par_re.fullmatch(stem)
        if not m: continue
        for key, val in par_config.items():
          self.add_val_for_key(ctx, key, val)
        self.test_par_args[par_stem] = cast(Tuple[str, ...], m.groups()) # Save the strings matching the parameters to use as arguments.
        par_stems_used.add(par_stem) # Mark this parameterized config as used.
      for key, val in config.items():
        self.add_val_for_key(ctx, key, val)
      # do all additional computations now, so as to fail as quickly as possible.
      self.derive_info(ctx)
    except Exception as e:
      outL(f'iotest error: broken test case: {stem}')
      outL(f'  exception: {type(e).__name__}: {e}.')
      # not sure if it makes sense to describe cases for some exceptions;
      # for now, just carve out the ones for which it is definitely useless.
      if not isinstance(e, IotParseError):
        self.describe(stdout)
      outL()
      exit(1)
  def __repr__(self) -> str: return f'Case(stem={self.stem!r}, ...)'
  def __lt__(self, other: 'Case') -> bool: return self.stem < other.stem
  @property
  def coverage_path(self) -> str:
    'Returned path is relative to self.test_dir.'
    return self.std_name(coverage_name)
  @property
  def coven_cmd_prefix(self) -> List[str]:
    'Command prefix that wraps the test command with the `coven` coverage tool.'
    coven_cmd = ['coven', '-output', self.coverage_path]
    if self.coverage_targets:
      coven_cmd += ['-targets'] + self.coverage_targets
    coven_cmd.append('--')
    return coven_cmd
  def std_name(self, std: str) -> str: return f'{self.name}.{std}'
  def describe(self, file: TextIO) -> None:
    'Write a deterministic, sorted dump of all case attributes to `file`.'
    def stable_repr(val: Any) -> str:
      if is_dict(val):
        return '{{{}}}'.format(', '.join(f'{k!r}:{v!r}' for k, v in sorted(val.items()))) # sort dict representations. TODO: factor out.
      return repr(val)
    items = sorted(self.__dict__.items())
    writeLSSL(file, 'Case:', *('{}: {}'.format(k, stable_repr(v)) for k, v in items))
  def add_val_for_key(self, ctx:Ctx, key:str, val:Any) -> None:
    'Validate one config value and set it, translating the config key to an attribute name.'
    try: name = iot_key_subs[key]
    except KeyError: name = key.replace('-', '_')
    try:
      exp_desc, predicate, validator_fn = case_key_validators[name]
    except KeyError as e:
      raise TestCaseError(f'invalid config key: {key!r}') from e
    if not predicate(val):
      raise TestCaseError(f'key: {key!r}: expected value of type: {exp_desc}; received: {val!r}')
    if validator_fn:
      validator_fn(name, val)
    if ctx.dbg:
      existing = getattr(self, name)
      if existing is not None and existing != val:
        errL(f'note: {self.stem}: overriding value for key: {name!r};\n  existing: {existing!r}\n  incoming: {val!r}')
    setattr(self, name, val)
  def derive_info(self, ctx: Ctx) -> None:
    'Compute all derived test properties (env, command, links, expectations); raises TestCaseError on bad config.'
    if self.name == '_default': return # do not process prototype cases.
    rel_dir, _, multi_index = self.stem.partition('.')
    self.multi_index = int(multi_index) if multi_index else None
    self.test_dir = path_join(ctx.build_dir, rel_dir)
    env = self.test_env # local alias for convenience.
    env['BUILD'] = ctx.build_dir
    env['NAME'] = self.name
    env['PROJ'] = abs_path(ctx.proj_dir)
    env['SRC'] = self.dflt_src_paths[0] if len(self.dflt_src_paths) == 1 else 'NONE'
    env['STEM'] = self.stem
    env['DIR'] = path_dir(self.stem)
    def default_to_env(key: str) -> None:
      # inherit a variable from the parent process unless the case already sets it.
      if key not in env and key in os.environ:
        env[key] = os.environ[key]
    default_to_env('HOME') # otherwise git fails with "error: Could not expand include path '~/.gitcinclude'".
    default_to_env('LANG') # necessary to make std file handles unicode-aware.
    default_to_env('NODE_PATH')
    default_to_env('PATH')
    default_to_env('PYTHONPATH')
    default_to_env('SDKROOT')
    def expand_str(val: Any) -> str:
      # substitute $VAR references against the case env; unknown names are left intact.
      t = Template(val)
      return t.safe_substitute(env)
    def expand(val: Any) -> List[str]:
      if val is None:
        return []
      if is_str(val):
        # note: plain strings are expanded first, then split.
        # this behavior matches that of shell commands more closely than split-then-expand,
        # but introduces all the confusion of shell quoting.
        return shlex.split(expand_str(val))
      if is_list(val):
        return [expand_str(el) for el in val]
      raise TestCaseError(f'expand received unexpected value: {val}')
    # add the case env one item at a time.
    # sorted because we want expansion to be deterministic;
    # TODO: should probably expand everything with just the builtins;
    # otherwise would need some dependency resolution between vars.
    if self.env:
      for key, val in sorted(self.env.items()):
        if key in env:
          raise TestCaseError(f'specified env contains reserved key: {key}')
        env[key] = expand_str(val)
    self.compile_cmds = [expand(cmd) for cmd in self.compile] if self.compile else []
    cmd: List[str] = []
    if self.interpreter:
      cmd += expand(self.interpreter)
    if self.interpreter_args:
      if not self.interpreter: raise TestCaseError('interpreter_args specified without interpreter')
      cmd += expand(self.interpreter_args)
    if self.cmd is not None:
      cmd += expand(self.cmd)
    elif self.compile_cmds:
      cmd += ['./' + self.name]
    elif len(self.dflt_src_paths) > 1:
      raise TestCaseError(f'no `cmd` specified and multiple default source paths found: {self.dflt_src_paths}')
    elif len(self.dflt_src_paths) < 1:
      raise TestCaseError('no `cmd` specified and no default source path found')
    else:
      # single default source path: link it into the test dir and run it.
      dflt_path = self.dflt_src_paths[0]
      dflt_name = path_name(dflt_path)
      self.test_links.append((dflt_path, dflt_name))
      prefix = '' if cmd else './'
      cmd.append(prefix + dflt_name)
      if self.args is None:
        # with no explicit args, pass the parameterized-stem match groups as arguments.
        par_args = list(self.test_par_args.get(path_stem(dflt_path), ()))
        cmd += par_args
    if self.args:
      cmd += expand(self.args) or []
    self.test_cmd = cmd
    if self.multi_index and self.links:
      raise TestCaseError("non-lead subcase of a multicase cannot specify 'links'")
    elif isinstance(self.links, str):
      link = expand_str(self.links)
      self.test_links += [(link, path_name(link))]
    elif isinstance(self.links, set):
      self.test_links += sorted((n, path_name(n)) for n in map(expand_str, self.links))
    elif isinstance(self.links, dict):
      self.test_links += sorted((expand_str(orig), expand_str(link)) for orig, link in self.links.items())
    elif self.links is not None:
      raise TestCaseError(self.links)
    self.coverage_targets = expand(self.coverage)
    self.test_in = expand_str(self.in_) if self.in_ is not None else None
    def add_std_exp(name:str, mode:Optional[str], path:Optional[str], val:Optional[str]) -> None:
      # build the expectation info dict from only the properties that were specified.
      info = {}
      if mode is not None: info['mode'] = mode
      if path is not None: info['path'] = path
      if val is not None: info['val'] = val
      exp = FileExpectation(self.std_name(name), info, expand_str)
      self.test_expectations.append(exp)
    add_std_exp('err', self.err_mode, self.err_path, self.err_val)
    add_std_exp('out', self.out_mode, self.out_path, self.out_val)
    for path, info in (self.files or {}).items():
      exp = FileExpectation(path, info, expand_str)
      self.test_expectations.append(exp)
# Translation of iot config keys to Case attribute names.
# Dotted keys are set programmatically (not by test authors); 'in' maps to 'in_' to avoid the Python keyword.
iot_key_subs = {
  '.in' : 'in_',
  '.err' : 'err_val',
  '.out' : 'out_val',
  '.dflt_src_paths' : 'dflt_src_paths',
  '.test_info_paths' : 'test_info_paths',
  'in' : 'in_',
}
def is_int_or_ellipsis(val: Any) -> bool:
  'True for the `...` literal or any int value.'
  if val is Ellipsis: return True
  return is_int(val)
def is_compile_cmd(val: Any) -> bool:
  'True for a list whose every element is a string or a list.'
  if not is_list(val): return False
  return all(is_str_or_list(el) for el in val)
def is_valid_links(val: Any) -> bool:
  'True for a string, a set of strings, or a dict of strings.'
  return any(test(val) for test in (is_str, is_set_of_str, is_dict_of_str))
def validate_path(key: str, path: Any) -> None:
  'Validate a file-expectation path: must be nonempty and must not contain a dot.'
  if not path:
    raise TestCaseError(f'key: {key}: path is empty: {path!r}')
  if '.' in path:
    raise TestCaseError(f"key: {key}: path cannot contain '.': {path!r}")
def validate_exp_mode(key: str, mode: str) -> None:
  'Raise unless `mode` names a known file-expectation comparison function.'
  if mode in file_expectation_fns: return
  raise TestCaseError(f'key: {key}: invalid file expectation mode: {mode}')
def validate_exp_dict(key: str, val: Any) -> None:
  'Validate the shape of a single file-expectation dictionary.'
  if not is_dict(val):
    raise TestCaseError(f'file expectation: {key}: value must be a dictionary.')
  allowed = ('mode', 'path', 'val')
  for prop in val:
    if prop not in allowed:
      raise TestCaseError(f'file expectation: {key}: invalid expectation property: {prop}')
def validate_files_dict(key: str, val: Any) -> None:
  'Validate the `files` config value: a dict mapping paths to expectation dicts.'
  if not is_dict(val):
    raise TestCaseError(f'file expectation: {key}: value must be a dictionary.')
  for file_key, exp in val.items():
    # stdout/stderr have dedicated top-level properties; reject them here.
    if file_key in ('out', 'err'):
      raise TestCaseError(f'key: {key}: {file_key}: use the standard properties instead ({file_key}_mode, {file_key}_path, {file_key}_val).')
    validate_exp_dict(file_key, exp)
def validate_links_dict(key: str, val: Any) -> None:
  'Validate link originals and locations: no `..` path traversal components allowed.'
  if is_str(val):
    pairs = [(val, val)]
  elif is_set(val):
    pairs = [(p, p) for p in val]
  elif is_dict(val):
    pairs = val.items()
  else: raise AssertionError('`validate_links_dict` types inconsistent with `is_valid_links`.')
  for orig, link in pairs:
    if '..' in orig: raise TestCaseError(f"key: {key}: link original contains '..': {orig}")
    if '..' in link: raise TestCaseError(f"key: {key}: link location contains '..': {link}")
# Schema for every recognized case attribute: maps the attribute name to a
# human-readable type description, a type predicate, and an optional deeper validator.
case_key_validators: Dict[str, Tuple[str, Callable[[Any], bool], Optional[Callable[[str, Any], None]]]] = {
  # key => msg, validator_predicate, validator_fn.
  'args': ('string or list of strings', is_str_or_list, None),
  'cmd': ('string or list of strings', is_str_or_list, None),
  'code': ('int or `...`', is_int_or_ellipsis, None),
  'compile': ('list of (str | list of str)', is_compile_cmd, None),
  'compile_timeout': ('positive int', is_pos_int, None),
  'coverage': ('string or list of strings', is_str_or_list, None),
  'desc': ('str', is_str, None),
  'dflt_src_paths': ('list of str', is_list_of_str, None),
  'env': ('dict of strings', is_dict_of_str, None),
  'err_mode': ('str', is_str, validate_exp_mode),
  'err_path': ('str', is_str, None),
  'err_val': ('str', is_str, None),
  'files': ('dict', is_dict, validate_files_dict),
  'in_': ('str', is_str, None),
  'interpreter': ('string or list of strings', is_str_or_list, None),
  'interpreter_args': ('string or list of strings', is_str_or_list, None),
  'links': ('string or (dict | set) of strings', is_valid_links, validate_links_dict),
  'out_mode': ('str', is_str, validate_exp_mode),
  'out_path': ('str', is_str, None),
  'out_val': ('str', is_str, None),
  'skip': ('bool', is_bool, None),
  'test_info_paths': ('set of str', is_set_of_str, None),
  'timeout': ('positive int', is_pos_int, None),
}
# file expectation functions.
def compare_equal(exp: FileExpectation, val: str) -> bool:
  'True if the actual text equals the expectation value exactly.'
  return val == exp.val # type: ignore
def compare_contain(exp: FileExpectation, val: str) -> bool:
  'True if the expectation value occurs anywhere within the actual text.'
  return exp.val in val
def compare_match(exp: FileExpectation, val: str) -> bool:
  '''Check the actual text line-by-line against the compiled match patterns.

  On the first mismatch (pattern failure, missing line, or surplus line),
  record (line_number, pattern_or_None, actual_line) in exp.match_error and
  return False.
  '''
  actual_lines: List[str] = val.splitlines(True)
  for line_num, (pat_pair, actual) in enumerate(zip_longest(exp.match_pattern_pairs, actual_lines), 1):
    if pat_pair is None:
      # more actual lines than patterns.
      exp.match_error = (line_num, None, actual)
      return False
    pattern_text, regex = pat_pair
    if actual is None or not regex.fullmatch(actual):
      exp.match_error = (line_num, pattern_text, actual)
      return False
  return True
def compare_ignore(exp: FileExpectation, val: str) -> bool:
  'Always true: the file contents are deliberately not checked.'
  return True
# Dispatch table from expectation mode name to its comparison function.
file_expectation_fns = {
  'equal'   : compare_equal,
  'contain' : compare_contain,
  'match'   : compare_match,
  'ignore'  : compare_ignore,
}
| [
"string.Template",
"re.escape",
"itertools.zip_longest"
] | [((17321, 17364), 'itertools.zip_longest', 'zip_longest', (['exp.match_pattern_pairs', 'lines'], {}), '(exp.match_pattern_pairs, lines)\n', (17332, 17364), False, 'from itertools import zip_longest\n'), ((9536, 9549), 'string.Template', 'Template', (['val'], {}), '(val)\n', (9544, 9549), False, 'from string import Template\n'), ((2373, 2392), 're.escape', 're.escape', (['contents'], {}), '(contents)\n', (2382, 2392), False, 'import re\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import codecs
import lightgbm as lgb
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Read the simulated DPC claims data; Shift-JIS encoded, undecodable bytes ignored.
image_file_path = './simulated_dpc_data.csv'
with codecs.open(image_file_path, "r", "Shift-JIS", "ignore") as file:
    dpc = pd.read_table(file, delimiter=",")
# dpc_r, g_dpc_r_1, g_r: restricted data from dpc
dpc_r=dpc.loc[:, ['ID','code']]
# g_dpc_r_1: kept to look up the human-readable 'name' of each code later.
g_dpc_r_1=dpc.loc[:, ['ID','code','name']]
# One-hot (dummy) encode the procedure 'code' column.
g_r = pd.get_dummies(dpc_r['code'])
# Reconstruct simulated data for AI learning
df_concat_dpc_get_dummies = pd.concat([dpc_r, g_r], axis=1)
# Remove features that may be the cause of the data leak
dpc_Remove_data_leak = df_concat_dpc_get_dummies.drop(["code",160094710,160094810,160094910,150285010,2113008,8842965,8843014,622224401,810000000,160060010], axis=1)
# Sum up the number of occurrences of each feature for each patient.
total_patient_features= dpc_Remove_data_leak.groupby("ID").sum()
total_patient_features.reset_index() # NOTE(review): result is discarded — reset_index() is not in-place; likely intended reassignment.
# Load a new file with ID and treatment availability
# Prepare training data
image_file_path_ID_and_polyp_pn = './simulated_patient_data.csv'
with codecs.open(image_file_path_ID_and_polyp_pn, "r", "Shift-JIS", "ignore") as file:
    ID_and_polyp_pn = pd.read_table(file, delimiter=",")
ID_and_polyp_pn_data= ID_and_polyp_pn[['ID', 'target']]
# Join the per-patient feature counts onto the (ID, target) table.
ID_treatment_medical_statement=pd.merge(ID_and_polyp_pn_data,total_patient_features,on=["ID"],how='outer')
ID_treatment_medical_statement_o= ID_treatment_medical_statement.fillna(0)
ID_treatment_medical_statement_p=ID_treatment_medical_statement_o.drop("ID", axis=1)
ID_treatment_medical_statement_rename= ID_treatment_medical_statement_p.rename(columns={'code':"Receipt type code"})
merge_data= ID_treatment_medical_statement_rename
# Split the training/validation set into 80% and the test set into 20%, with a constant proportion of cases with lesions
X = merge_data.drop("target",axis=1).values
y = merge_data["target"].values
columns_name = merge_data.drop("target",axis=1).columns
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2,random_state=1)
# Create a function to divide data
def data_split(X, y):
    """Split X/y into stratified 80/20 partitions using the module-level `sss` splitter.

    Returns (train_X_df, train_y, test_X_df, test_y); the X partitions are
    converted to DataFrames with the module-level `columns_name` columns.
    """
    # n_splits=1, so the loop body executes exactly once.
    for fit_idx, hold_idx in sss.split(X, y):
        fit_X, hold_X = X[fit_idx], X[hold_idx]
        fit_y, hold_y = y[fit_idx], y[hold_idx]
    fit_X = pd.DataFrame(fit_X, columns=columns_name)
    hold_X = pd.DataFrame(hold_X, columns=columns_name)
    return fit_X, fit_y, hold_X, hold_y
# Separate into training, validation, and test set
X_train, y_train, X_test, y_test = data_split(X, y)
# The second split carves a validation set out of the training portion.
X_train, y_train, X_val, y_val = data_split(X_train.values, y_train)
# Make test set into pandas
X_test_df = pd.DataFrame(X_test)
y_test_df = pd.DataFrame(y_test)
# Make test set into test_df to keep away for the final process
test_dfp = pd.concat([y_test_df,X_test_df], axis=1)
test_df=test_dfp.rename(columns={0:"target"})
# Make training/validation sets into pandas
y_trainp = pd.DataFrame(y_train)
X_trainp = pd.DataFrame(X_train)
train=pd.concat([y_trainp, X_trainp], axis=1)
y_valp = pd.DataFrame(y_val)
X_valp = pd.DataFrame(X_val)
val=pd.concat([y_valp, X_valp], axis=1)
test_vol=pd.concat([train, val]) # recombine train+val rows; test_df stays held out for the final evaluation.
training_validation_sets=test_vol.rename(columns={0:"target"})
# Create a function to save the results and feature importance after analysis with lightGBM
def reg_top10_lightGBM(merge_data,outname,no,random_state_number):
    """Train a LightGBM regressor on `merge_data` and return its feature importances.

    merge_data: DataFrame with a 'target' column plus feature columns.
    outname, no: string fragments used to build the results CSV filename.
    random_state_number: seed for the stratified 80/20 split.

    Side effect: writes per-run predictions, RMSE and R2 to './<outname><no>.csv'
    (Shift-JIS encoded). Returns a DataFrame with 'importance' values and the
    matching feature names in a 'columns' column.
    """
    # Define the objective variable
    X = merge_data.drop("target",axis=1).values
    y = merge_data["target"].values
    columns_name = merge_data.drop("target",axis=1).columns
    # Stratified 80/20 splitter, seeded per run.
    sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=random_state_number)
    def data_split(X,y):
        # n_splits=1, so the loop body runs once; DataFrames are rebuilt after the loop.
        for train_index, test_index in sss.split(X, y):
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
        X_train = pd.DataFrame(X_train, columns=columns_name)
        X_test = pd.DataFrame(X_test, columns=columns_name)
        return X_train, y_train, X_test, y_test
    X_train, y_train, X_test, y_test = data_split(X, y)
    X_train, y_train, X_val, y_val = data_split(X_train.values, y_train)
    y_test_df = pd.DataFrame(y_test)
    # Prepare dataset: training data: X_train, label: y_train
    train = lgb.Dataset(X_train, label=y_train)
    valid = lgb.Dataset(X_val, label=y_val)
    # Set the parameters
    params = {'task': 'train',
              'boosting_type': 'gbdt',
              'objective': 'regression',
              'metric': 'rmse',
              'learning_rate': 0.1 }
    # Train the model with early stopping on the validation set.
    model = lgb.train(params,
                      train,
                      valid_sets=valid,
                      num_boost_round=3000,
                      early_stopping_rounds=100)
    # Predict the held-out test set at the best iteration found by early stopping.
    y_pred = model.predict(X_test, num_iteration=model.best_iteration)
    # Display actual values and predicted values
    df_pred = pd.DataFrame({'regression_y_test':y_test,'regression_y_pred':y_pred}) # NOTE(review): df_pred is never used afterwards.
    # Calculate MSE (Mean Square Error)
    mse = mean_squared_error(y_test, y_pred)
    # Calculate RSME = √MSE
    rmse = np.sqrt(mse)
    # r2 : Calculate the coefficient of determination
    r2 = r2_score(y_test,y_pred)
    df_Df = pd.DataFrame({'regression_y_test_'+no:y_test,'regression_y_pred_'+no:y_pred,'RMSE_'+no:rmse,'R2_'+no:r2})
    df_Df.to_csv(r""+"./"+outname+no+'.csv', encoding = 'shift-jis')
    importance = pd.DataFrame(model.feature_importance(), columns=['importance'])
    column_list=merge_data.drop(["target"], axis=1)
    importance["columns"] =list(column_list.columns)
    return importance
# Find out Top 50 features procedure / Run the model once
# (single run with seed 1; the 10-run loop below repeats this with varying seeds).
importance = reg_top10_lightGBM(training_validation_sets,"check_data","_1",1)
# Create a function that sorts and stores the values of feature importance.
def after_imp_save_sort(importance, outname, no):
    """Attach human-readable code names to feature importances, save and return them.

    importance: DataFrame with 'importance' values and a 'columns' column of codes.
    outname, no: string fragments used to build the output CSV filename.

    Side effect: writes the full, importance-sorted table (merged with code
    names from the module-level `g_dpc_r_1`) to
    './<outname><no>importance_name_all.csv' (Shift-JIS encoded).
    Returns that merged DataFrame.
    """
    # Sort features by importance, highest first.
    # (The original also called sort_values() once and discarded the result,
    # and built unused `top50`/`importance_name` frames; both removed.)
    i_df = importance.sort_values(by='importance', ascending=False)
    # Build a code -> name lookup from the raw DPC data: drop IDs, de-duplicate,
    # and rename 'code' to 'columns' so it merges against the importance table.
    g_dpc_pre = g_dpc_r_1.drop(["ID"], axis=1)
    g_dpc_Remove_duplicates = g_dpc_pre.drop_duplicates()
    g_dpc_r_columns = g_dpc_Remove_duplicates.rename(columns={'code': "columns"})
    importance_all = pd.merge(i_df, g_dpc_r_columns)
    importance_all.to_csv(r""+"./"+outname+no+'importance_name_all'+'.csv', encoding = 'shift-jis')
    return importance_all
# Run a function to sort and save the values of feature importance.
top50_importance_all = after_imp_save_sort(importance,"check_data","_1")
# 10 runs of this procedure
dict = {} # NOTE(review): shadows the builtin `dict`; later code reads this name, so it is left as-is.
for num in range(10):
    print(num+1)
    # Each run reseeds the 80/20 split with num+1 and writes its own CSV.
    importance = reg_top10_lightGBM(training_validation_sets,"check_data","_"+str(num+1),num+1)
    top50_importance_all = after_imp_save_sort(importance,"check_data","_"+str(num+1))
    dict[str(num)] = top50_importance_all # store each run's merged importance table keyed by run index.
# Recall and merge the saved CSV files
def concat_importance(First_pd, Next_pd):
    """Concatenate two stored importance tables, each de-duplicated by feature code.

    First_pd, Next_pd: string keys into the module-level `dict` of run results.
    Returns the row-wise concatenation of the two de-duplicated frames.
    """
    first_df = pd.DataFrame(dict[First_pd]).drop_duplicates(subset='columns')
    second_df = pd.DataFrame(dict[Next_pd]).drop_duplicates(subset='columns')
    return pd.concat([first_df, second_df])
importance_1_2 = concat_importance("0","1")
importance_3_4 = concat_importance("2","3")
importance_5_6 = concat_importance("4","5")
importance_7_8 = concat_importance("6","7")
importance_9_10 = concat_importance("8","9")
# Pairwise concatenation tree combining all ten runs into one table.
importance_1_4=pd.concat([importance_1_2, importance_3_4])
importance_1_6=pd.concat([importance_1_4, importance_5_6])
importance_1_8=pd.concat([importance_1_6, importance_7_8])
importance_1_10=pd.concat([importance_1_8, importance_9_10])
# Calculate the total value of the feature importance for each code
group_sum=importance_1_10.groupby(["columns"]).sum()
group_sum_s = group_sum.sort_values('importance', ascending=False)
importance_group_sum=group_sum_s.reset_index()
# Create train/validation test data with all features
merge_data_test=pd.concat([training_validation_sets, test_df])
# Feature names ordered by highest total feature importance first.
importance_top50_previous_data=importance_group_sum["columns"]
importance_top50_previous_data # NOTE(review): bare expression; has no effect outside a notebook cell.
# refine the data to top 50 features
dict_top50 = {}
pycaret_dict_top50 = {}
X = range(1, 51)
for i,v in enumerate(X):
    # NOTE(review): v runs 1..50 while i runs 0..49, so the selected column (iloc[v])
    # and the dict key (...[i]) are offset by one — confirm this is intentional.
    dict_top50[str(i)] = importance_top50_previous_data.iloc[v]
    pycaret_dict_top50[importance_top50_previous_data[i]] = merge_data_test[dict_top50[str(i)]]
pycaret_df_dict_top50=pd.DataFrame(pycaret_dict_top50)
# Add the value of target (: objective variable)
target_data=merge_data_test["target"]
target_top50_dataframe=pd.concat([target_data, pycaret_df_dict_top50], axis=1)
# adjust pandas (pycaret needs values as int and column labels as str)
target_top50_dataframe_int=target_top50_dataframe.astype('int')
target_top50_dataframe_columns=target_top50_dataframe_int.columns.astype(str)
numpy_target_top50=target_top50_dataframe_int.to_numpy()
target_top50_dataframe_pycaret=pd.DataFrame(numpy_target_top50,columns=target_top50_dataframe_columns)
# compare the models
from pycaret.classification import * # NOTE(review): wildcard import mid-file; `setup` and `compare_models` come from here.
clf1 = setup(target_top50_dataframe_pycaret, target ='target',train_size = 0.8,data_split_shuffle=False,fold=10,session_id=0)
best_model = compare_models()
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.sqrt",
"pandas.DataFrame",
"pandas.merge",
"lightgbm.train",
"sklearn.metrics.mean_squared_error",
"lightgbm.Dataset",
"pandas.read_table",
"pandas.get_dummies",
"codecs.open",
"sklearn.metrics.r2_score",
"pandas.concat"
] | [((674, 703), 'pandas.get_dummies', 'pd.get_dummies', (["dpc_r['code']"], {}), "(dpc_r['code'])\n", (688, 703), True, 'import pandas as pd\n'), ((778, 809), 'pandas.concat', 'pd.concat', (['[dpc_r, g_r]'], {'axis': '(1)'}), '([dpc_r, g_r], axis=1)\n', (787, 809), True, 'import pandas as pd\n'), ((1684, 1762), 'pandas.merge', 'pd.merge', (['ID_and_polyp_pn_data', 'total_patient_features'], {'on': "['ID']", 'how': '"""outer"""'}), "(ID_and_polyp_pn_data, total_patient_features, on=['ID'], how='outer')\n", (1692, 1762), True, 'import pandas as pd\n'), ((2348, 2413), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(0.2)', 'random_state': '(1)'}), '(n_splits=1, test_size=0.2, random_state=1)\n', (2370, 2413), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((3027, 3047), 'pandas.DataFrame', 'pd.DataFrame', (['X_test'], {}), '(X_test)\n', (3039, 3047), True, 'import pandas as pd\n'), ((3060, 3080), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {}), '(y_test)\n', (3072, 3080), True, 'import pandas as pd\n'), ((3156, 3197), 'pandas.concat', 'pd.concat', (['[y_test_df, X_test_df]'], {'axis': '(1)'}), '([y_test_df, X_test_df], axis=1)\n', (3165, 3197), True, 'import pandas as pd\n'), ((3298, 3319), 'pandas.DataFrame', 'pd.DataFrame', (['y_train'], {}), '(y_train)\n', (3310, 3319), True, 'import pandas as pd\n'), ((3331, 3352), 'pandas.DataFrame', 'pd.DataFrame', (['X_train'], {}), '(X_train)\n', (3343, 3352), True, 'import pandas as pd\n'), ((3359, 3398), 'pandas.concat', 'pd.concat', (['[y_trainp, X_trainp]'], {'axis': '(1)'}), '([y_trainp, X_trainp], axis=1)\n', (3368, 3398), True, 'import pandas as pd\n'), ((3408, 3427), 'pandas.DataFrame', 'pd.DataFrame', (['y_val'], {}), '(y_val)\n', (3420, 3427), True, 'import pandas as pd\n'), ((3437, 3456), 'pandas.DataFrame', 'pd.DataFrame', (['X_val'], {}), '(X_val)\n', (3449, 3456), True, 'import pandas as pd\n'), ((3461, 3496), 
'pandas.concat', 'pd.concat', (['[y_valp, X_valp]'], {'axis': '(1)'}), '([y_valp, X_valp], axis=1)\n', (3470, 3496), True, 'import pandas as pd\n'), ((3506, 3529), 'pandas.concat', 'pd.concat', (['[train, val]'], {}), '([train, val])\n', (3515, 3529), True, 'import pandas as pd\n'), ((7893, 7936), 'pandas.concat', 'pd.concat', (['[importance_1_2, importance_3_4]'], {}), '([importance_1_2, importance_3_4])\n', (7902, 7936), True, 'import pandas as pd\n'), ((7952, 7995), 'pandas.concat', 'pd.concat', (['[importance_1_4, importance_5_6]'], {}), '([importance_1_4, importance_5_6])\n', (7961, 7995), True, 'import pandas as pd\n'), ((8011, 8054), 'pandas.concat', 'pd.concat', (['[importance_1_6, importance_7_8]'], {}), '([importance_1_6, importance_7_8])\n', (8020, 8054), True, 'import pandas as pd\n'), ((8071, 8115), 'pandas.concat', 'pd.concat', (['[importance_1_8, importance_9_10]'], {}), '([importance_1_8, importance_9_10])\n', (8080, 8115), True, 'import pandas as pd\n'), ((8421, 8467), 'pandas.concat', 'pd.concat', (['[training_validation_sets, test_df]'], {}), '([training_validation_sets, test_df])\n', (8430, 8467), True, 'import pandas as pd\n'), ((8935, 8967), 'pandas.DataFrame', 'pd.DataFrame', (['pycaret_dict_top50'], {}), '(pycaret_dict_top50)\n', (8947, 8967), True, 'import pandas as pd\n'), ((9078, 9133), 'pandas.concat', 'pd.concat', (['[target_data, pycaret_df_dict_top50]'], {'axis': '(1)'}), '([target_data, pycaret_df_dict_top50], axis=1)\n', (9087, 9133), True, 'import pandas as pd\n'), ((9418, 9490), 'pandas.DataFrame', 'pd.DataFrame', (['numpy_target_top50'], {'columns': 'target_top50_dataframe_columns'}), '(numpy_target_top50, columns=target_top50_dataframe_columns)\n', (9430, 9490), True, 'import pandas as pd\n'), ((334, 390), 'codecs.open', 'codecs.open', (['image_file_path', '"""r"""', '"""Shift-JIS"""', '"""ignore"""'], {}), "(image_file_path, 'r', 'Shift-JIS', 'ignore')\n", (345, 390), False, 'import codecs\n'), ((410, 444), 'pandas.read_table', 
'pd.read_table', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (423, 444), True, 'import pandas as pd\n'), ((1352, 1424), 'codecs.open', 'codecs.open', (['image_file_path_ID_and_polyp_pn', '"""r"""', '"""Shift-JIS"""', '"""ignore"""'], {}), "(image_file_path_ID_and_polyp_pn, 'r', 'Shift-JIS', 'ignore')\n", (1363, 1424), False, 'import codecs\n'), ((1456, 1490), 'pandas.read_table', 'pd.read_table', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (1469, 1490), True, 'import pandas as pd\n'), ((2663, 2706), 'pandas.DataFrame', 'pd.DataFrame', (['X_train'], {'columns': 'columns_name'}), '(X_train, columns=columns_name)\n', (2675, 2706), True, 'import pandas as pd\n'), ((2724, 2766), 'pandas.DataFrame', 'pd.DataFrame', (['X_test'], {'columns': 'columns_name'}), '(X_test, columns=columns_name)\n', (2736, 2766), True, 'import pandas as pd\n'), ((3967, 4055), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(0.2)', 'random_state': 'random_state_number'}), '(n_splits=1, test_size=0.2, random_state=\n random_state_number)\n', (3989, 4055), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((4567, 4587), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {}), '(y_test)\n', (4579, 4587), True, 'import pandas as pd\n'), ((4662, 4697), 'lightgbm.Dataset', 'lgb.Dataset', (['X_train'], {'label': 'y_train'}), '(X_train, label=y_train)\n', (4673, 4697), True, 'import lightgbm as lgb\n'), ((4710, 4741), 'lightgbm.Dataset', 'lgb.Dataset', (['X_val'], {'label': 'y_val'}), '(X_val, label=y_val)\n', (4721, 4741), True, 'import lightgbm as lgb\n'), ((4981, 5076), 'lightgbm.train', 'lgb.train', (['params', 'train'], {'valid_sets': 'valid', 'num_boost_round': '(3000)', 'early_stopping_rounds': '(100)'}), '(params, train, valid_sets=valid, num_boost_round=3000,\n early_stopping_rounds=100)\n', (4990, 5076), True, 'import lightgbm as lgb\n'), ((5312, 5384), 
'pandas.DataFrame', 'pd.DataFrame', (["{'regression_y_test': y_test, 'regression_y_pred': y_pred}"], {}), "({'regression_y_test': y_test, 'regression_y_pred': y_pred})\n", (5324, 5384), True, 'import pandas as pd\n'), ((5432, 5466), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5450, 5466), False, 'from sklearn.metrics import mean_squared_error\n'), ((5506, 5518), 'numpy.sqrt', 'np.sqrt', (['mse'], {}), '(mse)\n', (5513, 5518), True, 'import numpy as np\n'), ((5582, 5606), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5590, 5606), False, 'from sklearn.metrics import r2_score\n'), ((5618, 5750), 'pandas.DataFrame', 'pd.DataFrame', (["{('regression_y_test_' + no): y_test, ('regression_y_pred_' + no): y_pred,\n ('RMSE_' + no): rmse, ('R2_' + no): r2}"], {}), "({('regression_y_test_' + no): y_test, ('regression_y_pred_' +\n no): y_pred, ('RMSE_' + no): rmse, ('R2_' + no): r2})\n", (5630, 5750), True, 'import pandas as pd\n'), ((6617, 6649), 'pandas.merge', 'pd.merge', (['top50', 'g_dpc_r_columns'], {}), '(top50, g_dpc_r_columns)\n', (6625, 6649), True, 'import pandas as pd\n'), ((6668, 6699), 'pandas.merge', 'pd.merge', (['i_df', 'g_dpc_r_columns'], {}), '(i_df, g_dpc_r_columns)\n', (6676, 6699), True, 'import pandas as pd\n'), ((7366, 7394), 'pandas.DataFrame', 'pd.DataFrame', (['dict[First_pd]'], {}), '(dict[First_pd])\n', (7378, 7394), True, 'import pandas as pd\n'), ((7477, 7504), 'pandas.DataFrame', 'pd.DataFrame', (['dict[Next_pd]'], {}), '(dict[Next_pd])\n', (7489, 7504), True, 'import pandas as pd\n'), ((7589, 7630), 'pandas.concat', 'pd.concat', (['[importance_1d, importance_2d]'], {}), '([importance_1d, importance_2d])\n', (7598, 7630), True, 'import pandas as pd\n'), ((4270, 4313), 'pandas.DataFrame', 'pd.DataFrame', (['X_train'], {'columns': 'columns_name'}), '(X_train, columns=columns_name)\n', (4282, 4313), True, 'import pandas as pd\n'), ((4331, 
4373), 'pandas.DataFrame', 'pd.DataFrame', (['X_test'], {'columns': 'columns_name'}), '(X_test, columns=columns_name)\n', (4343, 4373), True, 'import pandas as pd\n')] |
#%%
import sys
import numpy as np
from typing import Any, List
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
# Make the local project folder importable (machine-specific absolute path).
sys.path.append('C:/Users/panos/Documents/Διπλωματική/code/fz')
from arfftocsv import function_labelize
import csv # NOTE(review): csv, np and typing's Any/List appear unused in this cell.
# UCI heart-disease column names; 'cvd' is the diagnosis/target column.
colnames =['age', 'sex', 'cp', 'trestbps', 'chol',
        'fbs', 'restecg', 'thalach','exang', 'oldpeak', 'slope',
        'ca', 'thal', 'cvd']
# %%
# Label the four UCI site files and stack them into one frame.
df1 = function_labelize(dest = 'labeled_data1.txt',
                labels=colnames, source = 'processed.hungarian.csv')
df2 = function_labelize(dest = 'labeled_data2.txt',
                labels=colnames, source = 'processed.cleveland.data')
df3 = function_labelize(dest = 'labeled_data3.txt',
                labels=colnames, source = 'processed.va.csv')
df4 =function_labelize(dest = 'labeled_data4.txt',
                labels=colnames, source = 'processed.switzerland.csv')
df = pd.concat([df1,df2,df3,df4], axis=0)
print(df.isna().sum())
# Collapse disease severities 2-4 into a single positive class (binary target).
df['cvd'] = df['cvd'].replace([2,3,4], 1)
scaler = MinMaxScaler()
X = df[colnames[:-1]]
y = df[colnames[-1]]
# Min-max scale the features to [0, 1].
X_norm = scaler.fit_transform(X)
print(X_norm)
print(y)
# %%
| [
"arfftocsv.function_labelize",
"sys.path.append",
"sklearn.preprocessing.MinMaxScaler",
"pandas.concat"
] | [((130, 193), 'sys.path.append', 'sys.path.append', (['"""C:/Users/panos/Documents/Διπλωματική/code/fz"""'], {}), "('C:/Users/panos/Documents/Διπλωματική/code/fz')\n", (145, 193), False, 'import sys\n'), ((388, 487), 'arfftocsv.function_labelize', 'function_labelize', ([], {'dest': '"""labeled_data1.txt"""', 'labels': 'colnames', 'source': '"""processed.hungarian.csv"""'}), "(dest='labeled_data1.txt', labels=colnames, source=\n 'processed.hungarian.csv')\n", (405, 487), False, 'from arfftocsv import function_labelize\n'), ((494, 594), 'arfftocsv.function_labelize', 'function_labelize', ([], {'dest': '"""labeled_data2.txt"""', 'labels': 'colnames', 'source': '"""processed.cleveland.data"""'}), "(dest='labeled_data2.txt', labels=colnames, source=\n 'processed.cleveland.data')\n", (511, 594), False, 'from arfftocsv import function_labelize\n'), ((601, 693), 'arfftocsv.function_labelize', 'function_labelize', ([], {'dest': '"""labeled_data3.txt"""', 'labels': 'colnames', 'source': '"""processed.va.csv"""'}), "(dest='labeled_data3.txt', labels=colnames, source=\n 'processed.va.csv')\n", (618, 693), False, 'from arfftocsv import function_labelize\n'), ((699, 800), 'arfftocsv.function_labelize', 'function_labelize', ([], {'dest': '"""labeled_data4.txt"""', 'labels': 'colnames', 'source': '"""processed.switzerland.csv"""'}), "(dest='labeled_data4.txt', labels=colnames, source=\n 'processed.switzerland.csv')\n", (716, 800), False, 'from arfftocsv import function_labelize\n'), ((806, 845), 'pandas.concat', 'pd.concat', (['[df1, df2, df3, df4]'], {'axis': '(0)'}), '([df1, df2, df3, df4], axis=0)\n', (815, 845), True, 'import pandas as pd\n'), ((917, 931), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (929, 931), False, 'from sklearn.preprocessing import MinMaxScaler\n')] |
"""
The following constructor classes exist here:
+------------------------------------------+---------------------------------------+
| Class | Description |
+==========================================+=======================================+
| :py:class:`~matlab2cpp.datatype.Type` | Frontend for the datatype string |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Dim` | Reference to the number of dimensions |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Mem` | Reference to the memory type |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Num` | Numerical value indicator |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Suggest` | Frontend for suggested datatype |
+------------------------------------------+---------------------------------------+
"""
import supplement
import matlab2cpp as mc
# Datatype names grouped by dimensionality (Armadillo naming scheme):
# scalars, column vectors, row vectors, matrices and cubes.
dim0 = {"int", "float", "uword", "double", "cx_double", "size_t"}
dim1 = {"ivec", "fvec", "uvec", "vec", "cx_vec"}
dim2 = {"irowvec", "frowvec", "urowvec", "rowvec", "cx_rowvec"}
dim3 = {"imat", "fmat", "umat", "mat", "cx_mat"}
dim4 = {"icube", "fcube", "ucube", "cube", "cx_cube"}
dims = [dim0, dim1, dim2, dim3, dim4]
# The same names grouped by element memory type: unsigned int, int,
# float, double and complex double.
# NOTE(review): "size_t" appears only in dim0 and in no mem* set, so
# get_mem("size_t") raises ValueError — confirm this is intended.
mem0 = {"uword", "uvec", "urowvec", "umat", "ucube"}
mem1 = {"int", "ivec", "irowvec", "imat", "icube"}
mem2 = {"float", "fvec", "frowvec", "fmat", "fcube"}
mem3 = {"double", "vec", "rowvec", "mat", "cube"}
mem4 = {"cx_double", "cx_vec", "cx_rowvec", "cx_mat", "cx_cube"}
mems = [mem0, mem1, mem2, mem3, mem4]
# Non-numerical datatypes handled specially throughout the module.
others = {"char", "string", "TYPE", "func_lambda", "struct", "structs", "cell",
        "wall_clock", "SPlot"}
def common_loose(vals):
    """Return the loosely-enforced common datatype among several names.

    `vals` may be a single datatype name (a string or a (dim, mem) pair)
    or a sequence of them.  Unknown ("TYPE") and non-numerical names are
    discarded before the common numerical denominator is computed.
    """
    # Normalise the input to a list of datatype-name strings.
    if not isinstance(vals, (tuple, list)) or \
            isinstance(vals[0], int):
        vals = [vals]
    vals = list(vals)
    # enumerate() instead of the original xrange() index loop: xrange is
    # Python-2-only and the index form is less idiomatic.
    for i, val in enumerate(vals):
        if isinstance(val, str):
            continue
        if isinstance(val[0], int):
            vals[i] = get_name(*val)
    vals = set(vals)
    if len(vals) == 1:
        return vals.pop()
    # The unknown type never dominates under loose enforcement.
    vals.discard("TYPE")
    if len(vals) == 1:
        return vals.pop()
    # Non-numerical types are simply ignored.
    for other in others:
        vals.discard(other)
    if len(vals) == 0:
        return "TYPE"
    elif len(vals) == 1:
        return vals.pop()
    # list(...) so the truth test and the later `in` check also work on
    # Python 3, where map() returns a one-shot iterator (always truthy).
    dims_ = list(map(get_dim, vals))
    if dims_:
        dim = max(dims_)
    else:
        return "TYPE"
    # A column- and a row-vector combine into a matrix.
    if dim == 2 and 1 in dims_:
        dim = 3
    # Renamed from `type` to avoid shadowing the builtin.
    mems_ = list(map(get_mem, vals))
    mem = max(mems_)
    return get_name(dim, mem)
def common_strict(vals):
    """Return the strictly-enforced common datatype among several names.

    Unlike common_loose, any non-numerical member (including "TYPE")
    makes the result "TYPE", and mixing column- and row-vectors is
    rejected as "TYPE".
    """
    if not isinstance(vals, (tuple, list)) \
            or isinstance(vals[0], int):
        vals = [vals]
    vals = list(vals)
    # enumerate() instead of the Python-2-only xrange() index loop.
    for i, val in enumerate(vals):
        if isinstance(val, str):
            continue
        if isinstance(val[0], int):
            vals[i] = get_name(*val)
    vals = set(vals)
    if len(vals) == 1:
        return vals.pop()
    # Strict enforcement: any non-numerical member poisons the result.
    for other in others:
        if other in vals:
            return "TYPE"
    # list(...) so the later `in` check also works on Python 3, where
    # map() returns a one-shot iterator.
    dims_ = list(map(get_dim, vals))
    dim = max(dims_)
    # Mixing column- and row-vectors is not allowed under strict rules.
    if dim == 2 and 1 in dims_:
        return "TYPE"
    mems_ = list(map(get_mem, vals))
    return get_name(dim, max(mems_))
def pointer_split(name):
    """Split trailing '*' pointer markers off a datatype name.

    Returns a (pointer_depth, base_name) pair; a name without stars
    comes back unchanged with depth 0.
    """
    depth = name.count("*")
    if depth:
        return depth, name[:-depth]
    return 0, name
def get_dim(val):
    """Return the dimensionality index of a datatype name.

    0=scalar, 1=vector, 2=row-vector, 3=matrix, 4=cube and None for
    non-numerical types.  Trailing '*' pointer markers are ignored.
    """
    while val[-1] == "*":
        val = val[:-1]
    if val in others:
        return None
    for index, group in enumerate(dims):
        if val in group:
            return index
    raise ValueError("Datatype '%s' not recognized" % val)
def get_mem(val):
    """Return the memory-type index of a datatype name.

    0=unsigned int, 1=int, 2=float, 3=double, 4=complex and None for
    non-numerical types.  Trailing '*' pointer markers are ignored.
    """
    while val[-1] == "*":
        val = val[:-1]
    if val in others:
        return None
    for index, group in enumerate(mems):
        if val in group:
            return index
    raise ValueError("Datatype '%s' not recognized" % val)
def get_num(val):
    """Return True when the datatype name is numerical."""
    while val[-1] == "*":
        val = val[:-1]
    return val not in others
def get_name(dim, mem):
    """Return the unique datatype name with the given dim and mem indices."""
    common = dims[dim].intersection(mems[mem])
    return common.pop()
def get_type(instance):
    """Return the datatype of a node.

    Unknown nodes ("TYPE") defer to their declaration node instead.
    """
    target = instance.declare if instance.prop["type"] == "TYPE" else instance
    return target.prop["type"]
class Dim(object):
    """
    The `node.dim` is a help variable for handling numerical datatype.
    It represents the number of dimension a numerical object represents:
    +-------+--------------+
    | *dim* | Description  |
    +=======+==============+
    | 0     | scalar       |
    +-------+--------------+
    | 1     | (col-)vector |
    +-------+--------------+
    | 2     | row-vector   |
    +-------+--------------+
    | 3     | matrix       |
    +-------+--------------+
    | 4     | cube         |
    +-------+--------------+
    | None  | Other        |
    +-------+--------------+
    The variable can be both read and set in real time:
    >>> node = mc.Var(None, "name")
    >>> node.type="float"
    >>> print node.dim
    0
    >>> node.dim = 3
    >>> print node.type
    fmat
    """
    # NOTE(review): the doctests above use Python 2 `print` statements —
    # this module appears to target Python 2 (it also uses xrange elsewhere).
    def __get__(self, instance, owner):
        # Class-level access (e.g. Node.dim) returns the descriptor itself.
        if instance is None:
            return self
        # Derive the dimensionality from the node's (declared) type name.
        return get_dim(get_type(instance))
    def __set__(self, instance, value):
        # Keep the current memory type, swap in the new dimensionality.
        mem = get_mem(get_type(instance))
        instance.prop["type"] = get_name(value, mem)
class Mem(object):
    """
    The `node.mem` is a help variable for handling numerical datatype.
    It represents the internal basic datatype represented in memory:
    +-------+-------------+
    | *mem* | Description |
    +=======+=============+
    | 0     | unsiged int |
    +-------+-------------+
    | 1     | integer     |
    +-------+-------------+
    | 2     | float       |
    +-------+-------------+
    | 3     | double      |
    +-------+-------------+
    | 4     | complex     |
    +-------+-------------+
    | None  | Other       |
    +-------+-------------+
    The variable can be both read and set in real time:
    >>> node = mc.Var(None, "name")
    >>> node.type="float"
    >>> print node.mem
    2
    >>> node.mem = 3
    >>> print node.type
    double
    """
    # NOTE(review): the doctests above use Python 2 `print` statements —
    # this module appears to target Python 2.
    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        # Derive the memory type from the node's (declared) type name.
        return get_mem(get_type(instance))
    def __set__(self, instance, value):
        # Keep the current dimensionality, swap in the new memory type.
        dim = get_dim(get_type(instance))
        instance.prop["type"] = get_name(dim, value)
class Num(object):
    """
    Boolean helper attribute: True when the node's datatype is numerical.

    Assigning a falsy value resets the datatype to "TYPE"; assigning a
    truthy value is an error, since no single numerical type could be
    chosen consistently.
    """
    def __get__(self, instance, owner):
        if instance is None:
            return self
        return get_num(get_type(instance))
    def __set__(self, instance, value):
        if value:
            raise AttributeError("num can not be set True consistently")
        instance.prop["type"] = "TYPE"
class Type(object):
    """
    Descriptor exposing a node's datatype string (`node.type`), also
    available as the `%(type)s` placeholder.

    Numerical datatypes follow the Armadillo naming scheme and vary along
    two axes: dimensionality (scalar, vector `*vec`, row-vector `*rowvec`,
    matrix `*mat`, cube `*cube`) and element memory type (`u*` unsigned
    int, `i*` int, `f*` float, plain double, `cx_*` complex).
    Non-numerical datatypes include "char", "string", "struct", "structs"
    and "func_lambda".

    Assigning a string stores it directly, with trailing '*' markers
    converted into the node's pointer depth; assigning a collection stores
    the strict common denominator; a falsy value stores "TYPE".
    """
    def __get__(self, instance, owner):
        return self if instance is None else get_type(instance)
    def __set__(self, instance, value):
        if not value:
            value = "TYPE"
        if isinstance(value, str):
            # Trailing '*' markers become the node's pointer depth.
            instance.pointer, value = pointer_split(value)
        else:
            value = common_strict(value)
        instance.prop["type"] = value
class Suggest(object):
    """Descriptor for the suggested datatype; writes go to the declaration
    node, reads come from the supplement suggestion table."""
    def __set__(self, instance, value):
        # Suggesting the unknown type is a no-op.
        if value != "TYPE":
            instance.declare.prop["suggest"] = value
    def __get__(self, instance, owner):
        return supplement.suggests.get(instance)
if __name__ == "__main__":
    # Run the doctests embedded in the descriptor docstrings above.
    import doctest
    doctest.testmod()
| [
"supplement.suggests.get",
"doctest.testmod"
] | [((10305, 10322), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (10320, 10322), False, 'import doctest\n'), ((10220, 10253), 'supplement.suggests.get', 'supplement.suggests.get', (['instance'], {}), '(instance)\n', (10243, 10253), False, 'import supplement\n')] |
#!/usr/bin/env python3
"""awspfx
Usage:
awspfx.py <profile>
awspfx.py [(-c | --current) | (-l | --list) | (-s | --swap)]
awspfx.py token [(-p | --profile) <profile>]
awspfx.py sso [(login | token)] [(-p | --profile) <profile>]
awspfx.py -h | --help
awspfx.py --version
Examples:
awspfx.py default # Change profile to 'default'
awspfx.py token # Token from current profile, default from SSO
awspfx.py token -p default # Token from profile 'default'
awspfx.py (-c | -l | -s)
SubCommands:
token Generate credentials
-p --profile Select profile
Options:
-c --current Change the profile
-l --list List profiles
-s --swap Swap previous the profile
-h --help Show this screen.
--version Show version.
WIP:
sso Option to login
sts Option to assume-role
"""
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from configparser import ConfigParser as cfgParser
import boto3
from colorlog import ColoredFormatter
from docopt import docopt
from iterfzf import iterfzf
def setup_logging():
    """Configure and return the colourised "pythonConfig" stream logger."""
    level = logging.INFO
    fmt = "\n%(log_color)s%(levelname)s%(reset)s => %(log_color)s%(message)s%(reset)s"
    logging.root.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(ColoredFormatter(fmt))
    logger = logging.getLogger("pythonConfig")
    logger.setLevel(level)
    logger.addHandler(handler)
    return logger
def exit_err(msg):
    """Log `msg` as an error and terminate with a failing exit status.

    Exits with status 1 so shells and callers can detect the failure;
    the previous bare sys.exit() reported success (status 0) on error.
    """
    log.error(msg)
    sys.exit(1)
def has_which(command, err=True):
    """Return `command` if it is on PATH; otherwise abort (err=True) or
    return False (err=False)."""
    if shutil.which(command) is None:
        if err:
            exit_err(f"Command not installed: {command}")
        return False
    return command
def has_file(file, create=False):
    """Return `file` if it exists, creating it empty when create=True.

    A missing file with create=False aborts the program.
    """
    if not os.path.isfile(file):
        if create:
            with open(file, "w+"):
                pass
        else:
            exit_err(f"File not exist: {file}")
    return file
def run_cmd(command):
    """Run `command` through the shell and return its output; abort on a
    non-zero exit status."""
    status, output = subprocess.getstatusoutput(command)
    if status != 0:
        exit_err(("Occurred: ", output))
    return output
def fzf(data: list, current: str = None):
    """Interactively pick one entry from `data` via fzf; abort when fzf is
    missing or nothing was chosen."""
    if not has_which("fzf", err=False):
        print(*data, sep="\n")
        exit_err("Not installed 'fzf'")
    return iterfzf(data) or exit_err("you did not choose any of the options")
def sed_inplace(filename, pattern, repl):
    """Replace `pattern` with `repl` inside `filename`.

    When the file contains no AWS_PROFILE entry, an `export <repl>` line
    is appended instead.  The file is rewritten via a temporary file and
    moved back into place, preserving its stat metadata.
    """
    regex = re.compile(pattern, re.MULTILINE)
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as tmp_file:
        with open(filename, "r") as handle:
            contents = handle.read()
        if "AWS_PROFILE" in contents:
            tmp_file.write(regex.sub(repl, contents))
        else:
            print("No exist profile")
            tmp_file.write(contents)
            tmp_file.write(f"export {repl}")
    shutil.copystat(filename, tmp_file.name)
    shutil.move(tmp_file.name, filename)
def setup_aws(ctx: str = None):
    """Create a boto3 session, optionally bound to profile `ctx`; abort on
    any boto3 failure."""
    try:
        if ctx is None:
            session = boto3.session.Session()
        else:
            session = boto3.session.Session(profile_name=ctx)
        return session
    except Exception as e:
        exit_err(e)
def current_profile(err=True):
    """Return the active profile name; with err=True a missing name aborts."""
    ctx = aws.profile_name
    if not err:
        return ctx
    return ctx or exit_err("Getting current profile")
def get_profiles(err=True):
    """Return the available profile names sorted descending.

    A lookup failure is logged and yields None (best-effort behaviour).
    """
    try:
        profiles = sorted(aws.available_profiles, reverse=True)
        if not err:
            return profiles
        return profiles or exit_err("Getting profile list")
    except Exception as e:
        log.error(e)
def list_profiles(lst=False):
    """Print all profiles (lst=True) or interactively pick one via fzf."""
    active = current_profile(err=False)
    profiles = get_profiles()
    if lst:
        print(*reversed(profiles), sep="\n")
        return None
    return fzf(data=profiles, current=active)
def read_profile():
    """Return the previously saved profile name from the cache file."""
    with open(awspfx_cache, 'r') as cache:
        return cache.read()
def save_profile(ctx_current):
    """Persist `ctx_current` (empty string for falsy) to the cache file."""
    with open(awspfx_cache, "w") as cache:
        cache.write(ctx_current if ctx_current else "")
def switch_profile(ctx, ctx_current):
    """Point AWS_PROFILE in .envrc at `ctx`, remember the old profile for
    --swap, and reload direnv."""
    sed_inplace(envrc_file,
                f'AWS_PROFILE="{ctx_current}"',
                f'AWS_PROFILE="{ctx}"')
    save_profile(ctx_current)
    run_cmd("direnv allow && direnv reload")
def set_profile(ctx, ctx_current=None, sms=None):
    """Switch to profile `ctx` unless it is already the active one.

    `sms` overrides the success message logged after switching.
    """
    ctx_current = ctx_current or current_profile(err=False)
    if ctx == ctx_current:
        log.warning(f"The profile is not changed: {ctx_current}")
        return
    switch_profile(ctx, ctx_current)
    log.info(sms or f"Switched to profile: {ctx}")
def swap_profile():
    """Switch back to the previously used profile, if one was saved."""
    previous = read_profile()
    if previous:
        set_profile(ctx=previous,
                    sms=f"Switched to previous profile: {previous}")
def exist_profile(ctx):
    """Return True when `ctx` is a known profile; abort otherwise."""
    if ctx not in get_profiles():
        exit_err(f"Profile does not exist: {ctx}")
    return True
def sso(account_id, role_name):
    """Fetch role credentials from AWS SSO using a cached access token.

    Every JSON file under ~/.aws/sso/cache is tried in turn; the first
    cached access token that successfully yields role credentials wins.
    If none works, a warning is logged and the process exits with status 2.
    """
    client = aws.client("sso", region_name="us-east-1")
    aws_sso_cache = os.path.expanduser("~/.aws/sso/cache")
    # All cached SSO login artefacts (token files end in .json).
    json_files = [
        pos_json for pos_json in os.listdir(
            aws_sso_cache
        ) if pos_json.endswith(
            ".json"
        )
    ]
    for json_file in json_files:
        path = f"{aws_sso_cache}/{json_file}"
        with open(path) as file:
            data = json.load(file)
            if "accessToken" in data:
                access_token = data['accessToken']
                try:
                    cred = client.get_role_credentials(
                        accountId=account_id,
                        roleName=role_name,
                        accessToken=access_token
                    )
                    return cred
                except Exception as e:
                    # Token expired/invalid — log and try the next cache file.
                    log.error(e)
    # Fell through every cached token without success.
    log.warning("The SSO session associated with this profile has expired "
                "or is otherwise invalid. To refresh this SSO session run "
                "aws sso login with the corresponding profile.")
    sys.exit(2)
def sts(account_id, role, region):
    """Assume `role` in account `account_id` via STS and return the
    raw assume_role response."""
    client = aws.client("sts", region_name=region)
    return client.assume_role(
        RoleArn=f"arn:aws:iam::{account_id}:role/{role}",
        RoleSessionName="session01",
    )
def get_token(ctx, sso_=True, sts_=False):
    """Generate temporary credentials for profile `ctx` and write them
    into ~/.aws/credentials.

    Account id / role / region come from the environment when set,
    otherwise from the profile's section in the credentials file.
    """
    aws_cred = cfgParser()
    aws_cred.read(creds_file)
    # Environment variables take precedence over the credentials file.
    act_id = os.getenv("AWS_ACCOUNT_ID") or aws_cred.get(ctx, "account_id")
    act_role = os.getenv("AWS_ROLE_NAME") or aws_cred.get(ctx, "role_name")
    act_region = os.getenv("AWS_REGION") or aws_cred.get(ctx, "region")
    if sso_:
        cred = sso(account_id=act_id, role_name=act_role)
    elif sts_:
        # NOTE(review): boto3 assume_role responses are keyed
        # 'Credentials'/'AccessKeyId', not 'roleCredentials' — the
        # indexing below likely only matches the SSO response. Verify
        # before enabling the sts_ path.
        cred = sts(account_id=act_id, role=act_role, region=act_region)
    else:
        cred = {}
        exit_err("Not select option from token")
    aws_access_key_id = cred['roleCredentials']['accessKeyId']
    aws_secret_access_key = cred['roleCredentials']['secretAccessKey']
    aws_session_token = cred['roleCredentials']['sessionToken']
    # print('Save Credentials in ~/.aws/credentials ...')
    # Persist the fresh credentials into the profile's section.
    aws_cred.set(ctx, "aws_access_key_id", aws_access_key_id)
    aws_cred.set(ctx, "aws_secret_access_key", aws_secret_access_key)
    aws_cred.set(ctx, "aws_session_token", aws_session_token)
    with open(creds_file, "w") as f:
        aws_cred.write(f)
def main(argv):
    """Dispatch the parsed docopt arguments to the matching action."""
    ctx = argv['<profile>']
    # 'token' sub-command: generate credentials, for -p <profile> or the
    # current profile.
    if ctx == "token" or argv['token']:
        if argv['--profile']:
            if exist_profile(ctx):
                get_token(ctx)
                log.info(f"Generate token to: {ctx}")
        else:
            ctx = current_profile()
            get_token(ctx)
            log.info(f"Generate token to: {ctx}")
        sys.exit()
    # 'sso' sub-command is still a stub (see the WIP section in the usage).
    if ctx == "sso" or argv['sso']:
        print("sso")
        sys.exit()
    if argv['--current']:
        log.info(f"The current profile is: '{current_profile()}'")
        sys.exit()
    if argv['--list']:
        list_profiles(lst=True)
        sys.exit()
    if argv['--swap']:
        swap_profile()
        sys.exit()
    # Default action: switch profile — interactive fzf pick when no
    # profile argument was given.
    if ctx or ctx is None:
        if ctx is None:
            ctx_profile = list_profiles()
        else:
            ctx_profile = ctx if exist_profile(ctx) else sys.exit()
        set_profile(ctx_profile)
        sys.exit()
if __name__ == "__main__":
    # Global logger used by every helper above.
    log = setup_logging()
    home_path = os.getenv('HOME') or exit_err("Home directory does not exist?")
    # aws_profile_env = os.getenv("AWS_PROFILE")
    # boto3 session bound to the current environment.
    aws = setup_aws()
    # Cache file remembering the previously active profile (for --swap).
    awspfx_cache = has_file(f"{home_path}/.aws/awspfx", create=True)
    # direnv is required to (re)load the exported AWS_PROFILE.
    direnv = has_which("direnv")
    envrc_file = has_file(f"{home_path}/.envrc")
    creds_file = has_file(f"{home_path}/.aws/credentials")
    arguments = docopt(__doc__, version=f'awspfx 0.1.6 - python {sys.version}')
    main(arguments)
| [
"logging.getLogger",
"logging.StreamHandler",
"boto3.session.Session",
"configparser.ConfigParser",
"re.compile",
"sys.exit",
"docopt.docopt",
"os.listdir",
"shutil.move",
"iterfzf.iterfzf",
"logging.root.setLevel",
"tempfile.NamedTemporaryFile",
"os.path.expanduser",
"shutil.copystat",
... | [((1326, 1358), 'logging.root.setLevel', 'logging.root.setLevel', (['log_level'], {}), '(log_level)\n', (1347, 1358), False, 'import logging\n'), ((1375, 1403), 'colorlog.ColoredFormatter', 'ColoredFormatter', (['log_format'], {}), '(log_format)\n', (1391, 1403), False, 'from colorlog import ColoredFormatter\n'), ((1418, 1441), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1439, 1441), False, 'import logging\n'), ((1521, 1554), 'logging.getLogger', 'logging.getLogger', (['"""pythonConfig"""'], {}), "('pythonConfig')\n", (1538, 1554), False, 'import logging\n'), ((1674, 1684), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1682, 1684), False, 'import sys\n'), ((2205, 2240), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (['command'], {}), '(command)\n', (2231, 2240), False, 'import subprocess\n'), ((2627, 2660), 're.compile', 're.compile', (['pattern', 're.MULTILINE'], {}), '(pattern, re.MULTILINE)\n', (2637, 2660), False, 'import re\n'), ((3074, 3114), 'shutil.copystat', 'shutil.copystat', (['filename', 'tmp_file.name'], {}), '(filename, tmp_file.name)\n', (3089, 3114), False, 'import shutil\n'), ((3119, 3155), 'shutil.move', 'shutil.move', (['tmp_file.name', 'filename'], {}), '(tmp_file.name, filename)\n', (3130, 3155), False, 'import shutil\n'), ((5445, 5483), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.aws/sso/cache"""'], {}), "('~/.aws/sso/cache')\n", (5463, 5483), False, 'import os\n'), ((6698, 6709), 'configparser.ConfigParser', 'cfgParser', ([], {}), '()\n', (6707, 6709), True, 'from configparser import ConfigParser as cfgParser\n'), ((9095, 9158), 'docopt.docopt', 'docopt', (['__doc__'], {'version': 'f"""awspfx 0.1.6 - python {sys.version}"""'}), "(__doc__, version=f'awspfx 0.1.6 - python {sys.version}')\n", (9101, 9158), False, 'from docopt import docopt\n'), ((1731, 1752), 'shutil.which', 'shutil.which', (['command'], {}), '(command)\n', (1743, 1752), False, 'import shutil\n'), ((1967, 1987), 
'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (1981, 1987), False, 'import os\n'), ((2508, 2521), 'iterfzf.iterfzf', 'iterfzf', (['data'], {}), '(data)\n', (2515, 2521), False, 'from iterfzf import iterfzf\n'), ((2671, 2722), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (2698, 2722), False, 'import tempfile\n'), ((6754, 6781), 'os.getenv', 'os.getenv', (['"""AWS_ACCOUNT_ID"""'], {}), "('AWS_ACCOUNT_ID')\n", (6763, 6781), False, 'import os\n'), ((6832, 6858), 'os.getenv', 'os.getenv', (['"""AWS_ROLE_NAME"""'], {}), "('AWS_ROLE_NAME')\n", (6841, 6858), False, 'import os\n'), ((6910, 6933), 'os.getenv', 'os.getenv', (['"""AWS_REGION"""'], {}), "('AWS_REGION')\n", (6919, 6933), False, 'import os\n'), ((8090, 8100), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8098, 8100), False, 'import sys\n'), ((8167, 8177), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8175, 8177), False, 'import sys\n'), ((8280, 8290), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8288, 8290), False, 'import sys\n'), ((8355, 8365), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8363, 8365), False, 'import sys\n'), ((8421, 8431), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8429, 8431), False, 'import sys\n'), ((8651, 8661), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8659, 8661), False, 'import sys\n'), ((8733, 8750), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (8742, 8750), False, 'import os\n'), ((3339, 3362), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (3360, 3362), False, 'import boto3\n'), ((3403, 3442), 'boto3.session.Session', 'boto3.session.Session', ([], {'profile_name': 'ctx'}), '(profile_name=ctx)\n', (3424, 3442), False, 'import boto3\n'), ((5537, 5562), 'os.listdir', 'os.listdir', (['aws_sso_cache'], {}), '(aws_sso_cache)\n', (5547, 5562), False, 'import os\n'), ((5775, 5790), 'json.load', 'json.load', (['file'], {}), '(file)\n', (5784, 5790), 
False, 'import json\n'), ((6353, 6364), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (6361, 6364), False, 'import sys\n'), ((8597, 8607), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8605, 8607), False, 'import sys\n')] |
"""
Re-tooled version of the script found on VideoToTextDNN:
https://github.com/OSUPCVLab/VideoToTextDNN/blob/master/data/process_frames.py
"""
import sys
import os
import argparse
import time
from multiprocessing import Pool
def main(args):
    """Extract frames for the videos in src_dir[start:end] using a
    multiprocessing pool, writing one frame directory per video."""
    src_dir = args.src_dir
    dst_dir = args.dst_dir
    start, end = int(args.start), int(args.end)
    prepend = args.prepend

    names = os.listdir(src_dir)
    if not os.path.isdir(dst_dir):
        os.mkdir(dst_dir)

    # One work item per video: (ffmpeg prefix, name, source, destination).
    jobs = [(prepend, name,
             os.path.join(src_dir, name),
             os.path.join(dst_dir, name))
            for name in names[start:end]]

    pool = Pool()  # Default to number cores
    pool.map(process_vid, jobs)
    pool.close()
    pool.join()
def process_vid(args):
    """Extract frames for a single video with ffmpeg at 20 fps.

    Skips the video when its frame directory already exists.
    """
    (PREPEND, video_file, src_path, dst_path) = args
    if os.path.isdir(dst_path):
        print("Frames directory already found at {}".format(dst_path))
        return
    os.mkdir(dst_path)
    # command = 'ffmpeg -i '+ src_path+' -s 256x256 '+ dst_path + '/%5d.jpg' #with resize
    # %6d numbering keeps filenames in accordance with C3D features.
    command = PREPEND + 'ffmpeg -i '+ src_path+' -r 20 '+ dst_path + '/%6d.jpg > /dev/null 2>&1'
    print(command)
    os.system(command)
if __name__=='__main__':
    # Command line: src_dir dst_dir start end [--prepend PATH].
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        'src_dir',
        help='directory where videos are'
    )
    arg_parser.add_argument(
        'dst_dir',
        help='directory where to store frames'
    )
    arg_parser.add_argument(
        'start',
        help='start index (inclusive)'
    )
    arg_parser.add_argument(
        'end',
        help='end index (noninclusive)'
    )
    arg_parser.add_argument(
        '--prepend',
        default='',
        help='optional prepend to start of ffmpeg command (in case you want to use a non-system wide version of ffmpeg)'
        'For example: --prepend ~/anaconda2/bin/ will use ffmpeg installed in anaconda2'
    )
    # With no arguments, show usage and exit cleanly.
    if not len(sys.argv) > 1:
        print(arg_parser.print_help())
        sys.exit(0)
    args = arg_parser.parse_args()
    start_time = time.time()
    main(args)
    print("Job took %s mins" % ((time.time() - start_time)/60))
| [
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"os.path.isdir",
"multiprocessing.Pool",
"os.mkdir",
"sys.exit",
"os.system",
"time.time"
] | [((394, 413), 'os.listdir', 'os.listdir', (['src_dir'], {}), '(src_dir)\n', (404, 413), False, 'import os\n'), ((731, 737), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (735, 737), False, 'from multiprocessing import Pool\n'), ((1393, 1418), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1416, 1418), False, 'import argparse\n'), ((2232, 2243), 'time.time', 'time.time', ([], {}), '()\n', (2241, 2243), False, 'import time\n'), ((426, 448), 'os.path.isdir', 'os.path.isdir', (['dst_dir'], {}), '(dst_dir)\n', (439, 448), False, 'import os\n'), ((458, 475), 'os.mkdir', 'os.mkdir', (['dst_dir'], {}), '(dst_dir)\n', (466, 475), False, 'import os\n'), ((562, 595), 'os.path.join', 'os.path.join', (['src_dir', 'video_file'], {}), '(src_dir, video_file)\n', (574, 595), False, 'import os\n'), ((615, 648), 'os.path.join', 'os.path.join', (['dst_dir', 'video_file'], {}), '(dst_dir, video_file)\n', (627, 648), False, 'import os\n'), ((925, 948), 'os.path.isdir', 'os.path.isdir', (['dst_path'], {}), '(dst_path)\n', (938, 948), False, 'import os\n'), ((958, 976), 'os.mkdir', 'os.mkdir', (['dst_path'], {}), '(dst_path)\n', (966, 976), False, 'import os\n'), ((1249, 1267), 'os.system', 'os.system', (['command'], {}), '(command)\n', (1258, 1267), False, 'import os\n'), ((2166, 2177), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2174, 2177), False, 'import sys\n'), ((2292, 2303), 'time.time', 'time.time', ([], {}), '()\n', (2301, 2303), False, 'import time\n')] |
from django.contrib.auth.hashers import get_hashers_by_algorithm
from django.core import checks
@checks.register(checks.Tags.security, deploy=True)
def check_for_plaintext_passwords(app_configs, **kwargs):
    """Deploy-time system check flagging the insecure plaintext hasher."""
    hashers = get_hashers_by_algorithm()
    if "plaintext" in hashers:
        yield checks.Critical(
            "Plaintext module should not be used in production.", hint="Remove it."
        )
| [
"django.core.checks.register",
"django.contrib.auth.hashers.get_hashers_by_algorithm",
"django.core.checks.Critical"
] | [((99, 149), 'django.core.checks.register', 'checks.register', (['checks.Tags.security'], {'deploy': '(True)'}), '(checks.Tags.security, deploy=True)\n', (114, 149), False, 'from django.core import checks\n'), ((230, 256), 'django.contrib.auth.hashers.get_hashers_by_algorithm', 'get_hashers_by_algorithm', ([], {}), '()\n', (254, 256), False, 'from django.contrib.auth.hashers import get_hashers_by_algorithm\n'), ((272, 365), 'django.core.checks.Critical', 'checks.Critical', (['"""Plaintext module should not be used in production."""'], {'hint': '"""Remove it."""'}), "('Plaintext module should not be used in production.', hint=\n 'Remove it.')\n", (287, 365), False, 'from django.core import checks\n')] |
from app.extensions import db
from flask import current_app
class User(db.Model):
    """Application user: id, OAuth access token and per-user feature flags."""

    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    access_token = db.Column(db.String())
    # Feature toggles.
    jit_feature = db.Column(db.Boolean())
    recurrence_resch_feature = db.Column(db.Boolean())
    streaks_feature = db.Column(db.Boolean())
    in_line_comment_feature = db.Column(db.Boolean())

    def __init__(self, id, access_token, jit_feature, recurrence_resch_feature, streaks_feature, in_line_comment_feature):
        self.id = id
        self.access_token = access_token
        self.jit_feature = jit_feature
        self.recurrence_resch_feature = recurrence_resch_feature
        self.streaks_feature = streaks_feature
        self.in_line_comment_feature = in_line_comment_feature

    def __repr__(self):
        return (f'<id {self.id}, access token {self.access_token}, '
                f'jit feature {self.jit_feature}, '
                f'recurrence resch feature {self.recurrence_resch_feature}, '
                f'streaks feature {self.streaks_feature}, '
                f'in-line comment feature {self.in_line_comment_feature}>')

    def launch_task(self, name, description, *args, **kwargs):
        """Enqueue background task `app.tasks.<name>` with this user's id."""
        # `description` is accepted but not forwarded, matching existing callers.
        current_app.task_queue.enqueue(f'app.tasks.{name}', self.id, *args, **kwargs)
| [
"flask.current_app.task_queue.enqueue",
"app.extensions.db.Boolean",
"app.extensions.db.String",
"app.extensions.db.Column"
] | [((121, 160), 'app.extensions.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (130, 160), False, 'from app.extensions import db\n'), ((190, 201), 'app.extensions.db.String', 'db.String', ([], {}), '()\n', (199, 201), False, 'from app.extensions import db\n'), ((231, 243), 'app.extensions.db.Boolean', 'db.Boolean', ([], {}), '()\n', (241, 243), False, 'from app.extensions import db\n'), ((286, 298), 'app.extensions.db.Boolean', 'db.Boolean', ([], {}), '()\n', (296, 298), False, 'from app.extensions import db\n'), ((332, 344), 'app.extensions.db.Boolean', 'db.Boolean', ([], {}), '()\n', (342, 344), False, 'from app.extensions import db\n'), ((386, 398), 'app.extensions.db.Boolean', 'db.Boolean', ([], {}), '()\n', (396, 398), False, 'from app.extensions import db\n'), ((1182, 1259), 'flask.current_app.task_queue.enqueue', 'current_app.task_queue.enqueue', (["('app.tasks.' + name)", 'self.id', '*args'], {}), "('app.tasks.' + name, self.id, *args, **kwargs)\n", (1212, 1259), False, 'from flask import current_app\n')] |
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
from output.models.boeing_data.ipo4.ipo_xsd.ipo import AddressType
__NAMESPACE__ = "http://www.example.com/IPO"
class Usstate(Enum):
    """US state codes permitted by the schema (subset, xsdata-generated)."""
    AK = "AK"
    AL = "AL"
    AR = "AR"
    CA = "CA"
    PA = "PA"
@dataclass
class Ukaddress(AddressType):
    """UK address: a pattern-checked postcode plus a fixed export code of 1.

    Generated from the IPO XML schema (xsdata); metadata drives XML binding.
    """
    class Meta:
        name = "UKAddress"

    # Required element; pattern enforces the schema's postcode format.
    postcode: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
            "pattern": r"[A-Z]{2}\d\s\d[A-Z]{2}",
        }
    )
    # Fixed attribute value (init=False): always serialised as 1.
    export_code: int = field(
        init=False,
        default=1,
        metadata={
            "name": "exportCode",
            "type": "Attribute",
        }
    )
@dataclass
class Usaddress(AddressType):
    """US address: a state code and a numeric zip.

    Generated from the IPO XML schema (xsdata); metadata drives XML binding.
    """
    class Meta:
        name = "USAddress"

    # Required element constrained to the Usstate enumeration.
    state: Optional[Usstate] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
        }
    )
    # Required element; the schema models zip codes as integers.
    zip: Optional[int] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
        }
    )
| [
"dataclasses.field"
] | [((414, 542), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True, 'pattern':\n '[A-Z]{2}\\\\d\\\\s\\\\d[A-Z]{2}'}"}), "(default=None, metadata={'type': 'Element', 'namespace': '',\n 'required': True, 'pattern': '[A-Z]{2}\\\\d\\\\s\\\\d[A-Z]{2}'})\n", (419, 542), False, 'from dataclasses import dataclass, field\n'), ((641, 727), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '(1)', 'metadata': "{'name': 'exportCode', 'type': 'Attribute'}"}), "(init=False, default=1, metadata={'name': 'exportCode', 'type':\n 'Attribute'})\n", (646, 727), False, 'from dataclasses import dataclass, field\n'), ((907, 995), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=None, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (912, 995), False, 'from dataclasses import dataclass, field\n'), ((1086, 1174), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': '', 'required': True}"}), "(default=None, metadata={'type': 'Element', 'namespace': '',\n 'required': True})\n", (1091, 1174), False, 'from dataclasses import dataclass, field\n')] |
# <NAME>
# CPSC 386-01
# 2021-11-29
# <EMAIL>
# @JaredDyreson
#
# Lab 00-04
#
# Some filler text
#
"""
This module contains the AnimationDisplay class
"""
import pygame
import functools
import sys
import pathlib
import typing
import os
import dataclasses
import random
from pprint import pprint as pp
import time
from Invaders.Dataclasses.point import Point
from Invaders.Displays.display import Display
from Invaders.UI.button import Button
# from Invaders.Entities.cacodemon import Cacodemon
# from Invaders.Entities.Entity import Entity
from Invaders.Entities.enemy_matrix import EnemyMatrix
# from Invaders.Entities.Player import Player
from Invaders.Entities.Entity import Entity
from Invaders.Dataclasses.direction import Direction
# TODO : move this to its own respective module or something like that
def absolute_file_paths(directory: pathlib.Path) -> typing.List[pathlib.Path]:
    """
    List the contents of a directory with their absolute path

    @param directory: path where to look
    @return: typing.List[pathlib.Path]
    """
    collected = []
    # Walk the tree and absolutize every file name relative to its directory.
    for dirpath, _, filenames in os.walk(directory):
        for filename in filenames:
            collected.append(pathlib.Path(os.path.abspath(os.path.join(dirpath, filename))))
    return collected
class AnimationDisplay(Display):
    """Main gameplay display: runs the event loop, enemies and the player."""
    def __init__(self):
        """Set up entities, timer event ids and HUD label positions."""
        super().__init__()
        self.break_from_draw = False
        # 5x5 grid of enemies rendered onto this display's surface.
        self.entities = EnemyMatrix(5, 5, self._display_surface)
        self.main_player = Entity(
            self._display_surface, ["assets/rocket.png"], Point(550, 700)
        )
        # self.main_player = Player(self._display_surface, [
        # "assets/rocket.png"], Point(550, 700))
        # Custom pygame timer event ids (must be distinct USEREVENT offsets).
        self.DRAW_NEXT_ENTITY = pygame.USEREVENT + 1
        self.ENEMY_FIRE_INTERVAL = pygame.USEREVENT + 2
        self.score, self.lives = 0, 3
        self.score_label_position = Point(775, 20)
        self.lives_label_position = Point(775, 60)
    def draw(self) -> None:
        """Run the game loop: events, collisions, drawing, until enemies die."""
        draw_loop = True
        # Fire DRAW_NEXT_ENTITY every 300ms and ENEMY_FIRE_INTERVAL every 2s.
        pygame.time.set_timer(self.DRAW_NEXT_ENTITY, 300)
        pygame.time.set_timer(self.ENEMY_FIRE_INTERVAL, 2000)
        will_move = False
        enemy_group = pygame.sprite.Group()
        player_group = pygame.sprite.Group()
        enemy_laser_group = pygame.sprite.Group()
        player_group.add(self.main_player)
        # print(player_group)
        # Flatten the enemy matrix into a sprite group for drawing/collision.
        for x, row in enumerate(self.entities.matrix):
            for y, column in enumerate(row):
                enemy_group.add(column.entity)
        # FIXME
        while draw_loop and not self.break_from_draw:
            positions = self.entities.scan_column() # FIXME: this code is not working
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                elif event.type == self.DRAW_NEXT_ENTITY:
                    self._display_surface.fill(pygame.Color("black"))
                    enemy_group.update(1)
                elif event.type == self.ENEMY_FIRE_INTERVAL:
                    # Pick two random enemy positions to shoot back at the player.
                    for position in random.choices(positions, k=2):
                        x, y = position.container
                        __laser = self.entities.matrix[x][y].entity.fire(
                            Direction.SOUTH.value, True
                        )
                        enemy_laser_group.add(__laser)
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_SPACE:
                        self.main_player.fire(Direction.NORTH.value)
                    if event.key == pygame.K_LEFT:
                        self.main_player.position.x -= 20
                    if event.key == pygame.K_RIGHT:
                        self.main_player.position.x += 20
                    will_move = True
                elif event.type != pygame.KEYDOWN:
                    will_move = False
            # Player lasers vs enemies: both sprites are removed on contact.
            if pygame.sprite.groupcollide(
                self.main_player.lasers, enemy_group, True, True
            ):
                self.score += 20
            # Enemy lasers vs player: neither sprite is removed here.
            if pygame.sprite.groupcollide(
                enemy_laser_group, player_group, False, False
            ):
                print("hit the player!")
                self.lives -= 1
            # Redraw the whole scene for this frame.
            self._display_surface.fill(self.background_color)
            enemy_group.draw(self._display_surface)
            self.main_player.draw()
            self.main_player.lasers.draw(self._display_surface)
            enemy_laser_group.draw(self._display_surface)
            enemy_laser_group.update()
            if not enemy_group:
                draw_loop = False
            self.write_text(
                f"Score: {self.score}",
                self.score_label_position,
                pygame.font.SysFont(None, 30),
            )
            self.write_text(
                f"Lives: {self.lives}",
                self.lives_label_position,
                pygame.font.SysFont(None, 30),
            )
            self.main_player.update(1)
            pygame.display.flip()
            # Cap the frame rate at 60 FPS.
            self.fps_meter.tick(60)
| [
"pygame.sprite.groupcollide",
"Invaders.Dataclasses.point.Point",
"pygame.quit",
"pygame.event.get",
"pygame.sprite.Group",
"Invaders.Entities.enemy_matrix.EnemyMatrix",
"pygame.display.flip",
"os.path.join",
"random.choices",
"sys.exit",
"pygame.Color",
"pygame.time.set_timer",
"pygame.font... | [((1366, 1406), 'Invaders.Entities.enemy_matrix.EnemyMatrix', 'EnemyMatrix', (['(5)', '(5)', 'self._display_surface'], {}), '(5, 5, self._display_surface)\n', (1377, 1406), False, 'from Invaders.Entities.enemy_matrix import EnemyMatrix\n'), ((1820, 1834), 'Invaders.Dataclasses.point.Point', 'Point', (['(775)', '(20)'], {}), '(775, 20)\n', (1825, 1834), False, 'from Invaders.Dataclasses.point import Point\n'), ((1871, 1885), 'Invaders.Dataclasses.point.Point', 'Point', (['(775)', '(60)'], {}), '(775, 60)\n', (1876, 1885), False, 'from Invaders.Dataclasses.point import Point\n'), ((1948, 1997), 'pygame.time.set_timer', 'pygame.time.set_timer', (['self.DRAW_NEXT_ENTITY', '(300)'], {}), '(self.DRAW_NEXT_ENTITY, 300)\n', (1969, 1997), False, 'import pygame\n'), ((2006, 2059), 'pygame.time.set_timer', 'pygame.time.set_timer', (['self.ENEMY_FIRE_INTERVAL', '(2000)'], {}), '(self.ENEMY_FIRE_INTERVAL, 2000)\n', (2027, 2059), False, 'import pygame\n'), ((2110, 2131), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (2129, 2131), False, 'import pygame\n'), ((2155, 2176), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (2174, 2176), False, 'import pygame\n'), ((2205, 2226), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (2224, 2226), False, 'import pygame\n'), ((1166, 1184), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1173, 1184), False, 'import os\n'), ((1500, 1515), 'Invaders.Dataclasses.point.Point', 'Point', (['(550)', '(700)'], {}), '(550, 700)\n', (1505, 1515), False, 'from Invaders.Dataclasses.point import Point\n'), ((2633, 2651), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2649, 2651), False, 'import pygame\n'), ((3857, 3933), 'pygame.sprite.groupcollide', 'pygame.sprite.groupcollide', (['self.main_player.lasers', 'enemy_group', '(True)', '(True)'], {}), '(self.main_player.lasers, enemy_group, True, True)\n', (3883, 3933), False, 'import pygame\n'), ((4014, 4087), 
'pygame.sprite.groupcollide', 'pygame.sprite.groupcollide', (['enemy_laser_group', 'player_group', '(False)', '(False)'], {}), '(enemy_laser_group, player_group, False, False)\n', (4040, 4087), False, 'import pygame\n'), ((4972, 4993), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4991, 4993), False, 'import pygame\n'), ((1102, 1126), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (1114, 1126), False, 'import os\n'), ((4701, 4730), 'pygame.font.SysFont', 'pygame.font.SysFont', (['None', '(30)'], {}), '(None, 30)\n', (4720, 4730), False, 'import pygame\n'), ((4875, 4904), 'pygame.font.SysFont', 'pygame.font.SysFont', (['None', '(30)'], {}), '(None, 30)\n', (4894, 4904), False, 'import pygame\n'), ((2719, 2732), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2730, 2732), False, 'import pygame\n'), ((2753, 2763), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2761, 2763), False, 'import sys\n'), ((2869, 2890), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (2881, 2890), False, 'import pygame\n'), ((3031, 3061), 'random.choices', 'random.choices', (['positions'], {'k': '(2)'}), '(positions, k=2)\n', (3045, 3061), False, 'import random\n')] |
import os
import re
# Checkpoint filename template: epoch number and validation loss.
MODEL_FILE_FORMAT = 'weights.{epoch:02d}-{val_loss:.2f}.h5'
# Captures the epoch number from a path produced by MODEL_FILE_FORMAT.
MODEL_REGEX_PATTERN = re.compile(r'^.*weights\.(\d+)\-\d+\.\d+\.h5$')
# Filename used for the most recent model snapshot.
LAST_MODEL_FILE_FORMAT = 'last.h5'
# Empty string when the webhook env var is unset (notifications disabled).
TEAMS_WEBHOOK_URL = os.environ.get('TEAMS_WEBHOOK_URL', '')
"os.environ.get",
"re.compile"
] | [((103, 156), 're.compile', 're.compile', (['"""^.*weights\\\\.(\\\\d+)\\\\-\\\\d+\\\\.\\\\d+\\\\.h5$"""'], {}), "('^.*weights\\\\.(\\\\d+)\\\\-\\\\d+\\\\.\\\\d+\\\\.h5$')\n", (113, 156), False, 'import re\n'), ((206, 245), 'os.environ.get', 'os.environ.get', (['"""TEAMS_WEBHOOK_URL"""', '""""""'], {}), "('TEAMS_WEBHOOK_URL', '')\n", (220, 245), False, 'import os\n')] |
# -*- test-case-name: vumi.blinkenlights.tests.test_metrics_workers -*-
import time
import random
import hashlib
from datetime import datetime
from twisted.python import log
from twisted.internet.defer import inlineCallbacks, Deferred
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.internet.protocol import DatagramProtocol
from vumi.service import Consumer, Publisher, Worker
from vumi.blinkenlights.metrics import (MetricsConsumer, MetricManager, Count,
Metric, Timer, Aggregator)
from vumi.blinkenlights.message20110818 import MetricMessage
class AggregatedMetricConsumer(Consumer):
    """Consumer for aggregate metrics.
    Parameters
    ----------
    callback : function (metric_name, values)
        Called for each metric datapoint as it arrives. The
        parameters are metric_name (str) and values (a list of
        timestamp and value pairs).
    """
    exchange_name = "vumi.metrics.aggregates"
    exchange_type = "direct"
    durable = True
    routing_key = "vumi.metrics.aggregates"
    def __init__(self, callback):
        # Queue name mirrors the routing key on the aggregates exchange.
        self.queue_name = self.routing_key
        self.callback = callback
    def consume_message(self, vumi_message):
        # Decode the wire payload and fan each datapoint out to the callback.
        # Aggregator names are discarded: for aggregates they are already
        # encoded into the metric name.
        msg = MetricMessage.from_dict(vumi_message.payload)
        for metric_name, _aggregators, values in msg.datapoints():
            self.callback(metric_name, values)
class AggregatedMetricPublisher(Publisher):
    """Publishes aggregated metrics.
    """
    exchange_name = "vumi.metrics.aggregates"
    exchange_type = "direct"
    durable = True
    routing_key = "vumi.metrics.aggregates"
    def publish_aggregate(self, metric_name, timestamp, value):
        """Publish a single aggregated datapoint for metric_name."""
        # TODO: perhaps change interface to publish multiple metrics?
        # One datapoint per message: empty aggregator tuple and a single
        # (timestamp, value) pair.
        msg = MetricMessage()
        msg.append((metric_name, (), [(timestamp, value)]))
        self.publish_message(msg)
class TimeBucketConsumer(Consumer):
    """Consume time bucketed metric messages.
    Parameters
    ----------
    bucket : int
        Bucket to consume time buckets from.
    callback : function, f(metric_name, aggregators, values)
        Called for each metric datapoint as it arrives.
        The parameters are metric_name (str),
        aggregator (list of aggregator names) and values (a
        list of timestamp and value pairs).
    """
    exchange_name = "vumi.metrics.buckets"
    exchange_type = "direct"
    durable = True
    ROUTING_KEY_TEMPLATE = "bucket.%d"
    def __init__(self, bucket, callback):
        # One queue per bucket; the queue and routing key share the same
        # name so this consumer only sees messages for its bucket.
        self.queue_name = self.ROUTING_KEY_TEMPLATE % bucket
        self.routing_key = self.queue_name
        self.callback = callback
    def consume_message(self, vumi_message):
        # Decode the serialized metric message and deliver every datapoint,
        # including its aggregator names, to the callback.
        msg = MetricMessage.from_dict(vumi_message.payload)
        for metric_name, aggregators, values in msg.datapoints():
            self.callback(metric_name, aggregators, values)
class TimeBucketPublisher(Publisher):
    """Publish time bucketed metric messages.
    Parameters
    ----------
    buckets : int
        Total number of buckets messages are being
        distributed to.
    bucket_size : int, in seconds
        Size of each time bucket in seconds.
    """
    exchange_name = "vumi.metrics.buckets"
    exchange_type = "direct"
    durable = True
    ROUTING_KEY_TEMPLATE = "bucket.%d"
    def __init__(self, buckets, bucket_size):
        self.buckets = buckets
        self.bucket_size = bucket_size
    def find_bucket(self, metric_name, ts_key):
        # Deterministic hash of (metric name, time key) so the same series
        # and time window always routes to the same aggregator bucket.
        # NOTE: Python 2 code (see iteritems below) -- md5 is fed a str.
        md5 = hashlib.md5("%s:%d" % (metric_name, ts_key))
        return int(md5.hexdigest(), 16) % self.buckets
    def publish_metric(self, metric_name, aggregates, values):
        """Group datapoints by time bucket and publish each group."""
        timestamp_buckets = {}
        for timestamp, value in values:
            # Python 2 integer division: yields the time-bucket index.
            ts_key = int(timestamp) / self.bucket_size
            ts_bucket = timestamp_buckets.get(ts_key)
            if ts_bucket is None:
                ts_bucket = timestamp_buckets[ts_key] = []
            ts_bucket.append((timestamp, value))
        for ts_key, ts_bucket in timestamp_buckets.iteritems():
            bucket = self.find_bucket(metric_name, ts_key)
            routing_key = self.ROUTING_KEY_TEMPLATE % bucket
            msg = MetricMessage()
            msg.append((metric_name, aggregates, ts_bucket))
            self.publish_message(msg, routing_key=routing_key)
class MetricTimeBucket(Worker):
    """Gathers metrics messages and redistributes them to aggregators.
    :class:`MetricTimeBuckets` take metrics from the vumi.metrics
    exchange and redistribute them to one of N :class:`MetricAggregator`
    workers.
    There can be any number of :class:`MetricTimeBucket` workers.
    Configuration Values
    --------------------
    buckets : int (N)
        The total number of aggregator workers. :class:`MetricAggregator`
        workers must be started with bucket numbers 0 to N-1 otherwise
        metric data will go missing (or at best be stuck in a queue
        somewhere).
    bucket_size : int, in seconds
        The amount of time each time bucket represents.
    """
    @inlineCallbacks
    def startWorker(self):
        log.msg("Starting a MetricTimeBucket with config: %s" % self.config)
        buckets = int(self.config.get("buckets"))
        log.msg("Total number of buckets %d" % buckets)
        bucket_size = int(self.config.get("bucket_size"))
        log.msg("Bucket size is %d seconds" % bucket_size)
        # The publisher is started first so its publish_metric method can be
        # handed to the consumer as the per-message callback below.
        self.publisher = yield self.start_publisher(TimeBucketPublisher,
                                                    buckets, bucket_size)
        self.consumer = yield self.start_consumer(MetricsConsumer,
                                                  self.publisher.publish_metric)
class DiscardedMetricError(Exception):
    """Raised (for logging) when metric data is too old and must be dropped."""
class MetricAggregator(Worker):
    """Gathers a subset of metrics and aggregates them.
    :class:`MetricAggregators` work in sets of N.
    Configuration Values
    --------------------
    bucket : int, 0 to N-1
        An aggregator needs to know which number out of N it is. This is
        its bucket number.
    bucket_size : int, in seconds
        The amount of time each time bucket represents.
    lag : int, seconds, optional
        The number of seconds after a bucket's time ends to wait
        before processing the bucket. Default is 5s.
    """
    _time = time.time  # hook for faking time in tests
    def _ts_key(self, time):
        # Python 2 integer division: maps a timestamp to its bucket index.
        return int(time) / self.bucket_size
    @inlineCallbacks
    def startWorker(self):
        log.msg("Starting a MetricAggregator with config: %s" % self.config)
        bucket = int(self.config.get("bucket"))
        log.msg("MetricAggregator bucket %d" % bucket)
        self.bucket_size = int(self.config.get("bucket_size"))
        log.msg("Bucket size is %d seconds" % self.bucket_size)
        self.lag = float(self.config.get("lag", 5.0))
        # ts_key -> { metric_name -> (aggregate_set, values) }
        # values is a list of (timestamp, value) pairs
        self.buckets = {}
        # initialize last processed bucket
        self._last_ts_key = self._ts_key(self._time() - self.lag) - 2
        self.publisher = yield self.start_publisher(AggregatedMetricPublisher)
        self.consumer = yield self.start_consumer(TimeBucketConsumer,
                                                  bucket, self.consume_metric)
        # Periodically flush buckets whose time window (plus lag) has passed.
        self._task = LoopingCall(self.check_buckets)
        done = self._task.start(self.bucket_size, False)
        done.addErrback(lambda failure: log.err(failure,
            "MetricAggregator bucket checking task died"))
    def check_buckets(self):
        """Periodically clean out old buckets and calculate aggregates."""
        # key for previous bucket
        current_ts_key = self._ts_key(self._time() - self.lag) - 1
        for ts_key in self.buckets.keys():
            if ts_key <= self._last_ts_key:
                # Data older than the last processed bucket arrived too late
                # to aggregate; log and drop it.
                log.err(DiscardedMetricError("Throwing way old metric data: %r"
                                             % self.buckets[ts_key]))
                del self.buckets[ts_key]
            elif ts_key <= current_ts_key:
                # Bucket's window has closed: compute each requested
                # aggregate over the collected values and publish it.
                aggregates = []
                ts = ts_key * self.bucket_size
                items = self.buckets[ts_key].iteritems()
                for metric_name, (agg_set, values) in items:
                    for agg_name in agg_set:
                        agg_metric = "%s.%s" % (metric_name, agg_name)
                        agg_func = Aggregator.from_name(agg_name)
                        agg_value = agg_func([v[1] for v in values])
                        aggregates.append((agg_metric, agg_value))
                for agg_metric, agg_value in aggregates:
                    self.publisher.publish_aggregate(agg_metric, ts,
                                                     agg_value)
                del self.buckets[ts_key]
        self._last_ts_key = current_ts_key
    def consume_metric(self, metric_name, aggregates, values):
        """Record incoming datapoints into their time bucket."""
        if not values:
            return
        # All values in one message share a bucket; key off the first one.
        ts_key = self._ts_key(values[0][0])
        metrics = self.buckets.get(ts_key, None)
        if metrics is None:
            metrics = self.buckets[ts_key] = {}
        metric = metrics.get(metric_name)
        if metric is None:
            metric = metrics[metric_name] = (set(), [])
        existing_aggregates, existing_values = metric
        existing_aggregates.update(aggregates)
        existing_values.extend(values)
    def stopWorker(self):
        # Flush whatever is pending before shutting down.
        self._task.stop()
        self.check_buckets()
class MetricsCollectorWorker(Worker):
    """Base worker that consumes aggregated metrics.
    Subclasses hook setup_worker()/teardown_worker() and must implement
    consume_metrics(metric_name, values).
    """
    @inlineCallbacks
    def startWorker(self):
        log.msg("Starting %s with config: %s" % (
            type(self).__name__, self.config))
        # Let the subclass finish its setup before messages start flowing.
        yield self.setup_worker()
        self.consumer = yield self.start_consumer(
            AggregatedMetricConsumer, self.consume_metrics)
    def stopWorker(self):
        log.msg("Stopping %s" % (type(self).__name__,))
        return self.teardown_worker()
    def setup_worker(self):
        # Optional subclass hook; may return a Deferred.
        pass
    def teardown_worker(self):
        # Optional subclass hook; may return a Deferred.
        pass
    def consume_metrics(self, metric_name, values):
        raise NotImplementedError()
class GraphitePublisher(Publisher):
    """Publisher for sending messages to Graphite."""
    exchange_name = "graphite"
    exchange_type = "topic"
    durable = True
    auto_delete = False
    delivery_mode = 2  # persistent messages (AMQP delivery mode 2)
    require_bind = False  # Graphite uses a topic exchange
    def publish_metric(self, metric, value, timestamp):
        # Graphite plaintext line "<value> <timestamp>", routed by the
        # metric name on the topic exchange.
        self.publish_raw("%f %d" % (value, timestamp), routing_key=metric)
class GraphiteMetricsCollector(MetricsCollectorWorker):
    """Worker that collects Vumi metrics and publishes them to Graphite."""
    @inlineCallbacks
    def setup_worker(self):
        self.graphite_publisher = yield self.start_publisher(GraphitePublisher)
    def consume_metrics(self, metric_name, values):
        # Forward each (timestamp, value) datapoint to Graphite as-is.
        for timestamp, value in values:
            self.graphite_publisher.publish_metric(
                metric_name, value, timestamp)
class UDPMetricsProtocol(DatagramProtocol):
    """Connected UDP protocol that sends metric strings to a fixed ip:port."""
    def __init__(self, ip, port):
        # NOTE: `host` must be an IP, not a hostname.
        self._ip = ip
        self._port = port
    def startProtocol(self):
        # Connect the UDP socket so writes need no destination address.
        self.transport.connect(self._ip, self._port)
    def send_metric(self, metric_string):
        return self.transport.write(metric_string)
class UDPMetricsCollector(MetricsCollectorWorker):
    """Worker that collects Vumi metrics and publishes them over UDP."""
    DEFAULT_FORMAT_STRING = '%(timestamp)s %(metric_name)s %(value)s\n'
    DEFAULT_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S%z'
    @inlineCallbacks
    def setup_worker(self):
        self.format_string = self.config.get(
            'format_string', self.DEFAULT_FORMAT_STRING)
        self.timestamp_format = self.config.get(
            'timestamp_format', self.DEFAULT_TIMESTAMP_FORMAT)
        # Resolve the hostname once up front; UDPMetricsProtocol needs an IP.
        self.metrics_ip = yield reactor.resolve(self.config['metrics_host'])
        self.metrics_port = int(self.config['metrics_port'])
        self.metrics_protocol = UDPMetricsProtocol(
            self.metrics_ip, self.metrics_port)
        # Port 0: let the OS pick the local port for the sending socket.
        self.listener = yield reactor.listenUDP(0, self.metrics_protocol)
    def teardown_worker(self):
        return self.listener.stopListening()
    def consume_metrics(self, metric_name, values):
        for timestamp, value in values:
            # utcfromtimestamp yields a *naive* datetime, so %z in the
            # default format renders empty -- TODO confirm that's intended.
            timestamp = datetime.utcfromtimestamp(timestamp)
            metric_string = self.format_string % {
                'timestamp': timestamp.strftime(self.timestamp_format),
                'metric_name': metric_name,
                'value': value,
            }
            self.metrics_protocol.send_metric(metric_string)
class RandomMetricsGenerator(Worker):
    """Worker that publishes a set of random metrics.
    Useful for tests and demonstrations.
    Configuration Values
    --------------------
    manager_period : float in seconds, optional
        How often to have the internal metric manager send metrics
        messages. Default is 5s.
    generator_period: float in seconds, optional
        How often the random metric loop should send values to the
        metric manager. Default is 1s.
    """
    # callback for tests, f(worker)
    # (or anyone else that wants to be notified when metrics are generated)
    on_run = None
    @inlineCallbacks
    def startWorker(self):
        log.msg("Starting the MetricsGenerator with config: %s" % self.config)
        manager_period = float(self.config.get("manager_period", 5.0))
        log.msg("MetricManager will sent metrics every %s seconds" %
                manager_period)
        generator_period = float(self.config.get("generator_period", 1.0))
        log.msg("Random metrics values will be generated every %s seconds" %
                generator_period)
        self.mm = yield self.start_publisher(MetricManager, "vumi.random.",
                                               manager_period)
        # One metric of each kind, all under the "vumi.random." prefix.
        self.counter = self.mm.register(Count("count"))
        self.value = self.mm.register(Metric("value"))
        self.timer = self.mm.register(Timer("timer"))
        self.next = Deferred()
        self.task = LoopingCall(self.run)
        self.task.start(generator_period)
    @inlineCallbacks
    def run(self):
        """Generate one round of random metric values."""
        # Increment the counter roughly half the time.
        if random.choice([True, False]):
            self.counter.inc()
        self.value.set(random.normalvariate(2.0, 0.1))
        # Simulate timed work: a random 0-100ms reactor delay.
        with self.timer:
            d = Deferred()
            wait = random.uniform(0.0, 0.1)
            reactor.callLater(wait, lambda: d.callback(None))
            yield d
        if self.on_run is not None:
            self.on_run(self)
    def stopWorker(self):
        self.mm.stop()
        self.task.stop()
        log.msg("Stopping the MetricsGenerator")
| [
"datetime.datetime.utcfromtimestamp",
"random.uniform",
"random.choice",
"random.normalvariate",
"hashlib.md5",
"vumi.blinkenlights.metrics.Count",
"twisted.python.log.msg",
"twisted.internet.task.LoopingCall",
"twisted.python.log.err",
"vumi.blinkenlights.message20110818.MetricMessage.from_dict",... | [((1273, 1318), 'vumi.blinkenlights.message20110818.MetricMessage.from_dict', 'MetricMessage.from_dict', (['vumi_message.payload'], {}), '(vumi_message.payload)\n', (1296, 1318), False, 'from vumi.blinkenlights.message20110818 import MetricMessage\n'), ((1811, 1826), 'vumi.blinkenlights.message20110818.MetricMessage', 'MetricMessage', ([], {}), '()\n', (1824, 1826), False, 'from vumi.blinkenlights.message20110818 import MetricMessage\n'), ((2743, 2788), 'vumi.blinkenlights.message20110818.MetricMessage.from_dict', 'MetricMessage.from_dict', (['vumi_message.payload'], {}), '(vumi_message.payload)\n', (2766, 2788), False, 'from vumi.blinkenlights.message20110818 import MetricMessage\n'), ((3522, 3566), 'hashlib.md5', 'hashlib.md5', (["('%s:%d' % (metric_name, ts_key))"], {}), "('%s:%d' % (metric_name, ts_key))\n", (3533, 3566), False, 'import hashlib\n'), ((5136, 5204), 'twisted.python.log.msg', 'log.msg', (["('Starting a MetricTimeBucket with config: %s' % self.config)"], {}), "('Starting a MetricTimeBucket with config: %s' % self.config)\n", (5143, 5204), False, 'from twisted.python import log\n'), ((5263, 5310), 'twisted.python.log.msg', 'log.msg', (["('Total number of buckets %d' % buckets)"], {}), "('Total number of buckets %d' % buckets)\n", (5270, 5310), False, 'from twisted.python import log\n'), ((5377, 5427), 'twisted.python.log.msg', 'log.msg', (["('Bucket size is %d seconds' % bucket_size)"], {}), "('Bucket size is %d seconds' % bucket_size)\n", (5384, 5427), False, 'from twisted.python import log\n'), ((6494, 6562), 'twisted.python.log.msg', 'log.msg', (["('Starting a MetricAggregator with config: %s' % self.config)"], {}), "('Starting a MetricAggregator with config: %s' % self.config)\n", (6501, 6562), False, 'from twisted.python import log\n'), ((6619, 6665), 'twisted.python.log.msg', 'log.msg', (["('MetricAggregator bucket %d' % bucket)"], {}), "('MetricAggregator bucket %d' % 
bucket)\n", (6626, 6665), False, 'from twisted.python import log\n'), ((6737, 6792), 'twisted.python.log.msg', 'log.msg', (["('Bucket size is %d seconds' % self.bucket_size)"], {}), "('Bucket size is %d seconds' % self.bucket_size)\n", (6744, 6792), False, 'from twisted.python import log\n'), ((7356, 7387), 'twisted.internet.task.LoopingCall', 'LoopingCall', (['self.check_buckets'], {}), '(self.check_buckets)\n', (7367, 7387), False, 'from twisted.internet.task import LoopingCall\n'), ((13389, 13459), 'twisted.python.log.msg', 'log.msg', (["('Starting the MetricsGenerator with config: %s' % self.config)"], {}), "('Starting the MetricsGenerator with config: %s' % self.config)\n", (13396, 13459), False, 'from twisted.python import log\n'), ((13539, 13615), 'twisted.python.log.msg', 'log.msg', (["('MetricManager will sent metrics every %s seconds' % manager_period)"], {}), "('MetricManager will sent metrics every %s seconds' % manager_period)\n", (13546, 13615), False, 'from twisted.python import log\n'), ((13715, 13805), 'twisted.python.log.msg', 'log.msg', (["('Random metrics values will be generated every %s seconds' % generator_period)"], {}), "('Random metrics values will be generated every %s seconds' %\n generator_period)\n", (13722, 13805), False, 'from twisted.python import log\n'), ((14141, 14151), 'twisted.internet.defer.Deferred', 'Deferred', ([], {}), '()\n', (14149, 14151), False, 'from twisted.internet.defer import inlineCallbacks, Deferred\n'), ((14172, 14193), 'twisted.internet.task.LoopingCall', 'LoopingCall', (['self.run'], {}), '(self.run)\n', (14183, 14193), False, 'from twisted.internet.task import LoopingCall\n'), ((14288, 14316), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (14301, 14316), False, 'import random\n'), ((14731, 14771), 'twisted.python.log.msg', 'log.msg', (['"""Stopping the MetricsGenerator"""'], {}), "('Stopping the MetricsGenerator')\n", (14738, 14771), False, 'from twisted.python import 
log\n'), ((4211, 4226), 'vumi.blinkenlights.message20110818.MetricMessage', 'MetricMessage', ([], {}), '()\n', (4224, 4226), False, 'from vumi.blinkenlights.message20110818 import MetricMessage\n'), ((11915, 11959), 'twisted.internet.reactor.resolve', 'reactor.resolve', (["self.config['metrics_host']"], {}), "(self.config['metrics_host'])\n", (11930, 11959), False, 'from twisted.internet import reactor\n'), ((12151, 12194), 'twisted.internet.reactor.listenUDP', 'reactor.listenUDP', (['(0)', 'self.metrics_protocol'], {}), '(0, self.metrics_protocol)\n', (12168, 12194), False, 'from twisted.internet import reactor\n'), ((12389, 12425), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['timestamp'], {}), '(timestamp)\n', (12414, 12425), False, 'from datetime import datetime\n'), ((13996, 14010), 'vumi.blinkenlights.metrics.Count', 'Count', (['"""count"""'], {}), "('count')\n", (14001, 14010), False, 'from vumi.blinkenlights.metrics import MetricsConsumer, MetricManager, Count, Metric, Timer, Aggregator\n'), ((14050, 14065), 'vumi.blinkenlights.metrics.Metric', 'Metric', (['"""value"""'], {}), "('value')\n", (14056, 14065), False, 'from vumi.blinkenlights.metrics import MetricsConsumer, MetricManager, Count, Metric, Timer, Aggregator\n'), ((14105, 14119), 'vumi.blinkenlights.metrics.Timer', 'Timer', (['"""timer"""'], {}), "('timer')\n", (14110, 14119), False, 'from vumi.blinkenlights.metrics import MetricsConsumer, MetricManager, Count, Metric, Timer, Aggregator\n'), ((14372, 14402), 'random.normalvariate', 'random.normalvariate', (['(2.0)', '(0.1)'], {}), '(2.0, 0.1)\n', (14392, 14402), False, 'import random\n'), ((14445, 14455), 'twisted.internet.defer.Deferred', 'Deferred', ([], {}), '()\n', (14453, 14455), False, 'from twisted.internet.defer import inlineCallbacks, Deferred\n'), ((14475, 14499), 'random.uniform', 'random.uniform', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (14489, 14499), False, 'import random\n'), ((7485, 7547), 
'twisted.python.log.err', 'log.err', (['failure', '"""MetricAggregator bucket checking task died"""'], {}), "(failure, 'MetricAggregator bucket checking task died')\n", (7492, 7547), False, 'from twisted.python import log\n'), ((8448, 8478), 'vumi.blinkenlights.metrics.Aggregator.from_name', 'Aggregator.from_name', (['agg_name'], {}), '(agg_name)\n', (8468, 8478), False, 'from vumi.blinkenlights.metrics import MetricsConsumer, MetricManager, Count, Metric, Timer, Aggregator\n')] |
from __future__ import absolute_import
from django import forms
from django.db import transaction
from sentry.models import (
OrganizationMember,
OrganizationMemberTeam,
Team,
)
class BaseOrganizationMemberForm(forms.ModelForm):
    """
    Base form used by AddOrganizationMemberForm, InviteOrganizationMemberForm,
    and EditOrganizationMemberForm
    """
    teams = forms.ModelMultipleChoiceField(
        # Placeholder at class-definition time; the real per-organization
        # queryset is injected in __init__ via the all_teams kwarg.
        queryset=Team.objects.none(),
        widget=forms.CheckboxSelectMultiple(),
        required=False,
    )
    role = forms.ChoiceField()
    class Meta:
        fields = ('role', )
        model = OrganizationMember
    def __init__(self, *args, **kwargs):
        # Both kwargs are required; pop them before ModelForm sees them.
        allowed_roles = kwargs.pop('allowed_roles')
        all_teams = kwargs.pop('all_teams')
        super(BaseOrganizationMemberForm, self).__init__(*args, **kwargs)
        # NOTE(review): a generator is assigned here; Django's ChoiceField
        # normally normalizes choices with list(), but confirm that holds
        # for the pinned Django version, or this is consumed after one use.
        self.fields['role'].choices = ((r.id, r.name) for r in allowed_roles)
        self.fields['teams'].queryset = all_teams
    @transaction.atomic
    def save_team_assignments(self, organization_member):
        # Replace-all semantics: drop existing team links, then bulk-create
        # one link per selected team, all inside one transaction.
        OrganizationMemberTeam.objects.filter(organizationmember=organization_member).delete()
        OrganizationMemberTeam.objects.bulk_create(
            [
                OrganizationMemberTeam(team=team, organizationmember=organization_member)
                for team in self.cleaned_data['teams']
            ]
        )
| [
"sentry.models.OrganizationMemberTeam",
"sentry.models.OrganizationMemberTeam.objects.filter",
"django.forms.ChoiceField",
"sentry.models.Team.objects.none",
"django.forms.CheckboxSelectMultiple"
] | [((545, 564), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {}), '()\n', (562, 564), False, 'from django import forms\n'), ((436, 455), 'sentry.models.Team.objects.none', 'Team.objects.none', ([], {}), '()\n', (453, 455), False, 'from sentry.models import OrganizationMember, OrganizationMemberTeam, Team\n'), ((472, 502), 'django.forms.CheckboxSelectMultiple', 'forms.CheckboxSelectMultiple', ([], {}), '()\n', (500, 502), False, 'from django import forms\n'), ((1079, 1156), 'sentry.models.OrganizationMemberTeam.objects.filter', 'OrganizationMemberTeam.objects.filter', ([], {'organizationmember': 'organization_member'}), '(organizationmember=organization_member)\n', (1116, 1156), False, 'from sentry.models import OrganizationMember, OrganizationMemberTeam, Team\n'), ((1248, 1321), 'sentry.models.OrganizationMemberTeam', 'OrganizationMemberTeam', ([], {'team': 'team', 'organizationmember': 'organization_member'}), '(team=team, organizationmember=organization_member)\n', (1270, 1321), False, 'from sentry.models import OrganizationMember, OrganizationMemberTeam, Team\n')] |
from datetime import datetime
from typing import List, Optional
import bcrypt
from sqlalchemy.orm import Session
from . import models, schemas
def get_user(db: Session, id: int) -> models.User:
    """Fetch a single user by primary key.

    Args:
        db (Session): database connection
        id (int): id of the user

    Returns:
        models.User: the matching user (None when no row matches)
    """
    matches = db.query(models.User).filter(models.User.id == id)
    return matches.first()
def get_user_by_name(db: Session, name: str) -> models.User:
    """Fetch a single user by their (unique) name.

    Args:
        db (Session): database connection
        name (str): name of the user

    Returns:
        models.User: the matching user (None when no row matches)
    """
    matches = db.query(models.User).filter(models.User.name == name)
    return matches.first()
def get_all_articles(db: Session) -> List[models.Article]:
    """List every article in the database.

    Args:
        db (Session): database connection

    Returns:
        List[models.Article]: list of articles
    """
    articles_query = db.query(models.Article)
    return articles_query.all()
def get_article(db: Session, id: int) -> models.Article:
    """Fetch a single article by primary key.

    Args:
        db (Session): database connection
        id (int): id of the article

    Returns:
        models.Article: the matching article (None when no row matches)
    """
    matches = db.query(models.Article).filter(models.Article.id == id)
    return matches.first()
def create_user(db: Session, user: schemas.UserCreate) -> None:
    """Create a new user with a bcrypt-hashed password.

    Args:
        db (Session): database connection
        user (schemas.UserCreate): creation data
    """
    new_user = models.User(name=user.name)
    # Bug fix: bcrypt.hashpw() requires bytes, so the plaintext must be
    # encoded first (passing a str raises TypeError). The resulting hash is
    # decoded back to str so check_user() can .encode('UTF-8') it when
    # verifying, matching how the stored column is read there.
    hashed = bcrypt.hashpw(user.password.encode('UTF-8'), bcrypt.gensalt())
    new_user.password = hashed.decode('UTF-8')
    db.add(new_user)
    db.commit()
def check_user(db: Session, name: str, password: str) -> Optional[models.User]:
    """Verify a user's credentials.

    Args:
        db (Session): database connection
        name (str): name of the user to check
        password (str): password to check against

    Returns:
        Optional[models.User]: the user when the password matches, else None
    """
    candidate = get_user_by_name(db, name)
    if not candidate:
        return None
    # bcrypt compares raw bytes, so both sides are encoded before checking.
    matches = bcrypt.checkpw(password.encode('UTF-8'), candidate.password.encode('UTF-8'))
    return candidate if matches else None
def create_article(db: Session, article: schemas.ArticleCreate, creator_id: int) -> None:
    """Create a new article owned by the given user.

    Args:
        db (Session): database connection
        article (schemas.ArticleCreate): creation data
        creator_id (int): user id of the creator
    """
    created_at = datetime.utcnow()
    new_article = models.Article(**article.dict(), created_by=creator_id, time_created=created_at)
    db.add(new_article)
    db.commit()
def update_article(db: Session, article: schemas.ArticleUpdate) -> None:
    """Update an article's title and/or content.

    Fields that are empty/None in the update payload are left unchanged.

    Args:
        db (Session): database connection
        article (schemas.ArticleUpdate): update data (must carry the id)
    """
    from_db = get_article(db, article.id)
    if from_db is None:
        # Robustness fix: previously a missing article raised AttributeError
        # on the None returned by get_article(); now it's a silent no-op.
        return
    if article.title:
        from_db.title = article.title
    if article.content:
        from_db.content = article.content
    db.commit()
| [
"bcrypt.gensalt",
"datetime.datetime.utcnow"
] | [((1617, 1633), 'bcrypt.gensalt', 'bcrypt.gensalt', ([], {}), '()\n', (1631, 1633), False, 'import bcrypt\n'), ((2633, 2650), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2648, 2650), False, 'from datetime import datetime\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 <<EMAIL>>
#
# Distributed under terms of the BSD 3-Clause license.
import hashlib
import itertools
import json
from decimal import Decimal
from multiprocessing import (
cpu_count,
Pool,
Process,
Queue
)
class DecimalJsonEncoder(json.JSONEncoder):
    """JSON encoder that serialises Decimal values as floats."""

    def default(self, obj):
        # Only Decimal gets special handling; all other unknown types
        # defer to the base class (which raises TypeError).
        if isinstance(obj, Decimal):
            return float(obj)
        return super(DecimalJsonEncoder, self).default(obj)
def dumps(*data, **kwargs):
    """Serialise the positional arguments as one JSON array, with Decimal support."""
    return json.dumps(data, cls=DecimalJsonEncoder, **kwargs)
def do_pooled_pow(last_proof, last_hash, difficulty):
    # Proof-of-work search driven by a multiprocessing.Pool over an unbounded
    # candidate stream; returns the first proof a worker publishes on the queue.
    # NOTE(review): multiprocessing.Queue objects cannot be passed as task
    # arguments to Pool workers (they must be inherited at fork time) --
    # confirm this actually runs on the target platform.
    # NOTE(review): starmap_async over itertools.count() is an infinite
    # iterable; verify the pool drains it lazily rather than materialising it.
    queue = Queue()
    with Pool(1) as p:
        result = p.starmap_async(pool_worker, ((
            queue,
            i,
            last_proof,
            last_hash,
            difficulty,
        ) for i in itertools.count()), chunksize=100)
        # Block until some worker finds a valid proof, then stop the pool.
        proof = queue.get()
        result.wait()
        p.terminate()
    return proof
def pool_worker(queue, proof, last_proof, last_hash, difficulty):
    """Check one candidate proof; publish it on the queue when it is valid.

    Returns the proof when it satisfies the difficulty target, else None.
    """
    # Bug fix: valid_proof takes four required arguments; `difficulty` was
    # previously dropped, making every call raise TypeError.
    if valid_proof(last_proof, proof, last_hash, difficulty):
        queue.put(proof)
        return proof
    return None
def do_process_pow(last_proof, last_hash, difficulty):
    """Brute-force a valid proof with one worker process per CPU core.

    Each worker scans candidates with a distinct stride and publishes the
    first valid proof it finds on a shared queue; the remaining workers are
    terminated once an answer arrives.
    """
    queue = Queue()
    # Strides start at 1: the original range(cpu_count()) handed the first
    # worker step=0, which made it spin forever on the same candidate (and
    # hang outright on single-core machines, where it is the only worker).
    processes = [
        Process(
            target=process_worker,
            args=(
                queue,
                last_proof,
                last_hash,
                difficulty,
                step,
            )
        ) for step in range(1, cpu_count() + 1)
    ]
    for p in processes:
        p.start()
    proof = queue.get()
    for p in processes:
        p.terminate()
    return proof
def process_worker(queue, last_proof, last_hash, difficulty, step):
    """Scan candidate proofs step, 2*step, 3*step, ... and publish the first valid one."""
    candidate = step
    while not valid_proof(last_proof, candidate, last_hash, difficulty):
        candidate += step
    queue.put(candidate)
def valid_proof(last_proof, proof, last_hash, difficulty):
    """
    Validate a candidate proof against the difficulty target.

    :param last_proof: <int> Previous Proof
    :param proof: <int> Current Proof
    :param last_hash: <str> The hash of the Previous Block
    :param difficulty: <int> Number of leading zero bits required
    :return: <bool> True if correct, False if not.
    """
    digest = hashlib.sha256(f'{last_proof}{proof}{last_hash}'.encode()).digest()
    # Expand the digest to its 256-bit binary form and require that the
    # first `difficulty` bits are all zero.
    bits = ''.join(format(byte, '08b') for byte in digest)
    return bits[:difficulty] == '0' * difficulty
| [
"hashlib.sha256",
"multiprocessing.Process",
"json.dumps",
"multiprocessing.cpu_count",
"itertools.count",
"multiprocessing.Pool",
"multiprocessing.Queue"
] | [((508, 558), 'json.dumps', 'json.dumps', (['data'], {'cls': 'DecimalJsonEncoder'}), '(data, cls=DecimalJsonEncoder, **kwargs)\n', (518, 558), False, 'import json\n'), ((634, 641), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (639, 641), False, 'from multiprocessing import cpu_count, Pool, Process, Queue\n'), ((1112, 1119), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1117, 1119), False, 'from multiprocessing import cpu_count, Pool, Process, Queue\n'), ((1934, 1955), 'hashlib.sha256', 'hashlib.sha256', (['guess'], {}), '(guess)\n', (1948, 1955), False, 'import hashlib\n'), ((648, 655), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (652, 655), False, 'from multiprocessing import cpu_count, Pool, Process, Queue\n'), ((1137, 1226), 'multiprocessing.Process', 'Process', ([], {'target': 'process_worker', 'args': '(queue, last_proof, last_hash, difficulty, step)'}), '(target=process_worker, args=(queue, last_proof, last_hash,\n difficulty, step))\n', (1144, 1226), False, 'from multiprocessing import cpu_count, Pool, Process, Queue\n'), ((1277, 1288), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (1286, 1288), False, 'from multiprocessing import cpu_count, Pool, Process, Queue\n'), ((778, 795), 'itertools.count', 'itertools.count', ([], {}), '()\n', (793, 795), False, 'import itertools\n')] |
#
# Copyright 2018-2021 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from types import SimpleNamespace
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import yaml
from elyra.pipeline.component import Component
from elyra.pipeline.component import ComponentParameter
from elyra.pipeline.component import ComponentParser
class KfpComponentParser(ComponentParser):
    """Parse Kubeflow Pipelines (KFP) component YAML definitions into Elyra components."""
    _component_platform = "kfp"
    _file_types = [".yaml"]
    def parse(self, registry_entry: SimpleNamespace) -> Optional[List[Component]]:
        """Parse one registry entry into a single-element Component list.

        Returns None when the entry's YAML definition cannot be read.
        """
        # Get YAML object from component definition
        component_yaml = self._read_component_yaml(registry_entry)
        if not component_yaml:
            return None
        # Assign component_id and description
        component_id = self.get_component_id(registry_entry.location, component_yaml.get('name', ''))
        description = ""
        if component_yaml.get('description'):
            # Collapse all whitespace runs (incl. newlines) into single spaces
            description = ' '.join(component_yaml.get('description').split())
        component_properties = self._parse_properties(component_yaml)
        component = Component(id=component_id,
                              name=component_yaml.get('name'),
                              description=description,
                              runtime=self.component_platform,
                              location_type=registry_entry.location_type,
                              location=registry_entry.location,
                              properties=component_properties,
                              categories=registry_entry.categories)
        return [component]
    def _parse_properties(self, component_yaml: Dict[str, Any]) -> List[ComponentParameter]:
        """Build a ComponentParameter for every entry under the YAML `inputs` key."""
        properties: List[ComponentParameter] = list()
        # NOTE: Currently no runtime-specific properties are needed
        # properties.extend(self.get_runtime_specific_properties())
        # Then loop through and create custom properties
        input_params = component_yaml.get('inputs', [])
        for param in input_params:
            # KFP components default to being required unless otherwise stated.
            # Reference: https://www.kubeflow.org/docs/components/pipelines/reference/component-spec/#interface
            required = True
            if "optional" in param and param.get('optional') is True:
                required = False
            # Assign type, default to string
            data_type = param.get('type', 'string')
            # Set description and include parsed type information
            description = self._format_description(description=param.get('description', ''),
                                                   data_type=data_type)
            # Change type to reflect the type of input (inputValue vs inputPath)
            data_type = self._get_adjusted_parameter_fields(component_body=component_yaml,
                                                           io_object_name=param.get('name'),
                                                           io_object_type="input",
                                                           parameter_type=data_type)
            data_type, control_id, default_value = self.determine_type_information(data_type)
            # Get value if provided
            value = param.get('default', '')
            # Parameter ref: lower-cased name with spaces turned into underscores
            ref = param.get('name').lower().replace(' ', '_')
            properties.append(ComponentParameter(id=ref,
                                                 name=param.get('name'),
                                                 data_type=data_type,
                                                 value=(value or default_value),
                                                 description=description,
                                                 control_id=control_id,
                                                 required=required))
        return properties
    def get_runtime_specific_properties(self) -> List[ComponentParameter]:
        """
        Define properties that are common to the KFP runtime.
        """
        return [
            ComponentParameter(
                id="runtime_image",
                name="Runtime Image",
                data_type="string",
                value="",
                description="Docker image used as execution environment.",
                control="readonly",
                required=True,
            )
        ]
    def _read_component_yaml(self, registry_entry: SimpleNamespace) -> Optional[Dict[str, Any]]:
        """
        Convert the registry entry's component_definition string into a YAML
        object; return None (and log at debug level) on any parse failure.
        """
        try:
            return yaml.safe_load(registry_entry.component_definition)
        except Exception as e:
            self.log.debug(f"Could not read definition for component at "
                           f"location: '{registry_entry.location}' -> {str(e)}")
            return None
    def _get_adjusted_parameter_fields(self,
                                       component_body: Dict[str, Any],
                                       io_object_name: str,
                                       io_object_type: str,
                                       parameter_type: str) -> str:
        """
        Return "file" when the named input is referenced as a KFP path
        parameter (e.g. {inputPath: ...}) anywhere in the container command
        or args; otherwise return parameter_type unchanged.
        """
        adjusted_type = parameter_type
        if "implementation" in component_body and "container" in component_body['implementation']:
            if "command" in component_body['implementation']['container']:
                for command in component_body['implementation']['container']['command']:
                    if isinstance(command, dict) and list(command.values())[0] == io_object_name and \
                            list(command.keys())[0] == f"{io_object_type}Path":
                        adjusted_type = "file"
            if "args" in component_body['implementation']['container']:
                for arg in component_body['implementation']['container']['args']:
                    if isinstance(arg, dict) and list(arg.values())[0] == io_object_name and \
                            list(arg.keys())[0] == f"{io_object_type}Path":
                        adjusted_type = "file"
        return adjusted_type
| [
"yaml.safe_load",
"elyra.pipeline.component.ComponentParameter"
] | [((4687, 4885), 'elyra.pipeline.component.ComponentParameter', 'ComponentParameter', ([], {'id': '"""runtime_image"""', 'name': '"""Runtime Image"""', 'data_type': '"""string"""', 'value': '""""""', 'description': '"""Docker image used as execution environment."""', 'control': '"""readonly"""', 'required': '(True)'}), "(id='runtime_image', name='Runtime Image', data_type=\n 'string', value='', description=\n 'Docker image used as execution environment.', control='readonly',\n required=True)\n", (4705, 4885), False, 'from elyra.pipeline.component import ComponentParameter\n'), ((5222, 5273), 'yaml.safe_load', 'yaml.safe_load', (['registry_entry.component_definition'], {}), '(registry_entry.component_definition)\n', (5236, 5273), False, 'import yaml\n')] |
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from common import MayanAppConfig
from .licenses import * # NOQA
class MIMETypesApp(MayanAppConfig):
    """Application configuration for MIME type support."""
    name = 'mimetype'
    verbose_name = _('MIME types')

    def ready(self, *args, **kwargs):
        # Delegate all startup work to the base app config.
        super().ready(*args, **kwargs)
| [
"django.utils.translation.ugettext_lazy"
] | [((244, 259), 'django.utils.translation.ugettext_lazy', '_', (['"""MIME types"""'], {}), "('MIME types')\n", (245, 259), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
from packages.levels.Level import Level
import packages.levels.levels as Levels
import packages.resources.functions as function
import packages.resources.variables as var
from packages.filesystem.Directory import Directory
from packages.filesystem.File import File
# Exercise the Level API: seed the fake bash history, then run through
# instruct/help/check on a sample level.
var.bash_history = "Check"
level = Level("Instruct", "Help", "Check")
level.instruct()
level.help()
print(level.check())
"packages.levels.Level.Level"
] | [((303, 337), 'packages.levels.Level.Level', 'Level', (['"""Instruct"""', '"""Help"""', '"""Check"""'], {}), "('Instruct', 'Help', 'Check')\n", (308, 337), False, 'from packages.levels.Level import Level\n')] |
# noinspection PyUnresolvedReferences
import unittest
from cython_vst_loader.vst_loader_wrapper import allocate_float_buffer, get_float_buffer_as_list, \
free_buffer, \
allocate_double_buffer, get_double_buffer_as_list
class TestBuffers(unittest.TestCase):
def test_float_buffer(self):
pointer = allocate_float_buffer(10, 12.345)
assert (pointer > 1000) # something like a pointer
list_object = get_float_buffer_as_list(pointer, 10)
assert (isinstance(list_object, list))
assert (len(list_object) == 10)
for element in list_object:
assert (self.roughly_equals(element, 12.345))
free_buffer(pointer)
def test_double_buffer(self):
pointer = allocate_double_buffer(10, 12.345)
assert (pointer > 1000) # something like a pointer
list_object = get_double_buffer_as_list(pointer, 10)
assert (isinstance(list_object, list))
assert (len(list_object) == 10)
for element in list_object:
assert (self.roughly_equals(element, 12.345))
free_buffer(pointer)
def roughly_equals(self, a: float, b: float) -> bool:
tolerance: float = 0.00001
return abs(a - b) < tolerance
| [
"cython_vst_loader.vst_loader_wrapper.free_buffer",
"cython_vst_loader.vst_loader_wrapper.get_double_buffer_as_list",
"cython_vst_loader.vst_loader_wrapper.allocate_float_buffer",
"cython_vst_loader.vst_loader_wrapper.get_float_buffer_as_list",
"cython_vst_loader.vst_loader_wrapper.allocate_double_buffer"
] | [((320, 353), 'cython_vst_loader.vst_loader_wrapper.allocate_float_buffer', 'allocate_float_buffer', (['(10)', '(12.345)'], {}), '(10, 12.345)\n', (341, 353), False, 'from cython_vst_loader.vst_loader_wrapper import allocate_float_buffer, get_float_buffer_as_list, free_buffer, allocate_double_buffer, get_double_buffer_as_list\n'), ((436, 473), 'cython_vst_loader.vst_loader_wrapper.get_float_buffer_as_list', 'get_float_buffer_as_list', (['pointer', '(10)'], {}), '(pointer, 10)\n', (460, 473), False, 'from cython_vst_loader.vst_loader_wrapper import allocate_float_buffer, get_float_buffer_as_list, free_buffer, allocate_double_buffer, get_double_buffer_as_list\n'), ((663, 683), 'cython_vst_loader.vst_loader_wrapper.free_buffer', 'free_buffer', (['pointer'], {}), '(pointer)\n', (674, 683), False, 'from cython_vst_loader.vst_loader_wrapper import allocate_float_buffer, get_float_buffer_as_list, free_buffer, allocate_double_buffer, get_double_buffer_as_list\n'), ((737, 771), 'cython_vst_loader.vst_loader_wrapper.allocate_double_buffer', 'allocate_double_buffer', (['(10)', '(12.345)'], {}), '(10, 12.345)\n', (759, 771), False, 'from cython_vst_loader.vst_loader_wrapper import allocate_float_buffer, get_float_buffer_as_list, free_buffer, allocate_double_buffer, get_double_buffer_as_list\n'), ((854, 892), 'cython_vst_loader.vst_loader_wrapper.get_double_buffer_as_list', 'get_double_buffer_as_list', (['pointer', '(10)'], {}), '(pointer, 10)\n', (879, 892), False, 'from cython_vst_loader.vst_loader_wrapper import allocate_float_buffer, get_float_buffer_as_list, free_buffer, allocate_double_buffer, get_double_buffer_as_list\n'), ((1082, 1102), 'cython_vst_loader.vst_loader_wrapper.free_buffer', 'free_buffer', (['pointer'], {}), '(pointer)\n', (1093, 1102), False, 'from cython_vst_loader.vst_loader_wrapper import allocate_float_buffer, get_float_buffer_as_list, free_buffer, allocate_double_buffer, get_double_buffer_as_list\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import uuid
import requests
class MicrosoftTranslator:
    """Minimal client for the Microsoft Translator v3 REST API."""

    def __init__(self, subscription_key: str, subscription_region: str):
        # The region must match the Cognitive Services resource location.
        self.subscription_key = subscription_key
        self.subscription_region = subscription_region

    # Our Flask route supplies two arguments: text_input and language_output.
    # When the translate-text button is pressed, the Ajax request grabs these
    # values from the web app and uses them in the request (see main.js).
    async def translate(self, text_input, language_output):
        """Send text_input to the service and return its first translation."""
        endpoint = (
            "https://api.cognitive.microsofttranslator.com"
            + "/translate?api-version=3.0"
            + "&to=" + language_output
        )
        request_headers = {
            "Ocp-Apim-Subscription-Key": self.subscription_key,
            "Ocp-Apim-Subscription-Region": self.subscription_region,
            "Content-type": "application/json",
            "X-ClientTraceId": str(uuid.uuid4()),
        }
        # The API accepts a list, so more than one object could be passed.
        payload = [{"text": text_input}]
        response = requests.post(endpoint, headers=request_headers, json=payload)
        json_response = response.json()
        # For this sample, only the first translation of the first item matters.
        return json_response[0]["translations"][0]["text"]
| [
"requests.post",
"uuid.uuid4"
] | [((1305, 1363), 'requests.post', 'requests.post', (['constructed_url'], {'headers': 'headers', 'json': 'body'}), '(constructed_url, headers=headers, json=body)\n', (1318, 1363), False, 'import requests\n'), ((1169, 1181), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1179, 1181), False, 'import uuid\n')] |
# 经典面向对象的GUI写法
from tkinter import *
from tkinter import messagebox
class Application(Frame):
    """A classic object-oriented Tkinter GUI: a simple login form."""
    def __init__(self,master=None):
        super().__init__(master)
        self.master = master
        self.pack()
        self.createWidget()
    def createWidget(self):
        """Create the widgets: username/password entries plus login and quit buttons."""
        self.label01 = Label(self, text="用户名")
        self.label01.pack()
        # Bind a StringVar to the entry widget; the link is bidirectional.
        v1 = StringVar() # StringVar DoubleVar IntVar BooleanVar
        self.entry01 = Entry(self, textvariable=v1)
        self.entry01.pack()
        v1.set("admin")
        # Password entry (masked with '*')
        self.label02 = Label(self, text="密码")
        self.label02.pack()
        # Bind a StringVar to the entry widget; the link is bidirectional.
        v2 = StringVar() # StringVar DoubleVar IntVar BooleanVar
        self.entry02 = Entry(self, textvariable=v2, show="*")
        self.entry02.pack()
        self.btn01 = Button(self, text="登录", command=self.login)
        self.btn01.pack()
        # Quit button: destroys the root window
        self.btnQuit = Button(self, text="退出", command=self.master.destroy)
        self.btnQuit.pack()
    def login(self):
        """Read both entries and compare against the hard-coded credentials."""
        username = self.entry01.get()
        pwd = self.entry02.get()
        print("用户名:"+username)
        print("密码:"+pwd)
        if username == "lhy" and pwd == "<PASSWORD>":
            messagebox.showinfo("登录界面", "您已登录,欢迎")
        else:
            messagebox.showinfo("登录界面", "密码错误")
if __name__ == '__main__':
    # Build the root window, attach the application frame, and start the loop.
    window = Tk()
    window.geometry("1280x720+200+300")
    window.title("")
    app = Application(master=window)
    window.mainloop()
| [
"tkinter.messagebox.showinfo"
] | [((1318, 1356), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""登录界面"""', '"""您已登录,欢迎"""'], {}), "('登录界面', '您已登录,欢迎')\n", (1337, 1356), False, 'from tkinter import messagebox\n'), ((1383, 1418), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""登录界面"""', '"""密码错误"""'], {}), "('登录界面', '密码错误')\n", (1402, 1418), False, 'from tkinter import messagebox\n')] |
# Grupo da Maioridade
'''Crie um programa que leia o ANO DE NASCIMENTO de
SETE PESSOAS. No final, mostre quantas pessoas ainda
não atingiram a maioridade e quantas já são maiores'''
from datetime import date
# Read the birth year of seven people and report how many are already
# of age (>= 21) and how many are still minors.
anoatual = date.today().year  # current year from the machine's clock
totalmaior = 0
totalmenor = 0
for pessoas in range(1, 8):
    anonasc = int(input('Digite o ano de nascimento da {}ª pessoa: '.format(pessoas)))
    # Guard clause: reject implausible years and move on to the next person.
    if not 1900 < anonasc < anoatual:
        print('\033[31m''Ocorreu um erro no ano em que você digitou! Tente novamente.')
        continue
    idade = anoatual - anonasc
    if idade >= 21:
        totalmaior += 1
    else:
        totalmenor += 1
print('Há {} pessoas neste grupo que estão na maioridade'.format(totalmaior))
print('E há {} pessoas que ainda são menor de idade'.format(totalmenor))
| [
"datetime.date.today"
] | [((220, 232), 'datetime.date.today', 'date.today', ([], {}), '()\n', (230, 232), False, 'from datetime import date\n')] |
from typing import Tuple
import numpy as np
import png
from skimage.transform import resize
def load_world(filename: str, size: Tuple[int, int], resolution: int) -> np.array:
    """Load a preconstructed track to initialize world.

    Args:
        filename: Full path to the track file (png).
        size: Width and height of the map.
        resolution: Resolution of the grid map (i.e. into how many cells
            one meter is divided into).

    Returns:
        An initialized gridmap based on the preconstructed track as an
        n x m numpy array whose first axis is height (rows) and second
        axis is width (columns), after applying the resolution.
    """
    width_in_cells, height_in_cells = np.multiply(size, resolution)
    world = np.array(png_to_ogm(
        filename, normalized=True, origin='lower'))
    # If the image is already in our desired (rows, cols) == (height, width)
    # shape, no need to rescale it.
    if world.shape == (height_in_cells, width_in_cells):
        return world
    # Bug fix: the resize target must match the (height, width) orientation
    # checked above; the previous (width, height) order transposed the map
    # for non-square worlds.
    return resize(world, (height_in_cells, width_in_cells))
def png_to_ogm(filename, normalized=False, origin='lower'):
    """Convert a png image to an occupancy grid map.

    Inspired by https://github.com/richardos/occupancy-grid-a-star

    Args:
        filename: Path to the png file.
        normalized: Whether to normalize the data, i.e. to be in value range [0, 1]
        origin: Point of origin (0,0)

    Returns:
        2D Array
    """
    reader = png.Reader(filename)
    png_info = reader.read()
    rows = list(png_info[2])
    bitdepth = png_info[3]['bitdepth']
    planes = png_info[3]['planes']
    grid = []
    for row in rows:
        grid_row = []
        # Keep only the first channel of each pixel (every `planes`-th sample).
        for idx, sample in enumerate(row):
            if idx % planes == 0:
                if normalized:
                    grid_row.append(sample * 1.0 / (2 ** bitdepth))
                else:
                    grid_row.append(sample)
        grid.append(grid_row)
    if origin == 'lower':
        grid.reverse()
    return grid
| [
"numpy.multiply",
"skimage.transform.resize",
"png.Reader"
] | [((778, 807), 'numpy.multiply', 'np.multiply', (['size', 'resolution'], {}), '(size, resolution)\n', (789, 807), True, 'import numpy as np\n'), ((1122, 1170), 'skimage.transform.resize', 'resize', (['world', '(width_in_cells, height_in_cells)'], {}), '(world, (width_in_cells, height_in_cells))\n', (1128, 1170), False, 'from skimage.transform import resize\n'), ((1604, 1624), 'png.Reader', 'png.Reader', (['filename'], {}), '(filename)\n', (1614, 1624), False, 'import png\n')] |
#! /usr/bin/env python
# Version: 0.1.1
import re
def convert_ws_header_vb_attributes(df):
    """Build commented-out VB 'Dim declarations (camelCase) from header names.

    Each key is lower-cased, truncated at the first '(' and split on spaces;
    the first kept word stays lower case, subsequent words are capitalized.
    """
    declarations = []
    for key in df.keys():
        header = str(key).lower().split("(")[0]
        words = [w.capitalize().replace('\t', '') for w in header.split(" ")]
        name = "\'Dim "
        accepted = 0
        for word in words:
            # Words before the first accepted one are kept lower case.
            if accepted == 0:
                word = word.lower()
            if word != "" and word != "-":
                name += word
                accepted += 1
        declarations.append(name.rstrip() + "() As Variant\n")
    return "".join(declarations)
def remove_special_characters(text):
    """Strip every non-letter character from text.

    Returns the letters-only string when any letters remain; otherwise the
    original text is returned unchanged.
    """
    # re.split on non-letters yields only letter runs; joining removes
    # everything else (digits, punctuation, whitespace).
    letters = ''.join(re.split(r'[^a-zA-Z]', text))
    return letters if letters else text
| [
"re.split"
] | [((660, 687), 're.split', 're.split', (['"""[^a-zA-Z]"""', 'text'], {}), "('[^a-zA-Z]', text)\n", (668, 687), False, 'import re\n')] |
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import IncrementalPCA as _IncrementalPCA
from ..count_matrix.zarr import dataset_to_array
def _normalize_per_cell(matrix, cell_sum):
print('normalize per cell to CPM')
if cell_sum is None:
norm_vec = (matrix.sum(axis=1) + 1) / 1000000
else:
norm_vec = cell_sum / 1000000
norm_vec = norm_vec.values
norm_vec = norm_vec.astype(np.float32)
matrix /= norm_vec[:, None]
return matrix
class IncrementalPCA:
    """Chunked PCA over a cell-by-feature dataset that may not fit in memory.

    Wraps sklearn's IncrementalPCA with optional per-cell CPM normalization,
    log1p transform and scaling, applied chunk by chunk in the same order
    during fit and transform.
    """
    def __init__(self, n_components=100, sparse=False, normalize_per_cell=True, log1p=True, scale=True, **kwargs):
        # Underlying sklearn estimator; extra kwargs are forwarded to it.
        self.pca = _IncrementalPCA(n_components=n_components, **kwargs)
        self.sparse = sparse
        self.normalize_per_cell = normalize_per_cell
        self.log1p = log1p
        self.scale = scale
        # Fitted lazily on the first chunk of fit() when scale is True.
        self.scaler = None
        # The remaining attributes are recorded by fit() so that transform()
        # replays the exact same preprocessing.
        self.cell_sum = None
        self.use_features = None
        self.obs_dim = None
        self.var_dim = None
        self.load_chunk = None
        self._fit = False
        return
    def fit(self,
            ds,
            use_cells=None,
            use_features=None,
            chunk=500000,
            cell_sum=None,
            var_dim='gene',
            obs_dim='cell',
            load_chunk=None,
            random_shuffle=True):
        """Incrementally fit the PCA (and scaler) on `ds`, chunk by chunk.

        `chunk` is the number of cells per partial_fit; `load_chunk` controls
        the zarr loading granularity (defaults to `chunk`).
        """
        self.cell_sum = cell_sum
        self.use_features = use_features
        self.obs_dim = obs_dim
        self.var_dim = var_dim
        self.load_chunk = chunk if load_chunk is None else load_chunk
        # prepare index
        cell_index = ds.get_index(obs_dim)
        if use_cells is not None:
            cell_index = cell_index[cell_index.isin(use_cells)].copy()
        # random shuffle to make fitting more stable
        if random_shuffle:
            cell_order = cell_index.tolist()
            np.random.shuffle(cell_order)
            cell_order = pd.Index(cell_order)
        else:
            cell_order = cell_index
        # fit by chunks
        chunk_stds = []
        chunk_means = []
        for chunk_start in range(0, cell_order.size, chunk):
            print(f'Fitting {chunk_start}-{chunk_start + chunk}')
            _chunk_cells = cell_order[chunk_start:chunk_start + chunk]
            _chunk_matrix, _chunk_cells, _chunk_genes = dataset_to_array(
                ds,
                use_cells=_chunk_cells,
                use_genes=use_features,
                sparse=self.sparse,
                obs_dim=obs_dim,
                var_dim=var_dim,
                chunk=self.load_chunk)
            if cell_sum is not None:
                _chunk_cell_sum = cell_sum.loc[_chunk_cells]
            else:
                _chunk_cell_sum = None
            _chunk_matrix = _chunk_matrix.astype(np.float32)
            # normalize cell counts
            if self.normalize_per_cell:
                _chunk_matrix = _normalize_per_cell(matrix=_chunk_matrix,
                                                    cell_sum=_chunk_cell_sum)
            # log transfer
            if self.log1p:
                print('log1p transform')
                _chunk_matrix = np.log1p(_chunk_matrix)
            # scale
            if self.scale:
                print('Scale')
                if self.scaler is None:
                    # assume the chunk is large enough, so only use the first chunk to fit
                    # e.g., 5,000,000 cells
                    self.scaler = StandardScaler(with_mean=not self.sparse)
                    _chunk_matrix = self.scaler.fit_transform(_chunk_matrix)
                else:
                    # transform remaining cells
                    _chunk_matrix = self.scaler.transform(_chunk_matrix)
            # save chunk stats for checking robustness
            # NOTE(review): chunk_stds/chunk_means are collected but never
            # returned or stored -- confirm whether they are still needed.
            chunk_stds.append(_chunk_matrix.std(axis=0))
            chunk_means.append(_chunk_matrix.mean(axis=0))
            # fit IncrementalPCA
            print('Fit PCA')
            self.pca.partial_fit(_chunk_matrix)
        self._fit = True
        return
    def transform(self, ds, use_cells=None, chunk=100000):
        """Project cells of `ds` onto the fitted components.

        Replays the preprocessing recorded by fit() and returns a DataFrame
        of PCs indexed by cell id. Raises ValueError if fit() was not called.
        """
        if not self._fit:
            raise ValueError('fit first before transform')
        cell_index = ds.get_index(self.obs_dim)
        if use_cells is not None:
            cell_index = cell_index[cell_index.isin(use_cells)].copy()
        total_pcs = []
        for chunk_start in range(0, cell_index.size, chunk):
            print(f'Transforming {chunk_start}-{chunk_start + chunk}')
            _chunk_cells = cell_index[chunk_start:chunk_start + chunk]
            _chunk_matrix, _chunk_cells, _chunk_genes = dataset_to_array(
                ds,
                use_cells=_chunk_cells,
                use_genes=self.use_features,
                sparse=self.sparse,
                obs_dim=self.obs_dim,
                var_dim=self.var_dim,
                chunk=self.load_chunk)
            if self.cell_sum is not None:
                _chunk_cell_sum = self.cell_sum.loc[_chunk_cells]
            else:
                _chunk_cell_sum = None
            _chunk_matrix = _chunk_matrix.astype(np.float32)
            # normalize cell counts
            if self.normalize_per_cell:
                _chunk_matrix = _normalize_per_cell(matrix=_chunk_matrix,
                                                    cell_sum=_chunk_cell_sum)
            # log transfer
            if self.log1p:
                print('log1p transform')
                _chunk_matrix = np.log1p(_chunk_matrix)
            # scale
            if self.scale:
                print('Scale')
                if self.scaler is None:
                    # this shouldn't happen in transform
                    raise ValueError('scale is True, but scaler not exist')
                else:
                    # transform remaining cells
                    _chunk_matrix = self.scaler.transform(_chunk_matrix)
            # transform
            print('Transform PCA')
            pcs = self.pca.transform(_chunk_matrix)
            pcs = pd.DataFrame(pcs, index=_chunk_cells)
            total_pcs.append(pcs)
        total_pcs = pd.concat(total_pcs)
        return total_pcs
    def fit_transform(self, ds,
                      use_cells=None,
                      use_features=None,
                      chunk=500000,
                      cell_sum=None,
                      var_dim='gene',
                      obs_dim='cell',
                      load_chunk=None,
                      random_shuffle=True):
        """Convenience wrapper: fit() on `ds`, then transform() the same cells."""
        self.fit(ds,
                 use_cells=use_cells,
                 use_features=use_features,
                 chunk=chunk,
                 cell_sum=cell_sum,
                 var_dim=var_dim,
                 obs_dim=obs_dim,
                 load_chunk=load_chunk,
                 random_shuffle=random_shuffle)
        total_pcs = self.transform(ds, use_cells=use_cells, chunk=self.load_chunk)
        return total_pcs
| [
"pandas.Index",
"sklearn.preprocessing.StandardScaler",
"pandas.DataFrame",
"sklearn.decomposition.IncrementalPCA",
"numpy.log1p",
"pandas.concat",
"numpy.random.shuffle"
] | [((702, 754), 'sklearn.decomposition.IncrementalPCA', '_IncrementalPCA', ([], {'n_components': 'n_components'}), '(n_components=n_components, **kwargs)\n', (717, 754), True, 'from sklearn.decomposition import IncrementalPCA as _IncrementalPCA\n'), ((6155, 6175), 'pandas.concat', 'pd.concat', (['total_pcs'], {}), '(total_pcs)\n', (6164, 6175), True, 'import pandas as pd\n'), ((1891, 1920), 'numpy.random.shuffle', 'np.random.shuffle', (['cell_order'], {}), '(cell_order)\n', (1908, 1920), True, 'import numpy as np\n'), ((1946, 1966), 'pandas.Index', 'pd.Index', (['cell_order'], {}), '(cell_order)\n', (1954, 1966), True, 'import pandas as pd\n'), ((6063, 6100), 'pandas.DataFrame', 'pd.DataFrame', (['pcs'], {'index': '_chunk_cells'}), '(pcs, index=_chunk_cells)\n', (6075, 6100), True, 'import pandas as pd\n'), ((3177, 3200), 'numpy.log1p', 'np.log1p', (['_chunk_matrix'], {}), '(_chunk_matrix)\n', (3185, 3200), True, 'import numpy as np\n'), ((5514, 5537), 'numpy.log1p', 'np.log1p', (['_chunk_matrix'], {}), '(_chunk_matrix)\n', (5522, 5537), True, 'import numpy as np\n'), ((3489, 3530), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(not self.sparse)'}), '(with_mean=not self.sparse)\n', (3503, 3530), False, 'from sklearn.preprocessing import StandardScaler\n')] |
# Predict time series w/o using OutputProjectWrapper
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Create time series
t_min, t_max = 0, 30
resolution = 0.1
def time_series(t):
    """Synthetic signal: a slowly growing sine plus a faster small-period sine."""
    slow = t * np.sin(t) / 3
    fast = 2 * np.sin(t * 5)
    return slow + fast
def next_batch(batch_size, n_steps):
    """Sample batch_size random windows of the series.

    Returns (inputs, targets), each of shape (batch_size, n_steps, 1),
    where targets are the inputs shifted forward by one step.
    """
    starts = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
    ts = starts + np.arange(0., n_steps + 1) * resolution
    ys = time_series(ts)
    return ys[:, :-1].reshape(-1, n_steps, 1), ys[:, 1:].reshape(-1, n_steps, 1)
# --- Data / model configuration ---------------------------------------------
# Bug fix: (t_max - t_min) // resolution is a float (299.0); np.linspace
# requires an integer sample count and raises TypeError otherwise.
t = np.linspace(t_min, t_max, int((t_max - t_min) // resolution))

n_steps = 20
t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)

n_inputs = 1
n_neurons = 100
n_outputs = 1

# TF1 graph mode: placeholders for input sequences and one-step-ahead targets.
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])

basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons,
                                         activation=tf.nn.relu)
rnn_outputs, states = tf.nn.dynamic_rnn(basic_cell, X,
                                        dtype=tf.float32)

learning_rate = 0.001

# Project the n_neurons-wide RNN outputs down to n_outputs per time step
# without OutputProjectionWrapper: flatten, dense layer, reshape back.
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])

loss = tf.reduce_sum(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()

# --- Train, then predict the next values for a known window -----------------
n_iterations = 1000
batch_size = 50
with tf.Session() as sess:
    init.run()
    for k in range(n_iterations):
        X_batch, y_batch = next_batch(batch_size, n_steps)
        sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if k % 100 == 0:
            mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
            print(k, "\tMSE: ", mse)
    X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
    y_pred = sess.run(outputs, feed_dict={X: X_new})
print(y_pred)

# --- Re-train and free-run the model to generate new sequences --------------
n_iterations = 2000
batch_size = 50
with tf.Session() as sess:
    init.run()
    for k in range(n_iterations):
        X_batch, y_batch = next_batch(batch_size, n_steps)
        sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if k % 100 == 0:
            mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
            print(k, "\tMSE: ", mse)
    # Seed 1: all zeros; repeatedly feed the model its own last prediction.
    sequence1 = [0. for j in range(n_steps)]
    for k in range(len(t) - n_steps):
        X_batch = np.array(sequence1[-n_steps:]).reshape(1, n_steps, 1)
        y_pred = sess.run(outputs, feed_dict={X: X_batch})
        sequence1.append(y_pred[0, -1, 0])
    # Seed 2: a window of the true series further along the time axis.
    # NOTE(review): (t_max-t_min/3) == t_max - (t_min/3); possibly
    # (t_max - t_min)/3 was intended -- confirm before changing.
    sequence2 = [time_series(i * resolution + t_min + (t_max-t_min/3)) for i in range(n_steps)]
    for j in range(len(t) - n_steps):
        X_batch = np.array(sequence2[-n_steps:]).reshape(1, n_steps, 1)
        y_pred = sess.run(outputs, feed_dict={X: X_batch})
        sequence2.append(y_pred[0, -1, 0])

plt.figure(figsize=(11,4))
plt.subplot(121)
plt.plot(t, sequence1, 'b-')
plt.plot(t[:n_steps],sequence1[:n_steps], 'b-', linewidth=3)
plt.xlabel('Time')
plt.ylabel('Value')

plt.subplot(122)
plt.plot(t, sequence2, 'b-')
plt.plot(t[:n_steps], sequence2[:n_steps], 'b-', linewidth=3)
plt.xlabel('Time')
plt.show() | [
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.sin",
"numpy.arange",
"tensorflow.placeholder",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"tensorflow.Session",
"tensorflow.nn.dynamic_rnn",
"numpy.linspace",
"tensorflow.square",
"tensorflow.train.AdamOptimiz... | [((540, 596), 'numpy.linspace', 'np.linspace', (['t_min', 't_max', '((t_max - t_min) // resolution)'], {}), '(t_min, t_max, (t_max - t_min) // resolution)\n', (551, 596), True, 'import numpy as np\n'), ((624, 689), 'numpy.linspace', 'np.linspace', (['(12.2)', '(12.2 + resolution * (n_steps + 1))', '(n_steps + 1)'], {}), '(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)\n', (635, 689), True, 'import numpy as np\n'), ((739, 792), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, n_steps, n_inputs]'], {}), '(tf.float32, [None, n_steps, n_inputs])\n', (753, 792), True, 'import tensorflow as tf\n'), ((797, 851), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, n_steps, n_outputs]'], {}), '(tf.float32, [None, n_steps, n_outputs])\n', (811, 851), True, 'import tensorflow as tf\n'), ((866, 937), 'tensorflow.contrib.rnn.BasicRNNCell', 'tf.contrib.rnn.BasicRNNCell', ([], {'num_units': 'n_neurons', 'activation': 'tf.nn.relu'}), '(num_units=n_neurons, activation=tf.nn.relu)\n', (893, 937), True, 'import tensorflow as tf\n'), ((964, 1014), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['basic_cell', 'X'], {'dtype': 'tf.float32'}), '(basic_cell, X, dtype=tf.float32)\n', (981, 1014), True, 'import tensorflow as tf\n'), ((1065, 1105), 'tensorflow.reshape', 'tf.reshape', (['rnn_outputs', '[-1, n_neurons]'], {}), '(rnn_outputs, [-1, n_neurons])\n', (1075, 1105), True, 'import tensorflow as tf\n'), ((1124, 1171), 'tensorflow.layers.dense', 'tf.layers.dense', (['stacked_rnn_outputs', 'n_outputs'], {}), '(stacked_rnn_outputs, n_outputs)\n', (1139, 1171), True, 'import tensorflow as tf\n'), ((1182, 1235), 'tensorflow.reshape', 'tf.reshape', (['stacked_outputs', '[-1, n_steps, n_outputs]'], {}), '(stacked_outputs, [-1, n_steps, n_outputs])\n', (1192, 1235), True, 'import tensorflow as tf\n'), ((1294, 1345), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 
'learning_rate'}), '(learning_rate=learning_rate)\n', (1316, 1345), True, 'import tensorflow as tf\n'), ((1393, 1426), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1424, 1426), True, 'import tensorflow as tf\n'), ((2915, 2942), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 4)'}), '(figsize=(11, 4))\n', (2925, 2942), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2958), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (2953, 2958), True, 'import matplotlib.pyplot as plt\n'), ((2959, 2987), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sequence1', '"""b-"""'], {}), "(t, sequence1, 'b-')\n", (2967, 2987), True, 'import matplotlib.pyplot as plt\n'), ((2988, 3049), 'matplotlib.pyplot.plot', 'plt.plot', (['t[:n_steps]', 'sequence1[:n_steps]', '"""b-"""'], {'linewidth': '(3)'}), "(t[:n_steps], sequence1[:n_steps], 'b-', linewidth=3)\n", (2996, 3049), True, 'import matplotlib.pyplot as plt\n'), ((3049, 3067), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (3059, 3067), True, 'import matplotlib.pyplot as plt\n'), ((3068, 3087), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value"""'], {}), "('Value')\n", (3078, 3087), True, 'import matplotlib.pyplot as plt\n'), ((3089, 3105), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (3100, 3105), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3134), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sequence2', '"""b-"""'], {}), "(t, sequence2, 'b-')\n", (3114, 3134), True, 'import matplotlib.pyplot as plt\n'), ((3135, 3196), 'matplotlib.pyplot.plot', 'plt.plot', (['t[:n_steps]', 'sequence2[:n_steps]', '"""b-"""'], {'linewidth': '(3)'}), "(t[:n_steps], sequence2[:n_steps], 'b-', linewidth=3)\n", (3143, 3196), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3215), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (3207, 3215), True, 'import 
matplotlib.pyplot as plt\n'), ((3216, 3226), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3224, 3226), True, 'import matplotlib.pyplot as plt\n'), ((1258, 1280), 'tensorflow.square', 'tf.square', (['(outputs - y)'], {}), '(outputs - y)\n', (1267, 1280), True, 'import tensorflow as tf\n'), ((1470, 1482), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1480, 1482), True, 'import tensorflow as tf\n'), ((2017, 2029), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2027, 2029), True, 'import tensorflow as tf\n'), ((306, 335), 'numpy.random.rand', 'np.random.rand', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (320, 335), True, 'import numpy as np\n'), ((245, 258), 'numpy.sin', 'np.sin', (['(t * 5)'], {}), '(t * 5)\n', (251, 258), True, 'import numpy as np\n'), ((391, 418), 'numpy.arange', 'np.arange', (['(0.0)', '(n_steps + 1)'], {}), '(0.0, n_steps + 1)\n', (400, 418), True, 'import numpy as np\n'), ((225, 234), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (231, 234), True, 'import numpy as np\n'), ((2445, 2475), 'numpy.array', 'np.array', (['sequence1[-n_steps:]'], {}), '(sequence1[-n_steps:])\n', (2453, 2475), True, 'import numpy as np\n'), ((2758, 2788), 'numpy.array', 'np.array', (['sequence2[-n_steps:]'], {}), '(sequence2[-n_steps:])\n', (2766, 2788), True, 'import numpy as np\n')] |
import os
from argparse import ArgumentParser
from glob import glob
import cv2
import numpy as np
import torch
import torchvision
import matplotlib as mpl
import matplotlib.pyplot as plt
from PIL import Image
from fiery.trainer import TrainingModule
from fiery.utils.network import NormalizeInverse
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
from fiery.utils.visualisation import plot_instance_map, generate_instance_colours, make_contour, convert_figure_numpy
EXAMPLE_DATA_PATH = 'example_data'
def plot_prediction(image, output, cfg):
    """Render one FIERY prediction as a single RGB image.

    Left 3x2 grid: the six present-frame camera images; right column: the
    predicted BEV instance segmentation with future trajectories overlaid.

    :param image: camera tensor; indexed as image[0, -1] -> 6 camera frames
                  (assumes batch dim 0 and time dim 1 — TODO confirm)
    :param output: raw model output consumed by
                   predict_instance_segmentation_and_trajectories
    :param cfg: config providing IMAGE.NAMES and IMAGE.FINAL_DIM
    :return: the rendered figure as a numpy array
    """
    # Process predictions
    consistent_instance_seg, matched_centers = predict_instance_segmentation_and_trajectories(
        output, compute_matched_centers=True
    )
    # Plot future trajectories
    # [1:] drops id 0, which is the background label in the instance map
    unique_ids = torch.unique(consistent_instance_seg[0, 0]).cpu().long().numpy()[1:]
    instance_map = dict(zip(unique_ids, unique_ids))
    instance_colours = generate_instance_colours(instance_map)
    vis_image = plot_instance_map(consistent_instance_seg[0, 0].cpu().numpy(), instance_map)
    trajectory_img = np.zeros(vis_image.shape, dtype=np.uint8)
    for instance_id in unique_ids:
        path = matched_centers[instance_id]
        # draw the trajectory as line segments between consecutive centers
        for t in range(len(path) - 1):
            color = instance_colours[instance_id].tolist()
            cv2.line(trajectory_img, tuple(path[t]), tuple(path[t + 1]),
                     color, 4)
    # Overlay arrows
    temp_img = cv2.addWeighted(vis_image, 0.7, trajectory_img, 0.3, 1.0)
    # mask selects pixels where a trajectory was actually drawn
    mask = ~ np.all(trajectory_img == 0, axis=2)
    vis_image[mask] = temp_img[mask]
    # Plot present RGB frames and predictions
    val_w = 2.99
    cameras = cfg.IMAGE.NAMES
    image_ratio = cfg.IMAGE.FINAL_DIM[0] / cfg.IMAGE.FINAL_DIM[1]
    val_h = val_w * image_ratio
    fig = plt.figure(figsize=(4 * val_w, 2 * val_h))
    width_ratios = (val_w, val_w, val_w, val_w)
    gs = mpl.gridspec.GridSpec(2, 4, width_ratios=width_ratios)
    gs.update(wspace=0.0, hspace=0.0, left=0.0, right=1.0, top=1.0, bottom=0.0)
    # undo ImageNet normalisation so the camera frames display correctly
    denormalise_img = torchvision.transforms.Compose(
        (NormalizeInverse(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
         torchvision.transforms.ToPILImage(),)
    )
    for imgi, img in enumerate(image[0, -1]):
        # cameras fill columns 0-2 of both rows; column 3 is kept for the BEV plot
        ax = plt.subplot(gs[imgi // 3, imgi % 3])
        showimg = denormalise_img(img.cpu())
        if imgi > 2:
            # presumably the last three are rear cameras, mirrored for display — TODO confirm
            showimg = showimg.transpose(Image.FLIP_LEFT_RIGHT)
        plt.annotate(cameras[imgi].replace('_', ' ').replace('CAM ', ''), (0.01, 0.87), c='white',
                     xycoords='axes fraction', fontsize=14)
        plt.imshow(showimg)
        plt.axis('off')
    # BEV prediction in the right-hand column, flipped to match map orientation
    ax = plt.subplot(gs[:, 3])
    plt.imshow(make_contour(vis_image[::-1, ::-1]))
    plt.axis('off')
    plt.draw()
    figure_numpy = convert_figure_numpy(fig)
    plt.close()
    return figure_numpy
def download_example_data():
    """Download the FIERY example ``.npz`` files into ``EXAMPLE_DATA_PATH``.

    Improvements over the original: files that already exist locally are
    skipped (no re-download on every run), and HTTP errors are raised before
    anything is written, so a failed request can no longer leave an empty or
    HTML-error-page file on disk.
    """
    from requests import get
    def download(url, file_name):
        # Fetch first and validate the status; only then open the target file.
        response = get(url)
        response.raise_for_status()
        # write to file in binary mode
        with open(file_name, "wb") as file:
            file.write(response.content)
    os.makedirs(EXAMPLE_DATA_PATH, exist_ok=True)
    url_list = ['https://github.com/wayveai/fiery/releases/download/v1.0/example_1.npz',
                'https://github.com/wayveai/fiery/releases/download/v1.0/example_2.npz',
                'https://github.com/wayveai/fiery/releases/download/v1.0/example_3.npz',
                'https://github.com/wayveai/fiery/releases/download/v1.0/example_4.npz'
                ]
    for url in url_list:
        target = os.path.join(EXAMPLE_DATA_PATH, os.path.basename(url))
        # Skip files fetched on a previous run.
        if not os.path.exists(target):
            download(url, target)
def visualise(checkpoint_path):
    """Run the FIERY model on the bundled example data and save visualisations.

    Loads the checkpoint, downloads the example ``.npz`` files if needed,
    runs a forward pass per example, and writes one PNG per input into
    ``./output_vis``.

    :param checkpoint_path: path to a TrainingModule checkpoint file
    """
    trainer = TrainingModule.load_from_checkpoint(checkpoint_path, strict=True)
    # NOTE(review): hard-codes GPU 0 — this fails on CPU-only machines.
    device = torch.device('cuda:0')
    trainer = trainer.to(device)
    trainer.eval()
    # Download example data
    download_example_data()
    # Load data
    for data_path in sorted(glob(os.path.join(EXAMPLE_DATA_PATH, '*.npz'))):
        data = np.load(data_path)
        image = torch.from_numpy(data['image']).to(device)
        intrinsics = torch.from_numpy(data['intrinsics']).to(device)
        extrinsics = torch.from_numpy(data['extrinsics']).to(device)
        future_egomotions = torch.from_numpy(data['future_egomotion']).to(device)
        # Forward pass
        with torch.no_grad():
            output = trainer.model(image, intrinsics, extrinsics, future_egomotions)
        figure_numpy = plot_prediction(image, output, trainer.cfg)
        os.makedirs('./output_vis', exist_ok=True)
        # Output name mirrors the input file name with a .png suffix.
        output_filename = os.path.join('./output_vis', os.path.basename(data_path).split('.')[0]) + '.png'
        Image.fromarray(figure_numpy).save(output_filename)
        print(f'Saved output in {output_filename}')
if __name__ == '__main__':
    # CLI entry point: parse the checkpoint path and run the visualisation.
    cli = ArgumentParser(description='Fiery visualisation')
    cli.add_argument('--checkpoint', default='./fiery.ckpt', type=str, help='path to checkpoint')
    cli_args = cli.parse_args()
    visualise(cli_args.checkpoint)
| [
"fiery.trainer.TrainingModule.load_from_checkpoint",
"fiery.utils.network.NormalizeInverse",
"fiery.utils.instance.predict_instance_segmentation_and_trajectories",
"torchvision.transforms.ToPILImage",
"fiery.utils.visualisation.make_contour",
"torch.from_numpy",
"matplotlib.pyplot.imshow",
"fiery.util... | [((652, 740), 'fiery.utils.instance.predict_instance_segmentation_and_trajectories', 'predict_instance_segmentation_and_trajectories', (['output'], {'compute_matched_centers': '(True)'}), '(output,\n compute_matched_centers=True)\n', (698, 740), False, 'from fiery.utils.instance import predict_instance_segmentation_and_trajectories\n'), ((945, 984), 'fiery.utils.visualisation.generate_instance_colours', 'generate_instance_colours', (['instance_map'], {}), '(instance_map)\n', (970, 984), False, 'from fiery.utils.visualisation import plot_instance_map, generate_instance_colours, make_contour, convert_figure_numpy\n'), ((1099, 1140), 'numpy.zeros', 'np.zeros', (['vis_image.shape'], {'dtype': 'np.uint8'}), '(vis_image.shape, dtype=np.uint8)\n', (1107, 1140), True, 'import numpy as np\n'), ((1459, 1516), 'cv2.addWeighted', 'cv2.addWeighted', (['vis_image', '(0.7)', 'trajectory_img', '(0.3)', '(1.0)'], {}), '(vis_image, 0.7, trajectory_img, 0.3, 1.0)\n', (1474, 1516), False, 'import cv2\n'), ((1805, 1847), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4 * val_w, 2 * val_h)'}), '(figsize=(4 * val_w, 2 * val_h))\n', (1815, 1847), True, 'import matplotlib.pyplot as plt\n'), ((1905, 1959), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', (['(2)', '(4)'], {'width_ratios': 'width_ratios'}), '(2, 4, width_ratios=width_ratios)\n', (1926, 1959), True, 'import matplotlib as mpl\n'), ((2677, 2698), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:, 3]'], {}), '(gs[:, 3])\n', (2688, 2698), True, 'import matplotlib.pyplot as plt\n'), ((2755, 2770), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2763, 2770), True, 'import matplotlib.pyplot as plt\n'), ((2776, 2786), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2784, 2786), True, 'import matplotlib.pyplot as plt\n'), ((2806, 2831), 'fiery.utils.visualisation.convert_figure_numpy', 'convert_figure_numpy', (['fig'], {}), '(fig)\n', (2826, 2831), False, 'from 
fiery.utils.visualisation import plot_instance_map, generate_instance_colours, make_contour, convert_figure_numpy\n'), ((2836, 2847), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2845, 2847), True, 'import matplotlib.pyplot as plt\n'), ((3820, 3885), 'fiery.trainer.TrainingModule.load_from_checkpoint', 'TrainingModule.load_from_checkpoint', (['checkpoint_path'], {'strict': '(True)'}), '(checkpoint_path, strict=True)\n', (3855, 3885), False, 'from fiery.trainer import TrainingModule\n'), ((3900, 3922), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (3912, 3922), False, 'import torch\n'), ((4957, 5006), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Fiery visualisation"""'}), "(description='Fiery visualisation')\n", (4971, 5006), False, 'from argparse import ArgumentParser\n'), ((1530, 1565), 'numpy.all', 'np.all', (['(trajectory_img == 0)'], {'axis': '(2)'}), '(trajectory_img == 0, axis=2)\n', (1536, 1565), True, 'import numpy as np\n'), ((2289, 2325), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[imgi // 3, imgi % 3]'], {}), '(gs[imgi // 3, imgi % 3])\n', (2300, 2325), True, 'import matplotlib.pyplot as plt\n'), ((2623, 2642), 'matplotlib.pyplot.imshow', 'plt.imshow', (['showimg'], {}), '(showimg)\n', (2633, 2642), True, 'import matplotlib.pyplot as plt\n'), ((2651, 2666), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2659, 2666), True, 'import matplotlib.pyplot as plt\n'), ((2714, 2749), 'fiery.utils.visualisation.make_contour', 'make_contour', (['vis_image[::-1, ::-1]'], {}), '(vis_image[::-1, ::-1])\n', (2726, 2749), False, 'from fiery.utils.visualisation import plot_instance_map, generate_instance_colours, make_contour, convert_figure_numpy\n'), ((3179, 3212), 'os.path.exists', 'os.path.exists', (['EXAMPLE_DATA_PATH'], {}), '(EXAMPLE_DATA_PATH)\n', (3193, 3212), False, 'import os\n'), ((3222, 3267), 'os.makedirs', 'os.makedirs', (['EXAMPLE_DATA_PATH'], {'exist_ok': 
'(True)'}), '(EXAMPLE_DATA_PATH, exist_ok=True)\n', (3233, 3267), False, 'import os\n'), ((4140, 4158), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (4147, 4158), True, 'import numpy as np\n'), ((4653, 4695), 'os.makedirs', 'os.makedirs', (['"""./output_vis"""'], {'exist_ok': '(True)'}), "('./output_vis', exist_ok=True)\n", (4664, 4695), False, 'import os\n'), ((2104, 2175), 'fiery.utils.network.NormalizeInverse', 'NormalizeInverse', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2120, 2175), False, 'from fiery.utils.network import NormalizeInverse\n'), ((2186, 2221), 'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ([], {}), '()\n', (2219, 2221), False, 'import torchvision\n'), ((3090, 3098), 'requests.get', 'get', (['url'], {}), '(url)\n', (3093, 3098), False, 'from requests import get\n'), ((4081, 4121), 'os.path.join', 'os.path.join', (['EXAMPLE_DATA_PATH', '"""*.npz"""'], {}), "(EXAMPLE_DATA_PATH, '*.npz')\n", (4093, 4121), False, 'import os\n'), ((4475, 4490), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4488, 4490), False, 'import torch\n'), ((4175, 4206), 'torch.from_numpy', 'torch.from_numpy', (["data['image']"], {}), "(data['image'])\n", (4191, 4206), False, 'import torch\n'), ((4239, 4275), 'torch.from_numpy', 'torch.from_numpy', (["data['intrinsics']"], {}), "(data['intrinsics'])\n", (4255, 4275), False, 'import torch\n'), ((4308, 4344), 'torch.from_numpy', 'torch.from_numpy', (["data['extrinsics']"], {}), "(data['extrinsics'])\n", (4324, 4344), False, 'import torch\n'), ((4384, 4426), 'torch.from_numpy', 'torch.from_numpy', (["data['future_egomotion']"], {}), "(data['future_egomotion'])\n", (4400, 4426), False, 'import torch\n'), ((4811, 4840), 'PIL.Image.fromarray', 'Image.fromarray', (['figure_numpy'], {}), '(figure_numpy)\n', (4826, 4840), False, 'from PIL import Image\n'), ((3748, 3769), 'os.path.basename', 
'os.path.basename', (['url'], {}), '(url)\n', (3764, 3769), False, 'import os\n'), ((4751, 4778), 'os.path.basename', 'os.path.basename', (['data_path'], {}), '(data_path)\n', (4767, 4778), False, 'import os\n'), ((800, 843), 'torch.unique', 'torch.unique', (['consistent_instance_seg[0, 0]'], {}), '(consistent_instance_seg[0, 0])\n', (812, 843), False, 'import torch\n')] |
import asyncio
import discord
from discord.ext import commands, tasks
import os
import random
import dotenv
import difflib
import configparser
###
version = '4.0.0'
###
bot = commands.Bot(command_prefix = '!', owner_id = 272446903940153345, intents = discord.Intents.all())
bot.remove_command('help')
config = configparser.ConfigParser()
config.read('settings.cfg')
dotenv.load_dotenv()
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
@bot.command(name='load')
@commands.is_owner()
async def load(ctx, extension):
    """Owner-only: load the cog ``cogs.<extension>`` and react with the result.

    Reactions: ✅ success, ❌ already loaded, ❓ extension not found.
    """
    try:
        bot.load_extension(f"cogs.{extension}")
    except commands.ExtensionAlreadyLoaded:
        await ctx.message.add_reaction('❌')
    except commands.ExtensionNotFound:
        await ctx.message.add_reaction('❓')
    else:
        # React exactly once on success; the original added ✅ twice
        # (inside the try body and again in this else branch).
        await ctx.message.add_reaction('✅')
@bot.command(name='unload')
@commands.is_owner()
async def unload(ctx, extension):
    """Owner-only: unload the cog ``cogs.<extension>`` and react with the result.

    Reactions: ✅ success, ❌ not loaded, ❓ extension not found.
    """
    try:
        bot.unload_extension(f'cogs.{extension}')
    except commands.ExtensionNotLoaded:
        await ctx.message.add_reaction('❌')
    except commands.ExtensionNotFound:
        await ctx.message.add_reaction('❓')
    else:
        # React exactly once on success; the original added ✅ twice
        # (inside the try body and again in this else branch).
        await ctx.message.add_reaction('✅')
@bot.command(name='reload')
@commands.is_owner()
async def reload(ctx, extension):
    """Owner-only: reload the cog ``cogs.<extension>`` (unload, then load).

    Reactions: ✅ success, ❌ not loaded, ❓ extension not found.
    """
    try:
        bot.unload_extension(f'cogs.{extension}')
        bot.load_extension(f'cogs.{extension}')
    except commands.ExtensionNotLoaded:
        await ctx.message.add_reaction('❌')
    except commands.ExtensionNotFound:
        await ctx.message.add_reaction('❓')
    else:
        # React exactly once on success; the original added ✅ twice
        # (inside the try body and again in this else branch).
        await ctx.message.add_reaction('✅')
# Status messages cycled through by the presence loop below.
presence = [f'{version} Released', 'Belle Delphine <3', 'Fortnite is gay', 'Bugs are Features', 'By Staubtornado', 'Hentai']
@tasks.loop(seconds=20.0)
async def status_change():
    """Every 20 s, set the bot's activity to a random entry from `presence`."""
    await bot.wait_until_ready()
    await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name=f'!help | {random.choice(presence)}'))
status_change.start()
# Per-error-type lists of user ids that recently received an error message;
# used to rate-limit how often the same user sees each kind of error embed.
CommandOnCooldown_check = []
CommandNotFound_check = []
Else_check = []
@bot.event
async def on_command_error(ctx, error):
    """Global command error handler (messages are in German).

    Handles cooldown errors, unknown commands (with close-match suggestions),
    and a generic fallback. Each branch suppresses repeat messages to the
    same user for a short period via the module-level check lists.
    """
    try:
        if isinstance(error, commands.CommandOnCooldown):
            # User already got a cooldown notice recently -> stay silent.
            if ctx.author.id in CommandOnCooldown_check:
                return
            else:
                try:
                    await ctx.send(embed = discord.Embed(title = 'Cooldown...', description = f'Der Command kann erst in {round(error.retry_after, 2)} Sekunden wieder ausgeführt werden.', colour = int(config.get('COLOUR', 'rot'), base = 16)) .set_footer(text = f'Verursacht durch {ctx.author} | Du kannst diese Nachricht erst nach dem Cooldown wiedersehen.'))
                except discord.Forbidden:
                    # Bot may not send messages in this channel.
                    return
                else:
                    # Mute further cooldown notices for this user until the cooldown expires.
                    CommandOnCooldown_check.append(ctx.author.id)
                    await asyncio.sleep(error.retry_after)
                    CommandOnCooldown_check.remove(ctx.author.id)
                    return
        elif isinstance(error, commands.CommandNotFound):
            if ctx.author.id in CommandNotFound_check:
                return
            else:
                # Collect only the commands this user can actually run here.
                available_commands = []
                for command in bot.all_commands:
                    try:
                        if await(bot.get_command(command).can_run(ctx)) is True:
                            available_commands.append(command)
                    except Exception:
                        # can_run raises when a check fails; such commands are simply skipped.
                        pass
                suggestion = ""
                # Fuzzy-match the typed command against the runnable ones.
                # NOTE(review): [4:] skips prefix plus three characters — confirm
                # this offset is intended (the prefix '!' is a single character).
                similarity_search = difflib.get_close_matches(str(ctx.message.content)[4:], available_commands)
                for s in similarity_search:
                    suggestion += f'**-** `!{s}`\n'
                embed = discord.Embed(title = 'Command nicht gefunden...', colour = int(config.get('COLOUR', 'rot'), base = 16))
                if suggestion != '':
                    embed.description = f'Wir konnten keine Commands mit dem Namen `{str(ctx.message.content)[1:]}` finden. Villeicht meintest du:\n{suggestion}'
                else:
                    embed.description = f'Wir konnten keine Commands mit dem Namen `{str(ctx.message.content)[1:]}` finden. Nutze `!help` für Hilfe.'
                try:
                    await ctx.send(embed = embed)
                except discord.Forbidden:
                    return
                else:
                    # Mute further not-found notices for this user for 10 seconds.
                    CommandNotFound_check.append(ctx.author.id)
                    await asyncio.sleep(10)
                    CommandNotFound_check.remove(ctx.author.id)
                    return
        # elif isinstance(error, commands.CheckFailure):
        #     return
        else:
            # Fallback for any other command error: generic embed with details.
            if ctx.author.id in Else_check:
                return
            else:
                try:
                    await ctx.send(embed = discord.Embed(title = 'Unbekannter Fehler...', description = 'Ein unbekannter Fehler ist aufgetreten.', colour = int(config.get('COLOUR', 'rot'), base = 16)) .add_field(name = 'Details', value = str(error)))
                except discord.Forbidden:
                    return
                else:
                    Else_check.append(ctx.author.id)
                    await asyncio.sleep(10)
                    Else_check.remove(ctx.author.id)
                    return
    except Exception as err:
        # The handler itself failed: report both the original and the new error.
        return await ctx.send(embed = discord.Embed(title = 'Schwerwiegender Fehler', description = f'Ein schwerwiegender Fehler ist in unserem Error-Handler ausgetreten. Bitte konaktiere den Support und sende halte diesen Fehlercode bereit:\n`{error, err}`', colour = int(config.get('COLOUR', 'rot'), base = 16)))
@bot.event
async def on_ready():
    """Log readiness once the gateway connection is established."""
    print('BOT is online!')
bot.run(os.environ['DISCORD_TOKEN']) | [
"os.listdir",
"random.choice",
"configparser.ConfigParser",
"discord.Intents.all",
"discord.ext.commands.is_owner",
"dotenv.load_dotenv",
"asyncio.sleep",
"discord.ext.tasks.loop"
] | [((327, 354), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (352, 354), False, 'import configparser\n'), ((387, 407), 'dotenv.load_dotenv', 'dotenv.load_dotenv', ([], {}), '()\n', (405, 407), False, 'import dotenv\n'), ((425, 445), 'os.listdir', 'os.listdir', (['"""./cogs"""'], {}), "('./cogs')\n", (435, 445), False, 'import os\n'), ((565, 584), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (582, 584), False, 'from discord.ext import commands, tasks\n'), ((986, 1005), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (1003, 1005), False, 'from discord.ext import commands, tasks\n'), ((1407, 1426), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (1424, 1426), False, 'from discord.ext import commands, tasks\n'), ((1974, 1998), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'seconds': '(20.0)'}), '(seconds=20.0)\n', (1984, 1998), False, 'from discord.ext import commands, tasks\n'), ((264, 285), 'discord.Intents.all', 'discord.Intents.all', ([], {}), '()\n', (283, 285), False, 'import discord\n'), ((3096, 3128), 'asyncio.sleep', 'asyncio.sleep', (['error.retry_after'], {}), '(error.retry_after)\n', (3109, 3128), False, 'import asyncio\n'), ((4790, 4807), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (4803, 4807), False, 'import asyncio\n'), ((5535, 5552), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (5548, 5552), False, 'import asyncio\n'), ((2172, 2195), 'random.choice', 'random.choice', (['presence'], {}), '(presence)\n', (2185, 2195), False, 'import random\n')] |
import tensorflow as tf
import numpy as np
def euclidean_dist(x, y):
    """Return the Euclidean (L2) distance between the two arrays."""
    diff = x - y
    return np.sqrt(np.square(diff).sum())
def limit_gpu():
    """Cap TensorFlow to a single 4 GB logical device on the first visible GPU."""
    physical = tf.config.list_physical_devices('GPU')
    if not physical:
        # Nothing to configure on CPU-only machines.
        return
    try:
        tf.config.set_logical_device_configuration(
            physical[0],
            [tf.config.LogicalDeviceConfiguration(memory_limit=4000)],
        )
        logical = tf.config.list_logical_devices('GPU')
        print(len(physical), "Physical GPUs,", len(logical), "Logical GPUs")
    except RuntimeError as err:
        # Configuration must happen before GPUs are initialised.
        print(err)
| [
"tensorflow.config.list_logical_devices",
"tensorflow.config.list_physical_devices",
"tensorflow.config.LogicalDeviceConfiguration",
"numpy.linalg.norm"
] | [((82, 103), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - y)'], {}), '(x - y)\n', (96, 103), True, 'import numpy as np\n'), ((134, 172), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (165, 172), True, 'import tensorflow as tf\n'), ((390, 427), 'tensorflow.config.list_logical_devices', 'tf.config.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (420, 427), True, 'import tensorflow as tf\n'), ((305, 360), 'tensorflow.config.LogicalDeviceConfiguration', 'tf.config.LogicalDeviceConfiguration', ([], {'memory_limit': '(4000)'}), '(memory_limit=4000)\n', (341, 360), True, 'import tensorflow as tf\n')] |
from base import BaseDataSet, BaseDataLoader
from utils import pallete
import numpy as np
import os
import scipy
import torch
from PIL import Image
import cv2
from torch.utils.data import Dataset
from torchvision import transforms
import json
class VOCDataset(BaseDataSet):
    """Pascal VOC 2012 semantic-segmentation dataset (21 classes incl. background)."""
    def __init__(self, **kwargs):
        self.num_classes = 21
        # Color palette used to render predicted label maps.
        self.palette = pallete.get_voc_pallete(self.num_classes)
        super(VOCDataset, self).__init__(**kwargs)
    def _set_files(self):
        """Read the split file and populate self.files / self.labels paths."""
        self.root = os.path.join(self.root, 'VOCdevkit/VOC2012')
        if self.split == "val":
            file_list = os.path.join("dataloaders/voc_splits", f"{self.split}" + ".txt")
        elif self.split in ["train_supervised", "train_unsupervised"]:
            # Train splits depend on how many labeled examples are used.
            file_list = os.path.join("dataloaders/voc_splits", f"{self.n_labeled_examples}_{self.split}" + ".txt")
        else:
            raise ValueError(f"Invalid split name {self.split}")
        # Each line holds "<image path> <label path>" separated by a space.
        file_list = [line.rstrip().split(' ') for line in tuple(open(file_list, "r"))]
        self.files, self.labels = list(zip(*file_list))
    def _load_data(self, index):
        """Load (image, label, image_id) for one sample.

        Paths in the split files start with '/', hence the [1:] slices
        before joining with the dataset root.
        """
        image_path = os.path.join(self.root, self.files[index][1:])
        image = np.asarray(Image.open(image_path), dtype=np.float32)
        image_id = self.files[index].split("/")[-1].split(".")[0]
        if self.use_weak_lables:
            # Use pseudo-labels produced elsewhere instead of ground truth.
            label_path = os.path.join(self.weak_labels_output, image_id+".png")
        else:
            label_path = os.path.join(self.root, self.labels[index][1:])
        label = np.asarray(Image.open(label_path), dtype=np.int32)
        return image, label, image_id
class VOC(BaseDataLoader):
    """DataLoader wrapper around VOCDataset with ImageNet normalisation stats."""
    def __init__(self, kwargs):
        self.MEAN = [0.485, 0.456, 0.406]
        self.STD = [0.229, 0.224, 0.225]
        self.batch_size = kwargs.pop('batch_size')
        kwargs['mean'] = self.MEAN
        kwargs['std'] = self.STD
        # 255 marks pixels to ignore in the loss.
        kwargs['ignore_index'] = 255
        # dict.pop with a default replaces the original bare try/except,
        # which silently swallowed *any* exception, not just a missing key.
        shuffle = kwargs.pop('shuffle', False)
        num_workers = kwargs.pop('num_workers')
        self.dataset = VOCDataset(**kwargs)
        super(VOC, self).__init__(self.dataset, self.batch_size, shuffle, num_workers, val_split=None)
| [
"PIL.Image.open",
"os.path.join",
"utils.pallete.get_voc_pallete"
] | [((363, 404), 'utils.pallete.get_voc_pallete', 'pallete.get_voc_pallete', (['self.num_classes'], {}), '(self.num_classes)\n', (386, 404), False, 'from utils import pallete\n'), ((503, 547), 'os.path.join', 'os.path.join', (['self.root', '"""VOCdevkit/VOC2012"""'], {}), "(self.root, 'VOCdevkit/VOC2012')\n", (515, 547), False, 'import os\n'), ((1133, 1179), 'os.path.join', 'os.path.join', (['self.root', 'self.files[index][1:]'], {}), '(self.root, self.files[index][1:])\n', (1145, 1179), False, 'import os\n'), ((604, 668), 'os.path.join', 'os.path.join', (['"""dataloaders/voc_splits"""', "(f'{self.split}' + '.txt')"], {}), "('dataloaders/voc_splits', f'{self.split}' + '.txt')\n", (616, 668), False, 'import os\n'), ((1207, 1229), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1217, 1229), False, 'from PIL import Image\n'), ((1373, 1429), 'os.path.join', 'os.path.join', (['self.weak_labels_output', "(image_id + '.png')"], {}), "(self.weak_labels_output, image_id + '.png')\n", (1385, 1429), False, 'import os\n'), ((1467, 1514), 'os.path.join', 'os.path.join', (['self.root', 'self.labels[index][1:]'], {}), '(self.root, self.labels[index][1:])\n', (1479, 1514), False, 'import os\n'), ((1542, 1564), 'PIL.Image.open', 'Image.open', (['label_path'], {}), '(label_path)\n', (1552, 1564), False, 'from PIL import Image\n'), ((764, 859), 'os.path.join', 'os.path.join', (['"""dataloaders/voc_splits"""', "(f'{self.n_labeled_examples}_{self.split}' + '.txt')"], {}), "('dataloaders/voc_splits', \n f'{self.n_labeled_examples}_{self.split}' + '.txt')\n", (776, 859), False, 'import os\n')] |
# coding: utf-8
from kerasy.Bio.tandem import find_tandem
from kerasy.utils import generateSeq
len_sequences = 1000
def get_test_data():
    """Return a reproducible random DNA string of length ``len_sequences``."""
    bases = generateSeq(size=len_sequences,
                        nucleic_acid='DNA',
                        weights=None,
                        seed=123)
    return "".join(bases)
def test_find_tandem():
    """Check that the SAIS and DP tandem-repeat finders agree on random DNA."""
    sequence = get_test_data()
    max_val_sais, tandem_lists_sais = find_tandem(sequence, method="SAIS")
    tandem_sais = tandem_lists_sais[0]
    max_val_dp, tandem_lists_dp = find_tandem(sequence, method="DP")
    tandem_dp = tandem_lists_dp[0]
    # Both methods must report the same maximum repeat length.
    assert max_val_sais == max_val_dp
    # The reported tandems may differ by a cyclic rotation, so accept any
    # rotation of the DP result that matches the SAIS result.
    assert any([tandem_dp[i:]+tandem_dp[:i] == tandem_sais for i in range(len(tandem_dp))])
| [
"kerasy.Bio.tandem.find_tandem",
"kerasy.utils.generateSeq"
] | [((154, 229), 'kerasy.utils.generateSeq', 'generateSeq', ([], {'size': 'len_sequences', 'nucleic_acid': '"""DNA"""', 'weights': 'None', 'seed': '(123)'}), "(size=len_sequences, nucleic_acid='DNA', weights=None, seed=123)\n", (165, 229), False, 'from kerasy.utils import generateSeq\n'), ((459, 495), 'kerasy.Bio.tandem.find_tandem', 'find_tandem', (['sequence'], {'method': '"""SAIS"""'}), "(sequence, method='SAIS')\n", (470, 495), False, 'from kerasy.Bio.tandem import find_tandem\n'), ((570, 604), 'kerasy.Bio.tandem.find_tandem', 'find_tandem', (['sequence'], {'method': '"""DP"""'}), "(sequence, method='DP')\n", (581, 604), False, 'from kerasy.Bio.tandem import find_tandem\n')] |
from PIL import Image, ImageDraw
from numpy import array, random, vstack, ones, linalg
from const import TOWERS
from copy import deepcopy
from os import path
class MapDrawer:
    """
    a class for drawing Dota2Maps with replay-parsed data

    Fits a linear world->pixel transform from known tower positions, then
    offers circle-drawing helpers in either coordinate system.
    NOTE: uses Python 2 APIs (dict.iteritems) — keep on a Python 2 runtime.
    """
    def __init__(self, towers, received_tables):
        """
        @param towers: table containing info about towers
        @param received_tables: the received_tables from the replay
        """
        self.coordinates = []
        libdir = path.abspath(path.dirname(__file__))
        self.image = Image.open(libdir + "/assets/dota2map.png")
        self.draw = ImageDraw.Draw(self.image)
        self.map_w, self.map_h = self.image.size
        # init information tables and respective columns
        tower_info_table = received_tables.by_dt['DT_DOTA_BaseNPC_Tower']
        position_x_index = tower_info_table.by_name['m_cellX']
        position_y_index = tower_info_table.by_name['m_cellY']
        position_vector_index = tower_info_table.by_name['m_vecOrigin']
        name_index = tower_info_table.by_name['m_iName']
        tower_list = deepcopy(TOWERS)
        # getting world coordinates for every tower in TOWERS
        for name, data in tower_list.iteritems():
            for t in towers:
                state = t.state
                state_name = state.get(name_index)
                if state_name == name:
                    # cell index plus sub-cell offset (128 units per cell)
                    data["worldX"] = state.get(position_x_index) + state.get(position_vector_index)[0] / 128.
                    # NOTE(review): worldX is overwritten on every match but
                    # worldY only on the first — confirm this asymmetry is intended.
                    if "worldY" not in data:
                        data["worldY"] = state.get(position_y_index) + state.get(position_vector_index)[1] / 128.
        # caching vals, so ordering stays the same throughout the comprehensions
        vals = tower_list.values()
        x = [v["worldX"] for v in vals if "worldX" in v]
        y = [v["worldY"] for v in vals if "worldY" in v]
        x_map = [v["x"] for v in vals if "worldX" in v]
        y_map = [v["y"] for v in vals if "worldY" in v]
        # least-squares fit of per-axis scale and offset that convert world
        # coordinates to pixel coordinates on the minimap
        Ax = vstack((x, ones(len(x)))).T
        Ay = vstack((y, ones(len(y)))).T
        self.scale_x, self.offset_x = linalg.lstsq(Ax, x_map)[0]
        self.scale_y, self.offset_y = linalg.lstsq(Ay, y_map)[0]
    def draw_circle_world_coordinates(self, worldX, worldY, r=20, color="red"):
        """
        draws a circle at the specified world coordinates (from bottom-left) with radius r (in px) and the color as required by PIL
        """
        # Convert once, then delegate to the map-coordinate primitive instead
        # of duplicating the ellipse-drawing code.
        x, y = self.convert_coordinates(worldX, worldY)
        self.draw_circle_map_coordinates(x, y, r, color)
    def draw_circle_world_coordinates_list(self, coordinates, r=20, color="red"):
        """
        same as draw_circle_world_coordinates, but for batch drawing
        """
        for x, y in coordinates:
            self.draw_circle_world_coordinates(x, y, r, color)
    def draw_circle_map_coordinates(self, x, y, r=20, color="red"):
        """
        draws a circle on the specified pixels (from bottom-left) with radius r (in px) and the color as required by PIL
        """
        # PIL's origin is top-left, hence the map_h - y flips.
        bounds = (x-r, self.map_h-(y+r), x+r, self.map_h-(y-r))
        bounds = tuple(int(round(a)) for a in bounds)
        self.draw.ellipse(bounds, fill = color)
    def draw_circle_map_coordinates_list(self, coordinates, r=20, color="red"):
        """
        same as draw_circle_map_coordinates, but for batch drawing
        """
        for x, y in coordinates:
            self.draw_circle_map_coordinates(x, y, r, color)
    def save(self, filename, scale=4):
        """
        saves the map, downscaled by the given integer factor
        """
        scaled = self.image.resize((self.map_w / scale, self.map_h / scale), Image.ANTIALIAS)
        scaled.save(filename)
    def convert_coordinates(self, worldX, worldY):
        """
        converts world coordinates to map coordinates by using the scale and offset defined in the __init__ method
        """
        return (worldX * self.scale_x) + self.offset_x, (worldY * self.scale_y) + self.offset_y
"PIL.Image.open",
"os.path.dirname",
"PIL.ImageDraw.Draw",
"numpy.linalg.lstsq",
"copy.deepcopy"
] | [((585, 628), 'PIL.Image.open', 'Image.open', (["(libdir + '/assets/dota2map.png')"], {}), "(libdir + '/assets/dota2map.png')\n", (595, 628), False, 'from PIL import Image, ImageDraw\n'), ((650, 676), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self.image'], {}), '(self.image)\n', (664, 676), False, 'from PIL import Image, ImageDraw\n'), ((1145, 1161), 'copy.deepcopy', 'deepcopy', (['TOWERS'], {}), '(TOWERS)\n', (1153, 1161), False, 'from copy import deepcopy\n'), ((539, 561), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (551, 561), False, 'from os import path\n'), ((2277, 2300), 'numpy.linalg.lstsq', 'linalg.lstsq', (['Ax', 'x_map'], {}), '(Ax, x_map)\n', (2289, 2300), False, 'from numpy import array, random, vstack, ones, linalg\n'), ((2343, 2366), 'numpy.linalg.lstsq', 'linalg.lstsq', (['Ay', 'y_map'], {}), '(Ay, y_map)\n', (2355, 2366), False, 'from numpy import array, random, vstack, ones, linalg\n')] |
def get_tokenizer(tokenizer):
    """Resolve *tokenizer* into a callable that splits a string into tokens.

    Accepts either a callable (returned unchanged) or one of the names
    "spacy", "moses", "revtok", "subword". Any other value raises ValueError.
    Missing optional dependencies are reported and the import error re-raised.
    """
    if callable(tokenizer):
        return tokenizer
    if tokenizer == "spacy":
        try:
            import spacy
            spacy_en = spacy.load('en')
            return lambda s: [tok.text for tok in spacy_en.tokenizer(s)]
        except (ImportError, AttributeError):
            # Same guidance for both failure modes.
            print("Please install SpaCy and the SpaCy English tokenizer. "
                  "See the docs at https://spacy.io for more information.")
            raise
    if tokenizer == "moses":
        try:
            from nltk.tokenize.moses import MosesTokenizer
            moses_tokenizer = MosesTokenizer()
            return moses_tokenizer.tokenize
        except ImportError:
            print("Please install NLTK. "
                  "See the docs at http://nltk.org for more information.")
            raise
        except LookupError:
            print("Please install the necessary NLTK corpora. "
                  "See the docs at http://nltk.org for more information.")
            raise
    if tokenizer == 'revtok':
        try:
            import revtok
            return revtok.tokenize
        except ImportError:
            print("Please install revtok.")
            raise
    if tokenizer == 'subword':
        try:
            import revtok
            return lambda x: revtok.tokenize(x, decap=True)
        except ImportError:
            print("Please install revtok.")
            raise
    raise ValueError("Requested tokenizer {}, valid choices are a "
                     "callable that takes a single string as input, "
                     "\"revtok\" for the revtok reversible tokenizer, "
                     "\"subword\" for the revtok caps-aware tokenizer, "
                     "\"spacy\" for the SpaCy English tokenizer, or "
                     "\"moses\" for the NLTK port of the Moses tokenization "
                     "script.".format(tokenizer))
| [
"spacy.load",
"revtok.tokenize",
"nltk.tokenize.moses.MosesTokenizer"
] | [((174, 190), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (184, 190), False, 'import spacy\n'), ((794, 810), 'nltk.tokenize.moses.MosesTokenizer', 'MosesTokenizer', ([], {}), '()\n', (808, 810), False, 'from nltk.tokenize.moses import MosesTokenizer\n'), ((1500, 1530), 'revtok.tokenize', 'revtok.tokenize', (['x'], {'decap': '(True)'}), '(x, decap=True)\n', (1515, 1530), False, 'import revtok\n')] |
from servee import frontendadmin
from servee.frontendadmin.insert import ModelInsert
from oldcontrib.media.image.models import Image
class ImageInsert(ModelInsert):
    # Frontend-admin insert handler that lets editors embed Image objects.
    model = Image
# Make the insert available in the servee frontend admin.
frontendadmin.site.register_insert(ImageInsert)
"servee.frontendadmin.site.register_insert"
] | [((185, 232), 'servee.frontendadmin.site.register_insert', 'frontendadmin.site.register_insert', (['ImageInsert'], {}), '(ImageInsert)\n', (219, 232), False, 'from servee import frontendadmin\n')] |
import torch
import numpy as np
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
def binary_reg(x: torch.Tensor):
    """Hard 0/1 threshold at x >= 0 with a sigmoid surrogate gradient.

    Straight-through trick: the returned values equal the hard threshold,
    while gradients flow through the sigmoid term.
    """
    soft = torch.sigmoid(x)
    hard = (x.detach() >= 0).float()
    # soft - soft.detach() is exactly zero in value but carries the gradient.
    return soft - soft.detach() + hard
class HIN2vec(nn.Module):
    """HIN2vec embedding model: scores (start node, end node, meta-path) triples."""
    def __init__(self, node_size, path_size, embed_dim, sigmoid_reg=False, r=True):
        """
        :param node_size: number of nodes (embedding-table rows)
        :param path_size: number of meta-path types
        :param embed_dim: embedding dimensionality
        :param sigmoid_reg: use plain sigmoid for path regularisation instead of
                            the straight-through binary_reg
        :param r: share one embedding table for start and end nodes
        """
        super().__init__()
        self.reg = torch.sigmoid if sigmoid_reg else binary_reg
        self.__initialize_model(node_size, path_size, embed_dim, r)
    def __initialize_model(self, node_size: int, path_size: int, embed_dim: int, r: bool) -> None:
        # With r=True start/end share parameters; otherwise separate tables.
        self.start_embeds = nn.Embedding(node_size, embed_dim)
        self.end_embeds = self.start_embeds if r else nn.Embedding(node_size, embed_dim)
        self.path_embeds = nn.Embedding(path_size, embed_dim)
        # self.classifier = nn.Sequential(
        #     nn.Linear(embed_dim, 1),
        #     nn.Sigmoid(),
        # )
    def forward(self, start_node: torch.LongTensor, end_node: torch.LongTensor, path: torch.LongTensor):
        """Return P(triple) in (0, 1), shape (batch_size,)."""
        # assert start_node.dim() == 1  # shape = (batch_size,)
        s = self.start_embeds(start_node)  # (batch_size, embed_size)
        e = self.end_embeds(end_node)
        p = self.path_embeds(path)
        # Regularise the path embedding (sigmoid or hard binary).
        p = self.reg(p)
        # Element-wise product of the three embeddings, summed per sample.
        agg = torch.mul(s, e)
        agg = torch.mul(agg, p)
        # agg = F.sigmoid(agg)
        # output = self.classifier(agg)
        output = torch.sigmoid(torch.sum(agg, axis=1))
        return output
def train(log_interval, model, device, train_loader: DataLoader, optimizer, loss_function, epoch):
    """Run one optimization pass over `train_loader`.

    Prints an in-place progress line every `log_interval` batches and a
    final newline when the epoch completes.
    """
    model.train()
    for idx, batch in enumerate(train_loader):
        data, target = (t.to(device) for t in batch)
        optimizer.zero_grad()
        # Each row of `data` holds the three model inputs as columns.
        output = model(data[:, 0], data[:, 1], data[:, 2])
        loss = loss_function(output.view(-1), target)
        loss.backward()
        optimizer.step()
        if idx % log_interval == 0:
            print(f'\rTrain Epoch: {epoch} '
                  f'[{idx * len(data)}/{len(train_loader.dataset)} ({100. * idx / len(train_loader):.3f}%)]\t'
                  f'Loss: {loss.item():.3f}\t\t',
                  # f'data = {data}\t target = {target}',
                  end='')
    print()
class NSTrainSet(Dataset):
    """Negative-sampling training set (fully random negatives).

    Each positive (start_node, end_node, path_id) triple is kept once
    (label 1) and tiled `neg` more times with the end-node column replaced
    by a random node id (label 0).
    """

    def __init__(self, sample, node_size, neg=5):
        """
        :param sample: triples from HIN.sample(): (start_node, end_node, path_id)
        :param node_size: number of nodes in the graph
        :param neg: number of negative samples per positive triple
        """
        print('init training dataset...')
        n_pos = len(sample)
        x = np.tile(sample, (neg + 1, 1))
        y = np.zeros(n_pos * (1 + neg))
        y[:n_pos] = 1
        # Corrupt the end-node column of every negative copy.
        # NOTE(review): randint's high bound is exclusive, so node id
        # node_size-1 is never sampled -- possible off-by-one, preserved.
        x[n_pos:, 1] = np.random.randint(0, node_size - 1, (n_pos * neg,))
        self.x = torch.LongTensor(x)
        self.y = torch.FloatTensor(y)
        self.length = len(x)
        print('finished')

    def __getitem__(self, index):
        return self.x[index], self.y[index]

    def __len__(self):
        return self.length
if __name__ == '__main__':
    # Smoke-test the two regularizers: print the identical forward/backward
    # sequence for torch.sigmoid and the straight-through binary_reg.
    for label, reg_fn in (('sigmoid', torch.sigmoid), ('binary', binary_reg)):
        print(label)
        a = torch.tensor([-1., 0., 1.], requires_grad=True)
        b = reg_fn(a)
        c = b.sum()
        print(a)
        print(b)
        print(c)
        c.backward()
        # .grad on non-leaf tensors (b, c) is None (with a warning);
        # only the leaf `a` accumulates a gradient.
        print(c.grad)
        print(b.grad)
        print(a.grad)
| [
"torch.mul",
"numpy.tile",
"torch.LongTensor",
"torch.sigmoid",
"torch.tensor",
"numpy.zeros",
"numpy.random.randint",
"torch.sum",
"torch.FloatTensor",
"torch.nn.Embedding"
] | [((205, 221), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (218, 221), False, 'import torch\n'), ((3209, 3259), 'torch.tensor', 'torch.tensor', (['[-1.0, 0.0, 1.0]'], {'requires_grad': '(True)'}), '([-1.0, 0.0, 1.0], requires_grad=True)\n', (3221, 3259), False, 'import torch\n'), ((3262, 3278), 'torch.sigmoid', 'torch.sigmoid', (['a'], {}), '(a)\n', (3275, 3278), False, 'import torch\n'), ((3434, 3484), 'torch.tensor', 'torch.tensor', (['[-1.0, 0.0, 1.0]'], {'requires_grad': '(True)'}), '([-1.0, 0.0, 1.0], requires_grad=True)\n', (3446, 3484), False, 'import torch\n'), ((668, 702), 'torch.nn.Embedding', 'nn.Embedding', (['node_size', 'embed_dim'], {}), '(node_size, embed_dim)\n', (680, 702), True, 'import torch.nn as nn\n'), ((820, 854), 'torch.nn.Embedding', 'nn.Embedding', (['path_size', 'embed_dim'], {}), '(path_size, embed_dim)\n', (832, 854), True, 'import torch.nn as nn\n'), ((1330, 1345), 'torch.mul', 'torch.mul', (['s', 'e'], {}), '(s, e)\n', (1339, 1345), False, 'import torch\n'), ((1360, 1377), 'torch.mul', 'torch.mul', (['agg', 'p'], {}), '(agg, p)\n', (1369, 1377), False, 'import torch\n'), ((2645, 2674), 'numpy.tile', 'np.tile', (['sample', '(neg + 1, 1)'], {}), '(sample, (neg + 1, 1))\n', (2652, 2674), True, 'import numpy as np\n'), ((2687, 2710), 'numpy.zeros', 'np.zeros', (['(l * (1 + neg))'], {}), '(l * (1 + neg))\n', (2695, 2710), True, 'import numpy as np\n'), ((2818, 2865), 'numpy.random.randint', 'np.random.randint', (['(0)', '(node_size - 1)', '(l * neg,)'], {}), '(0, node_size - 1, (l * neg,))\n', (2835, 2865), True, 'import numpy as np\n'), ((2884, 2903), 'torch.LongTensor', 'torch.LongTensor', (['x'], {}), '(x)\n', (2900, 2903), False, 'import torch\n'), ((2921, 2941), 'torch.FloatTensor', 'torch.FloatTensor', (['y'], {}), '(y)\n', (2938, 2941), False, 'import torch\n'), ((757, 791), 'torch.nn.Embedding', 'nn.Embedding', (['node_size', 'embed_dim'], {}), '(node_size, embed_dim)\n', (769, 791), True, 'import torch.nn as 
nn\n'), ((1481, 1503), 'torch.sum', 'torch.sum', (['agg'], {'axis': '(1)'}), '(agg, axis=1)\n', (1490, 1503), False, 'import torch\n')] |
#! /usr/bin/python3
from default_settings import default_settings
from ultron_cli import UltronCLI
if __name__ == '__main__':
    # Load defaults before entering the interactive shell.
    default_settings()
    try:
        shell = UltronCLI()
        shell.cmdloop()
    except KeyboardInterrupt:
        # Ctrl-C terminates the shell cleanly.
        print("\nInterrupted by user.")
        print("Goodbye")
        exit(0)
| [
"default_settings.default_settings",
"ultron_cli.UltronCLI"
] | [((132, 150), 'default_settings.default_settings', 'default_settings', ([], {}), '()\n', (148, 150), False, 'from default_settings import default_settings\n'), ((168, 179), 'ultron_cli.UltronCLI', 'UltronCLI', ([], {}), '()\n', (177, 179), False, 'from ultron_cli import UltronCLI\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
def soft_dice_score(
    output: torch.Tensor, target: torch.Tensor, smooth: float = 0.0, eps: float = 1e-7, dims=None) -> torch.Tensor:
    """Soft (differentiable) Dice coefficient.

    Computes (2 * |output ∩ target| + smooth) / (|output| + |target| + smooth),
    reduced over `dims` when given, otherwise over all elements. The
    denominator is clamped below at `eps` to avoid division by zero.
    """
    assert output.size() == target.size()
    if dims is None:
        intersection = (output * target).sum()
        cardinality = (output + target).sum()
    else:
        intersection = (output * target).sum(dim=dims)
        cardinality = (output + target).sum(dim=dims)
    denominator = (cardinality + smooth).clamp_min(eps)
    return (2.0 * intersection + smooth) / denominator
class DiceLoss(nn.Module):
    def __init__(self, smooth=1.0, eps=1e-7, ignore_index=None, weight=None, mode='MULTICLASS_MODE'):
        """Implementation of Dice loss for image segmentation task.
        https://github.com/qubvel/segmentation_models.pytorch

        :param smooth: additive smoothing on both numerator and denominator
        :param eps: lower clamp on the denominator to avoid division by zero
        :param ignore_index: target value masked out of the score
        :param weight: stored but not used in forward()
        :param mode: 'MULTICLASS_MODE', 'BINARY_MODE' or multilabel-style
        """
        super().__init__()
        self.smooth = smooth
        self.eps = eps
        self.ignore_index = ignore_index
        self.weight = weight
        self.mode = mode
    def forward(self, output, target):
        # output: (N, C, ...) logits; target: (N, ...) class indices
        # (or (N, ...) binary map in BINARY_MODE) -- assumed, confirm at callers.
        bs = target.size(0)
        num_classes = output.size(1)
        # Dice is reduced over batch and spatial dims, keeping per-class scores.
        dims = (0, 2)
        # print(self.mode, self.ignore_index)
        if self.mode == 'MULTICLASS_MODE':
            # log_softmax().exp() is a numerically stable softmax.
            output = output.log_softmax(dim=1).exp()
        else:
            # Stable sigmoid for binary/multilabel modes.
            output = F.logsigmoid(output).exp()
        # output = output.log_softmax(dim=1).exp()
        if self.mode == 'BINARY_MODE':
            # Flatten to (N, 1, H*W); mask ignored positions in-place.
            target = target.view(bs, 1, -1)
            output = output.view(bs, 1, -1)
            if self.ignore_index is not None:
                mask = target != self.ignore_index
                output = output * mask
                target = target * mask
        else:
            # Flatten to (N, H*W) / (N, C, H*W) and one-hot the targets.
            target = target.view(bs, -1)
            output = output.view(bs, num_classes, -1)
            if self.ignore_index is not None:
                mask = target != self.ignore_index
                output = output * mask.unsqueeze(1)
                # Zero ignored targets before one-hot so indices stay valid.
                target = F.one_hot((target * mask).to(torch.long), num_classes)  # N,H*W -> N,H*W, C
                target = target.permute(0, 2, 1) * mask.unsqueeze(1)
            else:
                target = F.one_hot(target, num_classes)  # N,H*W -> N,H*W, C
                target = target.permute(0, 2, 1)  # H, C, H*W
        scores = soft_dice_score(output, target.type_as(output), smooth=self.smooth, eps=self.eps, dims=dims)
        loss = 1.0 - scores
        # Only average over classes that actually appear in the target.
        mask = target.sum(dims) > 0
        loss *= mask.to(loss.dtype)
        return loss.mean()
| [
"torch.nn.functional.logsigmoid",
"torch.sum",
"torch.nn.functional.one_hot"
] | [((300, 336), 'torch.sum', 'torch.sum', (['(output * target)'], {'dim': 'dims'}), '(output * target, dim=dims)\n', (309, 336), False, 'import torch\n'), ((359, 395), 'torch.sum', 'torch.sum', (['(output + target)'], {'dim': 'dims'}), '(output + target, dim=dims)\n', (368, 395), False, 'import torch\n'), ((503, 529), 'torch.sum', 'torch.sum', (['(output * target)'], {}), '(output * target)\n', (512, 529), False, 'import torch\n'), ((552, 578), 'torch.sum', 'torch.sum', (['(output + target)'], {}), '(output + target)\n', (561, 578), False, 'import torch\n'), ((2335, 2365), 'torch.nn.functional.one_hot', 'F.one_hot', (['target', 'num_classes'], {}), '(target, num_classes)\n', (2344, 2365), True, 'import torch.nn.functional as F\n'), ((1480, 1500), 'torch.nn.functional.logsigmoid', 'F.logsigmoid', (['output'], {}), '(output)\n', (1492, 1500), True, 'import torch.nn.functional as F\n')] |
import inspect
import re
from functools import update_wrapper
from typing import Optional
def is_interactive() -> bool:
    """Return True when running inside IPython/Jupyter.

    Detection relies on the `get_ipython` builtin that IPython injects;
    in a plain interpreter the name lookup fails.
    """
    try:
        get_ipython()  # type: ignore  # noqa: F821 -- exists only under IPython
    except NameError:
        return False
    return True
def get_attr_docstring(class_type, attr_name) -> Optional[str]:
    """Return the docstring of `class_type.attr_name`, or None.

    The name 'get' is aliased to '__call__'. Runs of three or more spaces
    (indentation artifacts) are stripped from the docstring.
    """
    if attr_name == 'get':
        attr_name = '__call__'
    attr = getattr(class_type, attr_name, None)
    doc = attr.__doc__ if attr else None
    if doc:
        return re.sub(r' {3,}', '', doc)
    return None
def default_attr_filter(x) -> bool:  # pylint: disable=unused-argument
    """Permissive attribute filter: accept every attribute name."""
    return True
def get_class_docstring(class_type, attr_filter=default_attr_filter, extended=False):
    """Build a newline-joined summary of a class's public attributes.

    Each non-underscore, non-property attribute accepted by `attr_filter`
    becomes one line of the form `.name()`; when `extended` is True, the
    real signature (with 'self, ' stripped) and the attribute docstring
    are appended.
    """
    def describe(attr_name):
        # Render one attribute line.
        attr = getattr(class_type, attr_name)
        if isinstance(attr, property):
            name = f'.{attr_name}'
        else:
            if extended:
                sig = str(inspect.signature(attr)).replace('self, ', '')
            else:
                sig = '()'
            name = f'.{attr_name}{sig}'
        doc = get_attr_docstring(class_type, attr_name) if extended else ''
        return f'{name}{doc}'

    def wanted(attr_name):
        # Public, filter-approved, non-property attributes only.
        return (not attr_name.startswith('_')
                and attr_filter(attr_name)
                and not isinstance(getattr(class_type, attr_name), property))

    return '\n'.join(describe(x) for x in dir(class_type) if wanted(x))
def inline_doc(method):
    """Wrap `method` so that, in interactive sessions, its repr shows the
    (dedented) docstring instead of the default function repr.

    Outside IPython/Jupyter the method is returned unchanged. The wrapper
    is a descriptor so the bound instance is captured at attribute access.
    """
    if not is_interactive():
        return method
    # Repr text: the original method repr plus its docstring, if any.
    doc = [repr(method)]
    if method.__doc__:
        doc.append(re.sub(r' {3,}', '', method.__doc__))
    class CustomReprDescriptor:
        def __get__(self, instance, owner):
            # Build a fresh callable per access so `instance` is bound.
            class MethodWrapper:
                def __init__(self):
                    self.class_instance = instance
                    self.doc = '\n'.join(doc)
                def __call__(self, *args, **kwargs):
                    # Delegate to the original method with the bound instance.
                    return method(self.class_instance, *args, **kwargs)
                def __repr__(self):
                    return self.doc
            # Copy __name__/__doc__ etc. from the wrapped method.
            return update_wrapper(MethodWrapper(), method)
    return CustomReprDescriptor()
class InlineDocstring(type):
    """Metaclass that, in interactive sessions, wraps every public documented
    method with inline_doc so its repr displays the docstring."""

    def __new__(mcs, name, bases, attrs, **kwargs):
        if is_interactive():
            new_attrs = {
                attr_name: (inline_doc(attr)
                            if callable(attr) and attr.__doc__
                            and not attr_name.startswith('_')
                            else attr)
                for attr_name, attr in attrs.items()
            }
        else:
            # Non-interactive runs keep the class body untouched.
            new_attrs = attrs
        return type.__new__(mcs, name, bases, new_attrs, **kwargs)
| [
"re.sub",
"inspect.signature"
] | [((473, 506), 're.sub', 're.sub', (['""" {3,}"""', '""""""', 'attr.__doc__'], {}), "(' {3,}', '', attr.__doc__)\n", (479, 506), False, 'import re\n'), ((1717, 1752), 're.sub', 're.sub', (['""" {3,}"""', '""""""', 'method.__doc__'], {}), "(' {3,}', '', method.__doc__)\n", (1723, 1752), False, 'import re\n'), ((899, 922), 'inspect.signature', 'inspect.signature', (['attr'], {}), '(attr)\n', (916, 922), False, 'import inspect\n')] |
import torch
def train_one_epoch(model, train_loader, loss_func, optimizer):
    """Run a single optimization pass over `train_loader`.

    :return: mean batch loss rounded to 5 decimals; 0.0 for an empty loader.

    Fix: the original referenced `batch_idx` after the loop, which raised
    NameError when the loader yielded no batches.
    """
    model.train()
    running_loss = 0.0
    num_batches = 0
    for x, y in train_loader:
        out = model(x)
        optimizer.zero_grad()
        loss = loss_func(out, y)
        loss.backward()
        optimizer.step()
        # .item() detaches the scalar so no graph is retained.
        running_loss += loss.item()
        num_batches += 1
    if num_batches == 0:
        return 0.0
    return round(running_loss / num_batches, 5)
def valid_one_epoch(model, valid_loader, loss_func):
    """Evaluate the mean batch loss over `valid_loader` without gradients.

    :return: mean batch loss rounded to 5 decimals; 0.0 for an empty loader.

    Fix: the original referenced `batch_idx` after the loop, which raised
    NameError when the loader yielded no batches.
    """
    model.eval()
    running_loss = 0.0
    num_batches = 0
    with torch.no_grad():
        for x, y in valid_loader:
            out = model(x)
            running_loss += loss_func(out, y).item()
            num_batches += 1
    if num_batches == 0:
        return 0.0
    return round(running_loss / num_batches, 5)
class Trainer:
    """Minimal training-loop driver around train_one_epoch/valid_one_epoch.

    Per-epoch losses are accumulated in `train_loss` / `valid_loss`, and
    progress is printed via show_progress every `show_period` epochs.
    """

    def __init__(self, model, train_loader, optimizer, loss_func, valid_loader=None) -> None:
        # Fix: `valid_loader: None` was a bare annotation making the argument
        # required; it is now an optional keyword with default None.
        self.show_period = 1  # print progress every N epochs
        self.model = model
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.optimizer = optimizer
        self.loss_func = loss_func
        self.train_loss = []
        self.valid_loss = []

    def train(self, epochs=1):
        """Run `epochs` passes; validates first when a valid_loader is set."""
        for epoch in range(epochs):
            if self.valid_loader is not None:
                self.valid_loss.append(valid_one_epoch(
                    self.model, self.valid_loader, self.loss_func))
            # BUG FIX: the original trained on self.valid_loader here,
            # optimizing against the validation set.
            self.train_loss.append(train_one_epoch(
                self.model, self.train_loader, self.loss_func, self.optimizer))
            if epoch % self.show_period == 0 or epoch == 0:
                if self.valid_loader:
                    show_progress(
                        epoch, epochs, self.train_loss[-1], self.valid_loss[-1])
                else:
                    show_progress(
                        epoch, epochs, self.train_loss[-1])
        return None

    def save_model(self, model_name):
        """Persist the wrapped model via the module-level save_model helper."""
        save_model(self.model, model_name)
def show_progress(epoch=None, epochs=None, train_loss=None, valid_loss=None):
    """Print a one-line, carriage-return progress message; validation loss
    is included only when provided."""
    if valid_loss is None:
        print(
            f"[{epoch}/{epochs}], training_loss:[{train_loss}]", end="\r")
    else:
        print(
            f"[{epoch}/{epochs}], training_loss:[{train_loss}], valid_loss:[{valid_loss}]", end="\r")
def save_model(model, model_name: str, mode="full"):
    """Serialize `model` to disk.

    mode == "all" stores the whole module as `<name>_full_model.pth`; any
    other value (including the default) stores only the state dict as
    `<name>_state_dict.pth`.

    NOTE(review): the default mode "full" does NOT hit the full-model
    branch, which tests for "all" -- looks like a naming mismatch;
    behaviour preserved as-is.
    """
    if mode != "all":
        torch.save(model.state_dict(), model_name + "_state_dict.pth")
    else:
        torch.save(model, model_name + "_full_model.pth")
| [
"torch.no_grad",
"torch.save"
] | [((513, 528), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (526, 528), False, 'import torch\n'), ((2365, 2414), 'torch.save', 'torch.save', (['model', "(model_name + '_full_model.pth')"], {}), "(model, model_name + '_full_model.pth')\n", (2375, 2414), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import uuid
from typing import Callable, Mapping, Optional
import numpy as np
from caffe2.python import workspace
from caffe2.python.predictor import predictor_exporter
from .builtin_task import register_builtin_tasks
from .config import PyTextConfig, pytext_config_from_json
from .config.component import create_featurizer
from .data.featurizer import InputRecord
from .utils.onnx_utils import CAFFE2_DB_TYPE, convert_caffe2_blob_name
# Register the built-in task configs at import time so task names resolve
# when configurations are parsed.
register_builtin_tasks()
# Type alias: a Predictor maps a flat {feature_name: value} input to
# named numpy outputs.
Predictor = Callable[[Mapping[str, str]], Mapping[str, np.array]]
def _predict(workspace_id, feature_config, predict_net, featurizer, input):
    """Run one featurized example through the exported caffe2 net.

    Featurizes `input`, feeds each configured feature into caffe2 workspace
    blobs (blob names converted via convert_caffe2_blob_name), runs the net
    and returns the first row of every external output blob, keyed by name.
    """
    workspace.SwitchWorkspace(workspace_id)
    features = featurizer.featurize(InputRecord(**input))
    if feature_config.word_feat:
        # One blob per exported word-feature input, fed the token strings.
        for blob_name in feature_config.word_feat.export_input_names:
            converted_blob_name = convert_caffe2_blob_name(blob_name)
            workspace.blobs[converted_blob_name] = np.array(
                [features.tokens], dtype=str
            )
    # Sequence-length blob is always set, even without word features.
    workspace.blobs["tokens_lens"] = np.array([len(features.tokens)], dtype=np.int_)
    if feature_config.dict_feat:
        # Gazetteer (dict) features arrive as three parallel blobs:
        # values, per-value weights and per-token lengths.
        dict_feats, weights, lens = feature_config.dict_feat.export_input_names
        converted_dict_blob_name = convert_caffe2_blob_name(dict_feats)
        workspace.blobs[converted_dict_blob_name] = np.array(
            [features.gazetteer_feats], dtype=str
        )
        workspace.blobs[weights] = np.array(
            [features.gazetteer_feat_weights], dtype=np.float32
        )
        workspace.blobs[lens] = np.array(features.gazetteer_feat_lengths, dtype=np.int_)
    if feature_config.char_feat:
        for blob_name in feature_config.char_feat.export_input_names:
            converted_blob_name = convert_caffe2_blob_name(blob_name)
            workspace.blobs[converted_blob_name] = np.array(
                [features.characters], dtype=str
            )
    workspace.RunNet(predict_net)
    # [0] drops the batch dimension (single example) -- assumed, confirm
    # against the exported net's output layout.
    return {
        str(blob): workspace.blobs[blob][0] for blob in predict_net.external_outputs
    }
def load_config(filename: str) -> PyTextConfig:
    """
    Load a PyText configuration file from a file path.
    See pytext.config.pytext_config for more info on configs.
    """
    with open(filename) as config_file:
        config_json = json.load(config_file)
    # Accept both a bare config object and one nested under a "config" key.
    if "config" in config_json:
        return pytext_config_from_json(config_json["config"])
    return pytext_config_from_json(config_json)
def create_predictor(
    config: PyTextConfig, model_file: Optional[str] = None
) -> Predictor:
    """
    Create a simple prediction API from a training config and an exported caffe2
    model file. This model file should be created by calling export on a trained
    model snapshot.
    """
    # A fresh workspace isolates this predictor's blobs from other nets.
    workspace_id = str(uuid.uuid4())
    workspace.SwitchWorkspace(workspace_id, True)
    predict_net = predictor_exporter.prepare_prediction_net(
        filename=model_file or config.export_caffe2_path, db_type=CAFFE2_DB_TYPE
    )
    task = config.task
    feature_config = task.features
    featurizer = create_featurizer(task.featurizer, feature_config)
    # The returned closure captures the workspace/net so repeated calls
    # reuse the loaded model.
    return lambda input: _predict(
        workspace_id, feature_config, predict_net, featurizer, input
    )
| [
"caffe2.python.workspace.RunNet",
"caffe2.python.workspace.SwitchWorkspace",
"caffe2.python.predictor.predictor_exporter.prepare_prediction_net",
"uuid.uuid4",
"numpy.array"
] | [((721, 760), 'caffe2.python.workspace.SwitchWorkspace', 'workspace.SwitchWorkspace', (['workspace_id'], {}), '(workspace_id)\n', (746, 760), False, 'from caffe2.python import workspace\n'), ((2019, 2048), 'caffe2.python.workspace.RunNet', 'workspace.RunNet', (['predict_net'], {}), '(predict_net)\n', (2035, 2048), False, 'from caffe2.python import workspace\n'), ((2899, 2944), 'caffe2.python.workspace.SwitchWorkspace', 'workspace.SwitchWorkspace', (['workspace_id', '(True)'], {}), '(workspace_id, True)\n', (2924, 2944), False, 'from caffe2.python import workspace\n'), ((2963, 3083), 'caffe2.python.predictor.predictor_exporter.prepare_prediction_net', 'predictor_exporter.prepare_prediction_net', ([], {'filename': '(model_file or config.export_caffe2_path)', 'db_type': 'CAFFE2_DB_TYPE'}), '(filename=model_file or config.\n export_caffe2_path, db_type=CAFFE2_DB_TYPE)\n', (3004, 3083), False, 'from caffe2.python.predictor import predictor_exporter\n'), ((1438, 1485), 'numpy.array', 'np.array', (['[features.gazetteer_feats]'], {'dtype': 'str'}), '([features.gazetteer_feats], dtype=str)\n', (1446, 1485), True, 'import numpy as np\n'), ((1543, 1604), 'numpy.array', 'np.array', (['[features.gazetteer_feat_weights]'], {'dtype': 'np.float32'}), '([features.gazetteer_feat_weights], dtype=np.float32)\n', (1551, 1604), True, 'import numpy as np\n'), ((1659, 1715), 'numpy.array', 'np.array', (['features.gazetteer_feat_lengths'], {'dtype': 'np.int_'}), '(features.gazetteer_feat_lengths, dtype=np.int_)\n', (1667, 1715), True, 'import numpy as np\n'), ((2881, 2893), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2891, 2893), False, 'import uuid\n'), ((1043, 1081), 'numpy.array', 'np.array', (['[features.tokens]'], {'dtype': 'str'}), '([features.tokens], dtype=str)\n', (1051, 1081), True, 'import numpy as np\n'), ((1941, 1983), 'numpy.array', 'np.array', (['[features.characters]'], {'dtype': 'str'}), '([features.characters], dtype=str)\n', (1949, 1983), True, 'import numpy as 
np\n')] |
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class cudaobjdetect_test(NewOpenCVTests):
    """CUDA object-detection tests (HOG people detector)."""

    def setUp(self):
        super(cudaobjdetect_test, self).setUp()
        # These tests require at least one CUDA-capable device.
        if not cv.cuda.getCudaEnabledDeviceCount():
            self.skipTest("No CUDA-capable device is detected")
    @unittest.skipIf('OPENCV_TEST_DATA_PATH' not in os.environ,
                     "OPENCV_TEST_DATA_PATH is not defined")
    def test_hog(self):
        """Exercise the CUDA HOG detector's four detection entry points."""
        img_path = os.environ['OPENCV_TEST_DATA_PATH'] + '/gpu/caltech/image_00000009_0.png'
        # HOG expects BGRA input on the GPU.
        npMat = cv.cvtColor(cv.imread(img_path),cv.COLOR_BGR2BGRA)
        cuMat = cv.cuda_GpuMat(npMat)
        cuHog = cv.cuda.HOG_create()
        cuHog.setSVMDetector(cuHog.getDefaultPeopleDetector())
        # detect/detectWithoutConf return point locations (len 2);
        # the multi-scale variants return rectangles (len 4).
        loc, conf = cuHog.detect(cuMat)
        self.assertTrue(len(loc) == len(conf) and len(loc) > 0 and len(loc[0]) == 2)
        loc = cuHog.detectWithoutConf(cuMat)
        self.assertTrue(len(loc) > 0 and len(loc[0]) == 2)
        loc = cuHog.detectMultiScaleWithoutConf(cuMat)
        self.assertTrue(len(loc) > 0 and len(loc[0]) == 4)
        # Group threshold 0 keeps ungrouped detections with confidences.
        cuHog.setGroupThreshold(0)
        loc, conf = cuHog.detectMultiScale(cuMat)
        self.assertTrue(len(loc) == len(conf) and len(loc) > 0 and len(loc[0]) == 4)
if __name__ == '__main__':
NewOpenCVTests.bootstrap() | [
"cv2.cuda_GpuMat",
"tests_common.unittest.skipIf",
"cv2.cuda.HOG_create",
"tests_common.NewOpenCVTests.bootstrap",
"cv2.cuda.getCudaEnabledDeviceCount",
"cv2.imread"
] | [((353, 455), 'tests_common.unittest.skipIf', 'unittest.skipIf', (["('OPENCV_TEST_DATA_PATH' not in os.environ)", '"""OPENCV_TEST_DATA_PATH is not defined"""'], {}), "('OPENCV_TEST_DATA_PATH' not in os.environ,\n 'OPENCV_TEST_DATA_PATH is not defined')\n", (368, 455), False, 'from tests_common import NewOpenCVTests, unittest\n'), ((1348, 1374), 'tests_common.NewOpenCVTests.bootstrap', 'NewOpenCVTests.bootstrap', ([], {}), '()\n', (1372, 1374), False, 'from tests_common import NewOpenCVTests, unittest\n'), ((677, 698), 'cv2.cuda_GpuMat', 'cv.cuda_GpuMat', (['npMat'], {}), '(npMat)\n', (691, 698), True, 'import cv2 as cv\n'), ((715, 735), 'cv2.cuda.HOG_create', 'cv.cuda.HOG_create', ([], {}), '()\n', (733, 735), True, 'import cv2 as cv\n'), ((246, 281), 'cv2.cuda.getCudaEnabledDeviceCount', 'cv.cuda.getCudaEnabledDeviceCount', ([], {}), '()\n', (279, 281), True, 'import cv2 as cv\n'), ((621, 640), 'cv2.imread', 'cv.imread', (['img_path'], {}), '(img_path)\n', (630, 640), True, 'import cv2 as cv\n')] |
import logging
import inspect
import re
from collections import OrderedDict
from gremlinpy.gremlin import Gremlin, Param, AS
from .entity import (_Entity, Vertex, Edge, GenericVertex, GenericEdge,
ENTITY_MAP)
from .exception import (AstronomerQueryException, AstronomerMapperException)
from .traversal import Traversal
from .util import (camel_to_underscore, GIZMO_ID, GIZMO_LABEL, GIZMO_TYPE,
GIZMO_ENTITY, GIZMO_VARIABLE, entity_name)
logger = logging.getLogger(__name__)
ENTITY_MAPPER_MAP = {}
GENERIC_MAPPER = 'generic.mapper'
_count = -1
_query_count = 0
_query_params = {}
def next_query_variable():
    """Return a fresh query variable name ('<GIZMO_VARIABLE>_<n>').

    Increments the module-level `_count`, which Mapper.reset() zeroes, so
    names are unique within one accumulated script.
    """
    global _count
    _count += 1
    return '{}_{}'.format(GIZMO_VARIABLE, _count)
def get_entity_mapper(entity=None, name=GENERIC_MAPPER):
    """Resolve and instantiate the mapper registered for `entity` (or
    `name`), falling back to the generic mapper.

    BUG FIX: the original called the undefined helpers
    get_qualified_instance_name/get_qualified_name and referenced an
    undefined `self` when instantiating, so any call raised NameError.
    Resolution now mirrors Mapper.get_mapper (via entity_name); the mapper
    is instantiated without a parent Mapper (EntityMapper defaults
    mapper=None).
    """
    if entity is not None:
        name = entity_name(entity)
    if name not in ENTITY_MAPPER_MAP:
        name = GENERIC_MAPPER
    return ENTITY_MAPPER_MAP[name](None)
def next_param_name(param):
    """Return a unique bound-parameter name derived from `param`.

    Non-word characters are replaced with underscores; a per-name counter
    (module-level `_query_params`, reset by Mapper.reset) guarantees
    uniqueness within the current script.

    Fix: the regex is now a raw string -- '\\W' in a plain literal is an
    invalid escape sequence (DeprecationWarning, an error in future
    Python versions).
    """
    param = re.sub(r'\W', '_', param)
    if param not in _query_params:
        _query_params[param] = -1
    _query_params[param] += 1
    return '{}_{}'.format(param, _query_params[param])
def next_param(param, value):
    """Wrap `value` in a gremlinpy Param with a unique name; entity values
    are replaced by their registered entity name."""
    bound_value = entity_name(value) if isinstance(value, _Entity) else value
    return Param(next_param_name(param), bound_value)
def next_entity_param(entity, param, value):
    """Build a unique Param whose name is prefixed with the entity's
    registered name ('<entity>_<param>')."""
    prefixed = '{}_{}'.format(entity_name(entity), param)
    return next_param(prefixed, value)
class Mapper:
    """Top-level coordinator for entity mappers.

    Accumulates queries, bound params, entities and callbacks from
    per-entity mappers (enqueue_mapper) or raw scripts (enqueue_script),
    then flushes everything to the graph server in one round-trip via
    send()/query().
    """

    def __init__(self, request, gremlin=None, auto_commit=True,
                 graph_instance_name=None):
        # `request` is the transport used by query() to talk to the server.
        if not gremlin:
            gremlin = Gremlin()
        self.request = request
        self.gremlin = gremlin
        self.auto_commit = auto_commit
        self.graph_instance_name = graph_instance_name
        if not self.auto_commit and not self.graph_instance_name:
            error = ('If auto_commit is set, we need to know the'
                     ' graph instance name')
            logger.exception(error)
            # NOTE(review): ArgumentError is not defined/imported in this
            # module -- reaching this line raises NameError instead;
            # confirm the intended exception type.
            raise ArgumentError(error)
        self.reset()

    def reset(self):
        """Clear all queued state and the module-level name counters."""
        self.gremlin.reset()
        global _query_count
        global _count
        global _query_params
        _query_count = 0
        _count = 0
        _query_params = {}
        self.queries = []
        self.return_vars = []
        self.entities = OrderedDict()  # ensure FIFO for testing
        self.del_entities = {}
        self.params = {}
        self.callbacks = {}
        self._magic_method = None

    def get_entity_variable(self, entity):
        """Return the query variable already bound to `entity`, or None."""
        ret = None
        for key, def_entity in self.entities.items():
            if entity == def_entity:
                return key
        return ret

    def get_mapper(self, entity=None, name=GENERIC_MAPPER):
        """Resolve the custom mapper class for `entity` (or `name`),
        falling back to the generic mapper, and instantiate it with self."""
        if entity is not None:
            name = entity_name(entity)
        if name not in ENTITY_MAPPER_MAP:
            name = GENERIC_MAPPER
        return ENTITY_MAPPER_MAP[name](self)

    def enqueue_mapper(self, mapper):
        """Absorb an entity mapper's queued queries/params/entities and
        callbacks into this mapper, then reset it."""
        self.queries += mapper.queries
        self.return_vars += mapper.return_vars
        self.entities.update(mapper.entities)
        self.params.update(mapper.params)
        for entity, callbacks in mapper.callbacks.items():
            exisiting = self.callbacks.get(entity, [])
            self.callbacks[entity] = exisiting + callbacks
        mapper.reset()
        return self

    def enqueue_script(self, gremlin=None, script=None, params=None):
        """Queue a raw script (list of statements) or a Gremlin instance
        for the next send()."""
        if gremlin is not None:
            script = [str(gremlin),]
            params = gremlin.bound_params
            gremlin.reset()
        if script:
            self.queries += script
        if params:
            self.params.update(params)
        return self

    def __getattr__(self, magic_method):
        """magic method that works in conjunction with __call__
        method these two methods are used to shortcut the retrieval
        of an entity's mapper and call a specific method against
        this chain:
            user = User()
            user_mapper = mapper.get_mapper(user)
            emails = user_mapper.get_emails(user)
        can be shortened into:
            user = User()
            emails = mapper.get_emails(user)
        """
        self._magic_method = magic_method
        return self

    def __call__(self, *args, **kwargs):
        # Dispatch the method name captured by __getattr__ to the mapper
        # resolved from the first argument.
        mapper = self.get_mapper(args[0])
        return getattr(mapper, self._magic_method)(*args, **kwargs)

    async def data(self, entity, *args):
        """utility method used to retrieve an entity's data. It
        also allows for method chaining in order to augment the
        resulting data.
        class MyMapper(_GenericMapper):
            async def add_two(self, entity, data):
                data['two'] = 2
                return data
            async def add_three(self, entity, data):
                data['three'] = 3
                return data
        entity = User()
        data = await mapper.data(user, 'add_two', 'add_three')
        the resulting data will have the data from the User class,
        plus a two and a three member
        """
        collection = isinstance(entity, Collection)
        async def get_data(entity, data):
            # Thread `data` through each named augmentation method.
            retrieved = data
            for method in args:
                mapper = self.get_mapper(entity)
                async def wrapper(entity, data):
                    res = await getattr(mapper, method)(entity=entity,
                                                        data=data)
                    return res
                retrieved = await wrapper(entity=entity,
                                          data=retrieved)
            return retrieved
        if collection:
            # Collections are mapped entity-by-entity into a list.
            data = []
            for coll_entity in entity:
                mapper = self.get_mapper(coll_entity)
                entity_data = await mapper.data(coll_entity)
                res = await get_data(coll_entity, entity_data)
                data.append(res)
        else:
            mapper = self.get_mapper(entity)
            entity_data = await mapper.data(entity)
            data = await get_data(entity, entity_data)
        return data

    def save(self, entity, bind_return=True, mapper=None,
             callback=None, **kwargs):
        """Queue a save of `entity` through its mapper."""
        if mapper is None:
            mapper = self.get_mapper(entity)
        # NOTE(review): logs the __repr__ method object, not its result --
        # likely a missing call; preserved as-is.
        logger.debug(('Saving entity: {} with mapper:'
                      ' {}').format(entity.__repr__, mapper))
        mapper.save(entity, bind_return, callback, **kwargs)
        return self.enqueue_mapper(mapper)

    def delete(self, entity, mapper=None, callback=None):
        """Queue deletion of `entity` through its mapper."""
        if mapper is None:
            mapper = self.get_mapper(entity)
        logger.debug(('Deleting entity: {} with mapper:'
                      ' {}').format(entity.__repr__, mapper))
        mapper.delete(entity, callback=callback)
        # manually add the deleted entity to the self.entities
        # collection for callbacks
        from random import randrange
        key = 'DELETED_%s_entity' % str(randrange(0, 999999999))
        self.del_entities[key] = entity
        return self.enqueue_mapper(mapper)

    def create(self, data=None, entity=None, data_type='python'):
        """Build an entity instance from raw `data`, resolving the mapper
        from the explicit `entity` or the GIZMO_ENTITY marker in the data."""
        if data is None:
            data = {}
        if entity:
            mapper = self.get_mapper(entity)
        else:
            name = data.get(GIZMO_ENTITY, GENERIC_MAPPER)
            # Server responses may wrap the entity name as [{'value': ...}].
            if isinstance(name, (list, tuple)):
                name = name[0]['value']
            mapper = self.get_mapper(name=name)
        kwargs = {
            'data': data,
            'entity': entity,
            'data_type': data_type,
        }
        return mapper.create(**kwargs)

    def connect(self, out_v, in_v, label=None, data=None, edge_entity=None,
                data_type='python'):
        """
        method used to connect two vertices and create an Edge object
        the resulting edge is not saved to the graph until it is passed to
        save, allowing further augmentation
        """
        # Endpoints must be Vertex instances or raw ids.
        if not isinstance(out_v, Vertex):
            if not isinstance(out_v, (str, int)):
                err = 'The out_v needs to be either a Vertex or an id'
                logger.exception(err)
                raise AstronomerMapperException(err)
        if not isinstance(in_v, Vertex):
            if not isinstance(in_v, (str, int)):
                err = 'The in_v needs to be either a Vertex or an id'
                logger.exception(err)
                raise AstronomerMapperException(err)
        if data is None:
            data = {}
        data['outV'] = out_v
        data['inV'] = in_v
        data[GIZMO_TYPE] = 'edge'
        data[GIZMO_LABEL[0]] = label
        return self.create(data=data, entity=edge_entity, data_type=data_type)

    def start(self, entity):
        """Begin a traversal rooted at `entity` (delegates to its mapper)."""
        mapper = self.get_mapper(entity)
        return mapper.start(entity)

    def _build_queries(self):
        # Append a final map literal of all bound variables so the server
        # response carries every saved entity back.
        if len(self.return_vars) > 0:
            returns = []
            for k in self.return_vars:
                returns.append("'{}': {}".format(k, k))
            ret = '[{}]'.format(', '.join(returns))
            self.queries.append(ret)
        return self

    def get(self, entity):
        """Retrieve `entity` through its mapper's get()."""
        mapper = self.get_mapper(entity)
        return mapper.get(entity)

    def apply_statement(self, statement):
        """Apply a gremlinpy statement to the underlying Gremlin instance."""
        self.gremlin.apply_statement(statement)
        return self

    async def send(self):
        """Flush everything queued so far in one request, running the
        registered callbacks for saved and deleted entities."""
        self._build_queries()
        script = ";\n".join(self.queries)
        params = self.params
        entities = self.entities
        callbacks = self.callbacks
        # Include deleted entities so their callbacks also fire.
        entities.update(self.del_entities)
        self.reset()
        res = await self.query(script=script, params=params,
                               update_entities=entities, callbacks=callbacks)
        return res

    async def query(self, script=None, params=None, gremlin=None,
                    update_entities=None, callbacks=None, collection=None):
        """Execute a script (or Gremlin instance), update the mapped
        entities from the response, fire callbacks and wrap the result.

        NOTE(review): when `callbacks` is None but `update_entities` is
        non-empty, callbacks.get() raises AttributeError -- confirm callers
        always pass both together.
        """
        if gremlin is not None:
            script = str(gremlin)
            params = gremlin.bound_params
            gremlin.reset()
        if script is None:
            script = ''
        if params is None:
            params = {}
        if update_entities is None:
            update_entities = {}
        self.reset()
        response = await self.request.send(script, params, update_entities)
        for k, entity in update_entities.items():
            cbs = callbacks.get(entity, [])
            for c in cbs:
                c(entity)
        if not collection:
            # `Collection` is expected to be defined elsewhere in this module.
            collection = Collection
        return collection(self, response)
class _RootMapper(type):
    """
    In the case of custom mappers, this metaclass will register the entity name
    with the mapper object. This is done so that when entities are loaded by
    name the associated mapper is used to CRUD it.
    This only works when the Mapper.create method is used to
    create the entity
    """
    def __new__(cls, name, bases, attrs):
        cls = super(_RootMapper, cls).__new__(cls, name, bases, attrs)
        # A declared `entity` attribute registers this mapper class for that
        # entity's name; the base EntityMapper becomes the generic fallback.
        entity = attrs.pop('entity', None)
        if entity:
            map_name = entity_name(entity)
            ENTITY_MAPPER_MAP[map_name] = cls
        elif name == 'EntityMapper':
            ENTITY_MAPPER_MAP[GENERIC_MAPPER] = cls
        return cls
    def __call__(cls, *args, **kwargs):
        # On instantiation, replace nested EntityMapper *classes* declared
        # as attributes with instances bound to the same parent Mapper.
        mapper = super(_RootMapper, cls).__call__(*args, **kwargs)
        for field in dir(mapper):
            if field.startswith('_'):
                continue
            val = getattr(mapper, field)
            if inspect.isclass(val) and issubclass(val, EntityMapper):
                # Only bind when this mapper itself has a parent Mapper.
                if mapper.mapper:
                    instance = val(mapper.mapper)
                    setattr(mapper, field, instance)
        return mapper
class EntityMapper(metaclass=_RootMapper):
VARIABLE = GIZMO_VARIABLE
unique = False
unique_fields = None
save_statements = None
def __init__(self, mapper=None):
self.mapper = mapper
self.gremlin = None
if self.mapper:
self.gremlin = mapper.gremlin
self.reset()
def reset(self):
self.queries = []
self.return_vars = []
self.entities = {}
self.params = {}
self.callbacks = {}
async def data(self, entity):
return entity.data
def get(self, entity):
trav = self.start(entity)
vertex = issubclass(self.entity, Vertex)
param_value = str(self.entity)
param_name = 'out_{}_{}'.format(entity.__class__.__name__, param_value)
entity_param = next_param(param_name, param_value)
if vertex:
trav.out().hasLabel(entity_param)
else:
trav.outE(entity_param)
return trav
def enqueue(self, query, bind_return=True):
for entry in query.queries:
script = entry['script']
if script in self.queries:
continue
if bind_return:
variable = next_query_variable()
script = '{} = {}'.format(variable, script)
if 'entity' in entry:
self.entities[variable] = entry['entity']
self.return_vars.append(variable)
self.queries.append(script)
self.params.update(entry['params'])
return self
def _enqueue_callback(self, entity, callback):
if callback:
listed = self.callbacks.get(entity, [])
if isinstance(callback, (list, tuple)):
listed += list(callback)
elif callback:
listed.append(callback)
self.callbacks[entity] = listed
return self
def on_create(self, entity):
pass
def on_update(self, entity):
pass
def on_delete(self, entity):
pass
def _build_save_statements(self, entity, query, **kwargs):
statement_query = Query(self.mapper)
query_gremlin = Gremlin(self.gremlin.gv)
for entry in query.queries:
query_gremlin.bind_params(entry['params'])
for statement in self.save_statements:
instance = statement(entity, self, query, **kwargs)
query_gremlin.apply_statement(instance)
statement_query._add_query(str(query_gremlin),
query_gremlin.bound_params, entity=entity)
return statement_query
def start(self, entity=None):
return Traversal(self.mapper, entity or self.entity)
def save(self, entity, bind_return=True, callback=None, *args, **kwargs):
"""callback and be a single callback or a list of them"""
method = '_save_edge' if entity[GIZMO_TYPE] == 'edge' else \
'_save_vertex'
if not isinstance(callback, (list, tuple)) and callback:
callback = [callback]
else:
callback = []
if entity[GIZMO_ID]:
callback.insert(0, self.on_update)
else:
callback.insert(0, self.on_create)
self._enqueue_callback(entity, callback)
return getattr(self, method)(entity=entity, bind_return=bind_return)
def _save_vertex(self, entity, bind_return=True):
"""
method used to save a entity. IF both the unique_type and unique_fields
params are set, it will run a sub query to check to see if an entity
exists that matches those values
"""
query = Query(self.mapper)
ref = self.mapper.get_entity_variable(entity)
"""
check to see if the entity has been used already in the current script
execution.
If it has use the reference
if it hasnt, go through the process of saving it
"""
if ref:
query._add_query(ref, params=None, entity=entity)
return self.enqueue(query, bind_return)
query.save(entity)
if not entity[GIZMO_ID] and self.unique_fields:
from .statement import MapperUniqueVertex
if not self.save_statements:
self.save_statements = []
if MapperUniqueVertex not in self.save_statements:
self.save_statements.append(MapperUniqueVertex)
if self.save_statements and len(self.save_statements):
statement_query = self._build_save_statements(entity, query)
return self.enqueue(statement_query, bind_return)
else:
return self.enqueue(query, bind_return)
def _save_edge(self, entity, bind_return=True):
query = Query(self.mapper)
save = True
edge_ref = self.mapper.get_entity_variable(entity)
out_v = entity.out_v
out_v_id = out_v[GIZMO_ID] if isinstance(out_v, Vertex) else None
in_v = entity.in_v
in_v_id = in_v[GIZMO_ID] if isinstance(in_v, Vertex) else None
out_v_ref = self.mapper.get_entity_variable(out_v)
in_v_ref = self.mapper.get_entity_variable(in_v)
if edge_ref:
query._add_query(edge_ref, params=None, entity=entity)
return self.enqueue(query, bind_return)
"""
both out_v and in_v are checked to see if the entities stored in each
respective variable has been used.
If they have not and they are Vertex instances with an empty _id,
send them to be saved.
if they have been used, use the reference variable in the create edge
logic
"""
query.save(entity)
if not entity[GIZMO_ID] and self.unique and in_v_id and out_v_id:
from .statement import MapperUniqueEdge
if not self.save_statements:
self.save_statements = []
if MapperUniqueEdge not in self.save_statements:
self.save_statements.append(MapperUniqueEdge)
if self.save_statements and len(self.save_statements):
statement_query = self._build_save_statements(entity, query,
out_v_id=out_v_id, in_v_id=in_v_id,
label=entity[GIZMO_LABEL[0]], direction=self.unique)
return self.enqueue(statement_query, False)
else:
return self.enqueue(query, bind_return)
def delete(self, entity, lookup=True, callback=None):
query = Query(self.mapper)
if not isinstance(callback, (list, tuple)) and callback:
callback = [callback]
else:
callback = []
query.delete(entity)
callback.insert(0, self.on_delete)
self._enqueue_callback(entity, callback)
return self.enqueue(query, False)
def create(self, data=None, entity=None, data_type='python'):
"""
Method used to create a new entity based on the data that is passed in.
If the kwarg entity is passed in, it will be used to create the
entity else if utils.GIZMO_ENTITY is in data, that will be used
finally, entity.GenericVertex or entity.GenericEdge will be used to
construct the entity
"""
check = True
if data is None:
data = {}
if entity is not None:
try:
label = data.get(GIZMO_LABEL[0], None)
entity = entity(data=data, data_type=data_type)
check = False
for f, r in entity._relationships.items():
r._mapper = self.mapper
r._entity = entity
except Exception as e:
pass
if check:
try:
if GIZMO_ENTITY in data:
name = data[GIZMO_ENTITY]
if isinstance(name, (list, tuple)):
name = name[0]['value']
entity = ENTITY_MAP[name](data=data, data_type=data_type)
for f, r in entity._relationships.items():
r._mapper = self.mapper
r._entity = entity
else:
raise
except Exception as e:
# all else fails create a GenericVertex unless _type is 'edge'
if data.get(GIZMO_TYPE, None) == 'edge':
entity = GenericEdge(data=data, data_type=data_type)
else:
entity = GenericVertex(data=data, data_type=data_type)
if GIZMO_ID in data:
entity[GIZMO_ID] = data[GIZMO_ID]
return entity
def delete(self, entity, lookup=True, callback=None):
query = Query(self.mapper)
if not isinstance(callback, (list, tuple)) and callback:
callback = [callback]
else:
callback = []
query.delete(entity)
callback.insert(0, self.on_delete)
self._enqueue_callback(entity, callback)
return self.enqueue(query, False)
class Query:
def __init__(self, mapper):
self.mapper = mapper
self.gremlin = Gremlin(self.mapper.gremlin.gv)
self.queries = []
self.fields = []
self.reset()
def reset(self):
self.fields = []
return self
def _add_query(self, script, params=None, entity=None):
if params is None:
params = {}
self.queries.append({
'script': script,
'params': params,
'entity': entity,
})
return self
def _add_gremlin_query(self, entity=None):
script = str(self.gremlin)
params = self.gremlin.bound_params
self._add_query(script, params, entity)
return self.reset()
def _field_changes(self, gremlin, entity, ignore=None):
ignore = ignore or []
entity_name = str(entity)
entity_alias = '{}_alias'.format(entity_name)
entity_alias = next_param(entity_alias, entity_alias)
def add_field(field, data):
values = data.get('values', data.get('value', None))
if not isinstance(values, (list, tuple,)):
values = [values, ]
for i, value in enumerate(values):
name = '{}_{}_{}'.format(entity_name, field, i)
prop = "'{}'".format(field)
gremlin.property(prop, Param(name, value))
def add_property(field, value, properties=None, ignore=None):
ignore = ignore or []
if field.startswith('T.'):
val_param = next_param('{}_{}'.format(entity_name,
field), value)
gremlin.unbound('property', field, val_param)
return
field_name = '{}_{}'.format(entity_name, field)
prop = next_param(field_name, field)
value_name = '{}_value'.format(field_name)
value_param = next_param(value_name, value)
params = [prop, value_param]
if properties:
for key, val in properties.items():
prop_key = next_param('{}_{}'.format(prop.name,
key), key)
prop_val = next_param('{}_{}_val'.format(prop.name,
key), val)
params += [prop_key, prop_val]
gremlin.property(*params)
for field, changes in entity.changes.items():
if field in ignore:
continue
if changes['immutable']:
for val in changes['values']['values']:
add_property(field, val)
elif changes['deleted']:
prop = next_param('{}_{}'.format(entity_name, field), field)
remove = Gremlin('').it.get().func('remove')
gremlin.AS(entity_alias).properties(prop)
gremlin.sideEffect.close(remove)
gremlin.select(entity_alias)
else:
for action, value in changes['values'].items():
if action == 'added':
for val in value:
add_property(field, val['value'],
val['properties'])
def _add_vertex(self, entity, set_variable=None):
entity.data_type = 'graph'
gremlin = self.gremlin
label = None
ignore = ['T.label', 'label']
if entity['label']:
label = next_entity_param(entity, 'label', entity['label'])
gremlin.unbound('addV', 'T.label', label)
else:
gremlin.addV()
if set_variable:
gremlin.set_ret_variable(set_variable, ignore=[GIZMO_ID, ])
self._field_changes(gremlin, entity, ignore=ignore)
gremlin.func('next')
entity.data_type = 'python'
return self._add_gremlin_query(entity)
def _update_entity(self, entity, set_variable=None):
entity.data_type = 'graph'
gremlin = self.gremlin
entity_type, entity_id = entity.get_rep()
if not entity_id:
error = (('The entity {} scheduled to be updated does not have'
' an id').format(str(entity)))
logger.exception(error)
raise Exception()
_id = next_param('{}_ID'.format(str(entity)), entity_id)
ignore = [GIZMO_ID, GIZMO_LABEL[1]]
alias = '{}_{}_updating'.format(entity_type, entity_id)
alias = next_param(alias, alias)
getattr(gremlin, entity_type.upper())(_id)
gremlin.AS(alias)
self._field_changes(gremlin, entity, ignore=ignore)
gremlin.select(alias).next()
entity.data_type = 'python'
return self._add_gremlin_query(entity)
def _add_edge(self, entity, set_variable=None):
if not entity[GIZMO_LABEL[0]]:
msg = 'A label is required in order to create an edge'
logger.exception(msg)
raise AstronomerQueryException(msg)
def get_or_create_ends():
"""this function will determine if the edge has both ends. If
either end is an _Entity object it will get the reference to
the object or save it and create a reference. Either the entity's
id or reference will be used when saving the edge.
"""
out_v = entity.out_v
out_v_ref = None
in_v = entity.in_v
in_v_ref = None
if out_v is None or in_v is None:
error = ('Both out and in vertices must be set before'
' saving the edge')
logger.exception(error)
raise AstronomerQueryException(error)
if isinstance(out_v, _Entity):
if out_v[GIZMO_ID]:
out_v = out_v[GIZMO_ID]
else:
out_v_ref = self.mapper.get_entity_variable(out_v)
if not out_v_ref:
self.mapper.save(out_v)
out_v_ref = self.mapper.get_entity_variable(out_v)
if out_v_ref:
out_v = out_v_ref
if isinstance(in_v, _Entity):
if in_v[GIZMO_ID]:
in_v = in_v[GIZMO_ID]
else:
in_v_ref = self.mapper.get_entity_variable(in_v)
if not in_v_ref:
self.mapper.save(in_v)
in_v_ref = self.mapper.get_entity_variable(in_v)
if in_v_ref:
in_v = in_v_ref
return {
'out': {
'is_ref': out_v_ref,
'v': out_v,
},
'in': {
'is_ref': in_v_ref,
'v': in_v,
},
}
ends = get_or_create_ends()
name = str(entity)
gremlin = self.gremlin
g = Gremlin(gremlin.gv)
label = next_param('{}_label'.format(name), entity[GIZMO_LABEL[0]])
"""
g.V($OUT_ID).next().addEdge($LABEL, g.V($IN_ID).next()).property(....)
"""
in_v = ends['in']
out_v = ends['out']
if in_v['is_ref']:
g.unbound('V', in_v['v'])
else:
in_id = next_param('{}_in'.format(name), in_v['v'])
g.V(in_id)
g.func('next')
if out_v['is_ref']:
gremlin.unbound('V', out_v['v'])
else:
out_id = next_param('{}_out'.format(name), out_v['v'])
gremlin.V(out_id)
ignore = [GIZMO_LABEL[0], GIZMO_LABEL[1], GIZMO_TYPE]
edge_args = [label, g]
# edge properites only get one value and no meta-properties
for field, changes in entity.changes.items():
if field in ignore:
continue
try:
if changes['immutable']:
value = changes['values']['values'][-1]
else:
value = changes['values'][-1]
except:
continue
field_param = next_param('{}_{}'.format(name, field), field)
field_value = next_param('{}_value'.format(field_param.name),
value)
edge_args += [field_param, field_value]
gremlin.func('next').addEdge(*edge_args)
return self._add_gremlin_query(entity)
def save(self, entity, set_variable=None):
if not entity[GIZMO_TYPE]:
msg = 'The entity does not have a type defined'
logger.exception(msg)
raise AstronomerQueryException(msg)
entity_type = entity[GIZMO_TYPE]
if not entity[GIZMO_ID]:
if entity_type == 'vertex':
self._add_vertex(entity, set_variable)
else:
self._add_edge(entity, set_variable)
else:
self._update_entity(entity, set_variable)
def delete(self, entity):
entity_type, _id = entity.get_rep()
if not _id:
msg = ('The entity does not have an id defined and'
' connot be deleted')
logger.exception(msg)
raise AstronomerQueryException(msg)
if not entity[GIZMO_TYPE]:
msg = 'The entity does not have a type defined'
logger.exception(msg)
raise AstronomerQueryException(msg)
delete = next_param('{}_ID'.format(str(entity)), _id)
getattr(self.gremlin, entity_type)(delete).next().func('remove')
return self._add_gremlin_query(entity)
class Collection(object):
def __init__(self, mapper, response=None):
self.mapper = mapper
if not response:
response = lambda: None
response.data = []
self.response = response
self._entities = {}
self._index = 0
self._data_type = 'python'
def first(self):
return self[0]
def last(self):
return self[-1]
def get_data(self):
return [x for x in self.response.data]
data = property(get_data)
@property
def entity_data(self):
"""
this will get the instance data instead of the
raw data. This will use the mapper to create each
entity. Which may have a custom data attribute
"""
return [x.data for x in self]
@property
async def mapper_data(self):
"""this will get the data from the entity's mapper if it has a
custom mapper
"""
data = []
if len(self):
mapper = self.mapper.get_mapper(self[0])
for entity in self:
data.append(await mapper.data(entity))
return data
def __len__(self):
return len(self.response.data)
def __getitem__(self, key):
entity = self._entities.get(key, None)
if entity is None:
try:
data = self.response[key]
if data is not None:
entity = self.mapper.create(data=data,
data_type=self._data_type)
entity.dirty = False
self._entities[key] = entity
else:
raise StopIteration()
except Exception as e:
raise StopIteration()
return entity
def __setitem__(self, key, value):
self._entities[key] = value
def __delitem__(self, key):
if key in self._entities:
del self._entities[key]
def __iter__(self):
return self
def __next__(self):
entity = self[self._index]
self._index += 1
return entity
| [
"logging.getLogger",
"gremlinpy.gremlin.Param",
"collections.OrderedDict",
"random.randrange",
"inspect.isclass",
"gremlinpy.gremlin.Gremlin",
"re.sub"
] | [((459, 486), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (476, 486), False, 'import logging\n'), ((1057, 1082), 're.sub', 're.sub', (['"""\\\\W"""', '"""_"""', 'param'], {}), "('\\\\W', '_', param)\n", (1063, 1082), False, 'import re\n'), ((2440, 2453), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2451, 2453), False, 'from collections import OrderedDict\n'), ((14064, 14088), 'gremlinpy.gremlin.Gremlin', 'Gremlin', (['self.gremlin.gv'], {}), '(self.gremlin.gv)\n', (14071, 14088), False, 'from gremlinpy.gremlin import Gremlin, Param, AS\n'), ((21034, 21065), 'gremlinpy.gremlin.Gremlin', 'Gremlin', (['self.mapper.gremlin.gv'], {}), '(self.mapper.gremlin.gv)\n', (21041, 21065), False, 'from gremlinpy.gremlin import Gremlin, Param, AS\n'), ((27904, 27923), 'gremlinpy.gremlin.Gremlin', 'Gremlin', (['gremlin.gv'], {}), '(gremlin.gv)\n', (27911, 27923), False, 'from gremlinpy.gremlin import Gremlin, Param, AS\n'), ((1716, 1725), 'gremlinpy.gremlin.Gremlin', 'Gremlin', ([], {}), '()\n', (1723, 1725), False, 'from gremlinpy.gremlin import Gremlin, Param, AS\n'), ((7084, 7107), 'random.randrange', 'randrange', (['(0)', '(999999999)'], {}), '(0, 999999999)\n', (7093, 7107), False, 'from random import randrange\n'), ((11656, 11676), 'inspect.isclass', 'inspect.isclass', (['val'], {}), '(val)\n', (11671, 11676), False, 'import inspect\n'), ((22306, 22324), 'gremlinpy.gremlin.Param', 'Param', (['name', 'value'], {}), '(name, value)\n', (22311, 22324), False, 'from gremlinpy.gremlin import Gremlin, Param, AS\n'), ((23690, 23701), 'gremlinpy.gremlin.Gremlin', 'Gremlin', (['""""""'], {}), "('')\n", (23697, 23701), False, 'from gremlinpy.gremlin import Gremlin, Param, AS\n')] |
import sys
class MenuHandler(object):
    """Interactive remote-selection menu served over the client's SSH channel.

    Renders the list of configured remotes, reads the user's keystrokes one
    chunk at a time and records the chosen remote on the remote checker.
    """

    # Horizontal rule used to frame the menu.
    _SEPARATOR = '-------------------------------------------------\r\n'

    def __init__(self, client, client_channel, remote_checker):
        self._client = client
        self._client_channel = client_channel
        self._buffer = ''
        self._remote_checker = remote_checker

    @staticmethod
    def create_from_channel(client, client_channel, remote_checker) -> 'MenuHandler':
        """Factory helper mirroring the constructor."""
        return MenuHandler(client, client_channel, remote_checker)

    def _print_menu(self):
        """Render the framed remote-selection menu to the client channel."""
        send = self._client_channel.send
        send(self._SEPARATOR)
        send('| Welcome to Praetorian SSH proxy |\r\n')
        send(self._SEPARATOR)
        for counter, remote in enumerate(self._remote_checker.remote, 1):
            send('| ({:10}) {:30} {} |\r\n'.format(remote.project['name'], remote.name, counter))
        send(self._SEPARATOR)
        send('| {:43} {} |\r\n'.format('exit', len(self._remote_checker.remote) + 1))
        send(self._SEPARATOR)
        send('Choose your remote: ')

    def _close_and_exit(self):
        """Notify the user, close the connection and terminate the process."""
        self._client_channel.send('\n\rExiting ...\r\n')
        self._client.close()
        sys.exit(0)

    def _handle_enter(self):
        """Process a completed input line.

        Returns True once a remote has been chosen; exits the process on the
        'exit' choice; re-prompts (and clears the buffer) on anything else.
        """
        remotes = self._remote_checker.remote
        if self._buffer in map(str, range(1, len(remotes) + 1)):
            self._remote_checker.set_remote(remotes[int(self._buffer) - 1])
            self._client_channel.send('\n\rChosen remote {}.\r\n'.format(self._remote_checker.remote.name))
            return True
        if self._buffer == str(len(remotes) + 1):
            self._close_and_exit()
        self._client_channel.send('\n\rWrong option.\r\n')
        self._client_channel.send('Choose your option: ')
        self._buffer = ''
        return False

    def serve_remote_menu(self):
        """Run the menu loop until a remote is chosen (or the user exits)."""
        if self._remote_checker.is_remote_set:
            return
        self._print_menu()
        while True:
            data = self._client_channel.recv(1024)
            if not data:
                continue
            if data == b'\x7f':
                # BACKSPACE: drop the last buffered char and erase it on screen.
                self._buffer = self._buffer[:-1]
                self._client_channel.send('\b \b')
            elif data == b'\x03':
                # EXIT (CTRL+C)
                self._close_and_exit()
            elif data == b'\r':
                # ENTER
                if self._handle_enter():
                    return
            else:
                # Echo the keystroke back and accumulate it in the buffer.
                self._client_channel.send(data)
                self._buffer += data.decode()
| [
"sys.exit"
] | [((1873, 1884), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1881, 1884), False, 'import sys\n'), ((2522, 2533), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2530, 2533), False, 'import sys\n')] |
#!/usr/bin/env python3
# Calls Google Translate to produce translations.
# To use, set "language" and "dest_language" below. (They are normally the same,
# unless Google uses a different language code than we do.) Then fill in
# the definition_[language] fields with "TRANSLATE" or
# "TRANSLATE: [replacement definition]". The latter is to allow for a better
# translation when the original definition is ambiguous, e.g., if the definition
# is "launcher", a better translation might result from
# "TRANSLATE: rocket launcher".
from googletrans import Translator
import fileinput
import re
import time
# TODO: Refactor this and also use in renumber.py.
# Ignore mem-00-header.xml and mem-28-footer.xml because they don't contain entries.
filenames = ['mem-01-b.xml', 'mem-02-ch.xml', 'mem-03-D.xml', 'mem-04-gh.xml', 'mem-05-H.xml', 'mem-06-j.xml', 'mem-07-l.xml', 'mem-08-m.xml', 'mem-09-n.xml', 'mem-10-ng.xml', 'mem-11-p.xml', 'mem-12-q.xml', 'mem-13-Q.xml', 'mem-14-r.xml', 'mem-15-S.xml', 'mem-16-t.xml', 'mem-17-tlh.xml', 'mem-18-v.xml', 'mem-19-w.xml', 'mem-20-y.xml', 'mem-21-a.xml', 'mem-22-e.xml', 'mem-23-I.xml', 'mem-24-o.xml', 'mem-25-u.xml', 'mem-26-suffixes.xml', 'mem-27-extra.xml']
translator = Translator()
# "language" is the language code as it appears in the XML attribute names
# (hyphens become underscores, see the replace('-','_') below); "dest_language"
# is the code sent to Google Translate. They differ when Google uses a
# different code than the XML files do.
language = "zh-HK"
dest_language = "zh-TW"
# Maximum number of definitions to translate in one run (rate limiting).
limit = 250
for filename in filenames:
    # inplace=True redirects print() back into the file being edited, so every
    # line (modified or not) must be printed exactly once.
    with fileinput.FileInput(filename, inplace=True) as file:
        definition = ""
        for line in file:
            # Capture the most recent English definition seen in the file.
            definition_match = re.search(r"definition\">?(.+)<", line)
            # Match definition_<lang> entries whose text is "TRANSLATE" or
            # "TRANSLATE: <replacement definition>" (group 2 is the optional
            # replacement used for disambiguation).
            definition_translation_match = re.search(r"definition_(.+)\">TRANSLATE(?:: (.*))?<", line)
            if (definition_match):
                definition = definition_match.group(1)
            if (limit > 0 and \
                definition != "" and \
                definition_translation_match and \
                language.replace('-','_') == definition_translation_match.group(1)):
                if definition_translation_match.group(2):
                    definition = definition_translation_match.group(2)
                translation = translator.translate(definition, src='en', dest=dest_language)
                # Replace the tag content with the translation, tagged so that
                # machine output can be found and reviewed later.
                line = re.sub(r">(.*)<", ">%s [AUTOTRANSLATED]<" % translation.text, line)
                # Rate-limit calls to Google Translate.
                limit = limit - 1
                time.sleep(0.1)
            print(line, end='')
| [
"googletrans.Translator",
"time.sleep",
"fileinput.FileInput",
"re.sub",
"re.search"
] | [((1217, 1229), 'googletrans.Translator', 'Translator', ([], {}), '()\n', (1227, 1229), False, 'from googletrans import Translator\n'), ((1319, 1362), 'fileinput.FileInput', 'fileinput.FileInput', (['filename'], {'inplace': '(True)'}), '(filename, inplace=True)\n', (1338, 1362), False, 'import fileinput\n'), ((1439, 1478), 're.search', 're.search', (['"""definition\\\\">?(.+)<"""', 'line'], {}), '(\'definition\\\\">?(.+)<\', line)\n', (1448, 1478), False, 'import re\n'), ((1516, 1575), 're.search', 're.search', (['"""definition_(.+)\\\\">TRANSLATE(?:: (.*))?<"""', 'line'], {}), '(\'definition_(.+)\\\\">TRANSLATE(?:: (.*))?<\', line)\n', (1525, 1575), False, 'import re\n'), ((2048, 2114), 're.sub', 're.sub', (['""">(.*)<"""', "('>%s [AUTOTRANSLATED]<' % translation.text)", 'line'], {}), "('>(.*)<', '>%s [AUTOTRANSLATED]<' % translation.text, line)\n", (2054, 2114), False, 'import re\n'), ((2199, 2214), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2209, 2214), False, 'import time\n')] |
"""
Base classes and utilities for all Xena Manager (Xena) objects.
:author: <EMAIL>
"""
import time
import re
import logging
from collections import OrderedDict
from trafficgenerator.tgn_utils import TgnError
from trafficgenerator.tgn_object import TgnObject, TgnObjectsDict
logger = logging.getLogger(__name__)
class XenaAttributeError(TgnError):
    """ Raised when a Xena attribute operation fails - the chassis reports the
    attribute as not writable, the value as bad, or the attribute as unknown
    (see XenaObject.set_attributes / get_attribute). """
    pass
class XenaObjectsDict(TgnObjectsDict):
    """ Objects dictionary that also supports lookup by Xena object index. """

    def __getitem__(self, key):
        """ Override default implementation and allow access with index as well.

        :param key: object, object name, or Xena object index.
        :returns: the stored value, or None if no entry matches the key.
        """
        # Fix: the original called the base-class lookup twice (once for the
        # check, once for the return); do it once and reuse the result.
        value = TgnObjectsDict.__getitem__(self, key)
        if value is not None:
            return value
        # Fall back to matching the key against each stored object's index.
        for obj in self:
            if obj.index == key:
                return OrderedDict.__getitem__(self, obj)
class XenaObject(TgnObject):
    """ Base class for all Xena objects. """
    def __init__(self, **data):
        """Create the object under its parent.

        Shares the parent's session/chassis handles and derives 'objRef' and
        'name' from 'objType'/'index' when they are not supplied.
        """
        if data['parent']:
            self.session = data['parent'].session
            self.chassis = data['parent'].chassis
            if 'objRef' not in data:
                data['objRef'] = '{}/{}/{}'.format(data['parent'].ref, data['objType'], data['index'].split('/')[-1])
            if 'name' not in data:
                data['name'] = data['index']
        super(XenaObject, self).__init__(**data)
    def obj_index(self):
        """
        :return: object index.
        """
        return str(self._data['index'])
    # Expose obj_index as the read-only 'index' property.
    index = property(obj_index)
    def obj_id(self):
        """
        :return: object ID.
        """
        # The ID is the last element of the slash-separated index.
        return int(self.index.split('/')[-1]) if self.index else None
    # Expose obj_id as the read-only 'id' property.
    id = property(obj_id)
    def _create(self):
        # Delegate object creation to the API layer.
        self.api.create(self)
    def reserve(self, force=False):
        """ Reserve object.
        XenaManager-2G -> [Relinquish]/Reserve Chassis/Module/Port.
        :param force: True - take forcefully, False - fail if port is reserved by other user
        :raises TgnError: if reserved by another user and force is False.
        """
        reservation = self.get_attribute(self.cli_prefix + '_reservation')
        if reservation == 'RESERVED_BY_YOU':
            # Nothing to do - we already own the reservation.
            return
        elif reservation == 'RESERVED_BY_OTHER' and not force:
            reservedby = self.get_attribute(self.cli_prefix + '_reservedby')
            raise TgnError('Resource {} reserved by {}'.format(self, reservedby))
        # Either released or forcefully taking over - relinquish first.
        self.relinquish()
        self.send_command(self.cli_prefix + '_reservation', 'reserve')
    def relinquish(self):
        """ Relinquish object.
        XenaManager-2G -> Relinquish Chassis/Module/Port.
        """
        if self.get_attribute(self.cli_prefix + '_reservation') != 'RELEASED':
            # NOTE(review): the action is embedded in the command string here,
            # whereas reserve()/release() pass it as a separate argument -
            # confirm both forms are accepted by the CLI.
            self.send_command(self.cli_prefix + '_reservation relinquish')
    def release(self):
        """ Release object.
        XenaManager-2G -> Release Chassis/Module/Port.
        """
        if self.get_attribute(self.cli_prefix + '_reservation') == 'RESERVED_BY_YOU':
            self.send_command(self.cli_prefix + '_reservation release')
    def send_command(self, command, *arguments):
        """ Send command with no output.
        :param command: command to send.
        :param arguments: list of command arguments.
        """
        self.api.send_command(self, command, *arguments)
    def send_command_return(self, command, *arguments):
        """ Send command and wait for single line output. """
        return self.api.send_command_return(self, command, *arguments)
    def send_command_return_multilines(self, command, *arguments):
        """ Send command and wait for multiple lines output. """
        return self.api.send_command_return_multilines(self, command, *arguments)
    def set_attributes(self, **attributes):
        """ Sets list of attributes.
        :param attributes: dictionary of {attribute: value} to set.
        :raises XenaAttributeError: if the chassis reports the attribute as
            not writable or the value as bad.
        """
        try:
            self.api.set_attributes(self, **attributes)
        except Exception as e:
            # Translate chassis-side attribute errors; re-raise anything else.
            if '<notwritable>' in repr(e).lower() or '<badvalue>' in repr(e).lower():
                raise XenaAttributeError(e)
            else:
                raise e
    def get_attribute(self, attribute):
        """ Returns single object attribute.
        :param attribute: requested attribute to query.
        :returns: returned value.
        :rtype: str
        :raises XenaAttributeError: if the chassis reports a syntax error or
            the attribute is unknown.
        """
        try:
            return self.api.get_attribute(self, attribute)
        except Exception as e:
            # Translate chassis-side attribute errors; re-raise anything else.
            if '#syntax error' in repr(e).lower() or 'keyerror' in repr(e).lower():
                raise XenaAttributeError(e)
            else:
                raise e
    def get_attributes(self):
        """ Returns all object's attributes.
        :returns: dictionary of <name, value> of all attributes.
        :rtype: dict of (str, str)
        """
        return self.api.get_attributes(self)
    def wait_for_states(self, attribute, timeout=40, *states):
        """ Poll attribute (1 second interval) until one of the states is reached.

        The comparison is case insensitive.
        :param attribute: attribute to poll.
        :param timeout: maximum number of polls (~seconds).
        :param states: acceptable state values.
        :raises TgnError: if none of the states is reached within the timeout.
        """
        for _ in range(timeout):
            if self.get_attribute(attribute).lower() in [s.lower() for s in states]:
                return
            time.sleep(1)
        raise TgnError('{} failed to reach state {}, state is {} after {} seconds'.
                       format(attribute, states, self.get_attribute(attribute), timeout))
    def read_stat(self, captions, stat_name):
        """ Read a statistics group and zip the values with their captions.

        :param captions: statistic names, in the order returned by the chassis.
        :param stat_name: name of the statistics group to read.
        :returns: dict of {caption: value}.
        """
        return dict(zip(captions, self.api.get_stats(self, stat_name)))
    #
    # Private methods.
    #
    def _build_index_command(self, command, *arguments):
        # Builds '<index> <command> <arg1> <arg2> ...'.
        return ('{} {}' + len(arguments) * ' {}').format(self.index, command, *arguments)
    def _extract_return(self, command, index_command_value):
        # Strips the echoed '<index> <COMMAND>' prefix from the returned value.
        return re.sub('{}\s*{}\s*'.format(self.index, command.upper()), '', index_command_value)
    def _get_index_len(self):
        return len(self.index.split())
    def _get_command_len(self):
        # NOTE(review): identical to _get_index_len() - confirm whether the
        # command length is really meant to equal the index length here
        # (XenaObject21 overrides them with different values).
        return len(self.index.split())
class XenaObject21(XenaObject):
    """ Base class for Xena objects addressed as module/port plus a sub-index
    (index_len = 2, command_len = 1). """
    #
    # Private methods.
    #
    def _build_index_command(self, command, *arguments):
        # Builds 'module/port <command> [sid] <arg1> <arg2> ...'.
        module, port, sid = self.index.split('/')
        line = '{}/{} {} [{}]'.format(module, port, command, sid)
        if arguments:
            line += (' {}' * len(arguments)).format(*arguments)
        return line
    def _extract_return(self, command, index_command_value):
        # Strips the echoed 'module/port <COMMAND> [sid]' prefix from the reply.
        module, port, sid = self.index.split('/')
        pattern = r'{}/{}\s*{}\s*\[{}\]\s*'.format(module, port, command.upper(), sid)
        return re.sub(pattern, '', index_command_value)
    def _get_index_len(self):
        return 2
    def _get_command_len(self):
        return 1
| [
"logging.getLogger",
"collections.OrderedDict.__getitem__",
"trafficgenerator.tgn_object.TgnObjectsDict.__getitem__",
"time.sleep"
] | [((289, 316), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (306, 316), False, 'import logging\n'), ((534, 571), 'trafficgenerator.tgn_object.TgnObjectsDict.__getitem__', 'TgnObjectsDict.__getitem__', (['self', 'key'], {}), '(self, key)\n', (560, 571), False, 'from trafficgenerator.tgn_object import TgnObject, TgnObjectsDict\n'), ((604, 641), 'trafficgenerator.tgn_object.TgnObjectsDict.__getitem__', 'TgnObjectsDict.__getitem__', (['self', 'key'], {}), '(self, key)\n', (630, 641), False, 'from trafficgenerator.tgn_object import TgnObject, TgnObjectsDict\n'), ((4954, 4967), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4964, 4967), False, 'import time\n'), ((749, 783), 'collections.OrderedDict.__getitem__', 'OrderedDict.__getitem__', (['self', 'obj'], {}), '(self, obj)\n', (772, 783), False, 'from collections import OrderedDict\n')] |
# -*- coding: utf-8 -*-
import unittest
import os # noqa: F401
import json # noqa: F401
import time
import shutil
import re
import sys
import datetime
import collections
#import simplejson
from os import environ
try:
from ConfigParser import ConfigParser # py2
except:
from configparser import ConfigParser # py3
from pprint import pprint # noqa: F401
from GenomeFileUtil.GenomeFileUtilImpl import GenomeFileUtil
from GenomeFileUtil.core.GenomeInterface import GenomeInterface
from GenomeFileUtil.GenomeFileUtilImpl import SDKConfig
from GenomeFileUtil.GenomeFileUtilServer import MethodContext
from GenomeFileUtil.core.FastaGFFToGenome import FastaGFFToGenome
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.WorkspaceClient import Workspace as workspaceService
class FastaGFFToGenomeUploadTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Build the shared clients and config for all tests.

        Reads the auth token and deployment config from the environment,
        creates workspace/GenomeFileUtil/DataFileUtil clients and calls
        prepare_data().
        """
        print('setting up class')
        token = environ.get('KB_AUTH_TOKEN', None)
        # Fake a method context with minimal provenance for the service calls.
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': token,
                        'provenance': [
                            {'service': 'GenomeFileUtil',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []}
                        ],
                        'authenticated': 1})
        config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        # Copy the [GenomeFileUtil] section of the deploy config into a dict.
        for nameval in config.items('GenomeFileUtil'):
            cls.cfg[nameval[0]] = nameval[1]
        cls.wsURL = cls.cfg['workspace-url']
        cls.wsClient = workspaceService(cls.wsURL, token=token)
        cls.serviceImpl = GenomeFileUtil(cls.cfg)
        cls.dfu = DataFileUtil(os.environ['SDK_CALLBACK_URL'], token=token)
        cls.scratch = cls.cfg['scratch']
        cls.shockURL = cls.cfg['shock-url']
        cls.gfu_cfg = SDKConfig(cls.cfg)
        # Fixed workspace name used by these uploads (getWsName returns it).
        cls.wsName = "Phytozome_Genomes"
        cls.prepare_data()
# @classmethod
# def tearDownClass(cls):
# if hasattr(cls, 'wsName'):
# cls.wsClient.delete_workspace({'workspace': cls.wsName})
# print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_GenomeFileUtil_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
    @classmethod
    def prepare_data(cls):
        # Root directory holding the Phytozome genome releases used by the tests.
        cls.dtn_root = "/kb/module/genomes/Phytozome/"
def compile_ontology(self, Annotation_File, Identifier_Column):
annotations = dict()
#hardcoded header for now
annotation_header=[]
ontology_column=9
if not os.path.isfile(Annotation_File):
return annotations
with open(Annotation_File) as f:
for line in f:
line=line.strip()
if(line.startswith("#pacId")):
#Store header
annotation_header=line.split('\t')
continue
annotation_items=line.split('\t')
#Skip empty lines
if(len(annotation_items) <= 1 or len(annotation_items)<=ontology_column):
continue
#Skip empty ontology
if(annotation_items[ontology_column]==""):
continue
annotation_dict=dict()
for entry in annotation_items[ontology_column].split(","):
if(entry == ''):
continue
entry=entry.replace("GO:GO:","GO:")
annotation_dict[entry]=1
annotations[annotation_items[Identifier_Column]]=annotation_dict
return annotations
    def compile_functions(self, Functions_File, Identifier_Column=0,Functions_Column=1,EC_Column=-1):
        """Parse a tab-separated defline file into {identifier: {function: 1}}.

        :param Functions_File: path to the file; a missing file yields {}.
        :param Identifier_Column: column index holding the gene identifier.
        :param Functions_Column: column index holding the function text.
        :param EC_Column: column index of an EC number appended as
            " (EC <number>)"; -1 (default) disables the suffix.
        :returns: dict mapping identifier -> {function string: 1}.
        """
        functions = dict()
        if not os.path.isfile(Functions_File):
            return functions
        with open(Functions_File) as f:
            for line in f:
                line=line.strip()
                function_items=line.split('\t')
                # NOTE(review): this decrement mutates the parameter for ALL
                # remaining lines, not just the short one - confirm that a
                # permanent fallback to the previous column is intended.
                if(len(function_items) <= Functions_Column):
                    Functions_Column -= 1
                # Skip rows whose function field is empty.
                if(function_items[Functions_Column] == ""):
                    continue
                Function = function_items[Functions_Column]
                if(EC_Column != -1):
                    Function+=" (EC "+function_items[EC_Column]+")"
                if(function_items[Identifier_Column] not in functions):
                    functions[function_items[Identifier_Column]]=dict()
                functions[function_items[Identifier_Column]][Function]=1
        return functions
def compile_synonyms(self, Synonyms_File, Identifier_Column=0, Synonym_Column=1):
synonyms=dict()
if not os.path.isfile(Synonyms_File):
return synonyms
with open(Synonyms_File) as f:
for line in f:
line=line.strip()
synonyms_items=line.split('\t')
if(len(synonyms_items) <= Synonym_Column or synonyms_items[Synonym_Column] == ""):
continue
Synonym = synonyms_items[Synonym_Column]
if(synonyms_items[Identifier_Column] not in synonyms):
synonyms[synonyms_items[Identifier_Column]]=dict()
synonyms[synonyms_items[Identifier_Column]][Synonym]=1
return synonyms
    def test_phytozome_to_genome(self):
        """Integration test: bulk-load Phytozome genomes into the workspace.

        Walks the accepted Phytozome species/version table, converts each
        FASTA + GFF pair into a Genome object, then folds in functions,
        ontology terms and synonyms parsed from the Phytozome annotation
        files before re-saving the object.  Progress is written to
        data/Phytozome_Upload_Summary.txt.

        NOTE(review): depends on a local Phytozome release mirror under
        ``self.dtn_root`` -- not runnable without it; confirm the mirror
        layout before re-enabling.
        """
        #Read Species Names
        SN_File = os.path.join('data','Phytozome_Names.txt')
        Species_Names_Dict={}
        with open(SN_File) as f:
            for line in f:
                line=line.strip()
                array = line.split('\t')
                # column 0: short species code, column 1: scientific name
                Species_Names_Dict[array[0]]=array[1]
        GM_File = os.path.join('data','Accepted_Phytozome_Versions_GeneModels.txt')
        Species_Dict=dict()
        with open(GM_File) as f:
            for line in f:
                line=line.strip('\r\n')
                (phytozome_release,species,phytozome_identifier,genemodel_version,tax_id)=line.split('\t')
                if(species not in Species_Dict):
                    Species_Dict[species]=dict()
                Species_Dict[species][genemodel_version]={'release':phytozome_release,
                                                          'identifier':phytozome_identifier,
                                                          'tax_id':tax_id}
        # Genome names already listed in the summary file are skipped below.
        Species_Versions_to_Skip=dict()
        with open(os.path.join('data','Phytozome_Upload_Summary.txt')) as f:
            for line in f:
                line=line.strip()
                array = line.split('\t')
                Species_Version=array[0]
                Species_Versions_to_Skip[Species_Version]=1
        # Begin iterating through species to load them
        # NOTE(review): reopening the summary file in 'w' mode truncates the
        # skip list that was just read -- confirm this is intended.
        summary_file=open(os.path.join('data','Phytozome_Upload_Summary.txt'),'w')
        for species in sorted(Species_Dict):
            for version in sorted(Species_Dict[species]):
                Genome_Name = species+"_"+version
                if(Genome_Name in Species_Versions_to_Skip):
                    continue
                phytozome_version=Species_Dict[species][version]['release']
                sp=species
                #Special exception: Zmays PH207
                if(species == "Zmays" and "PH207" in version):
                    sp=species+version.split('_')[0]
                path = os.path.join(self.dtn_root,'Phytozome'+phytozome_version,sp)
                if(os.path.isdir(path) is not True):
                    print("Path Not Found: "+path)
                    continue
                # Some releases nest the data one directory deeper, under a
                # directory named after the version; descend if needed.
                has_assembly=False
                for files in os.listdir(path):
                    if(files=="assembly"):
                        has_assembly=True
                if(has_assembly is False):
                    for version_in_path in os.listdir(path):
                        if(version_in_path == version):
                            for files in os.listdir(os.path.join(path,version_in_path)):
                                if(files=="assembly"):
                                    path=os.path.join(path,version_in_path)
                                    has_assembly=True
                #Assembly file retrieval, should only find one, if any
                assembly_file = os.listdir(os.path.join(path,'assembly'))[0]
                #Annotation file retrieval, at least one, maybe two or three
                gff_file = ""
                functions_file = ""
                ontology_file = ""
                names_file = ""
                # Identify annotation files by substring of their file name.
                for ann_file in os.listdir(os.path.join(path,'annotation')):
                    if('gene' in ann_file):
                        gff_file = ann_file
                    elif('defline' in ann_file):
                        functions_file = ann_file
                    elif('info' in ann_file):
                        ontology_file = ann_file
                    elif('synonym' in ann_file):
                        names_file = ann_file
                Fa_Path = os.path.join(path,'assembly',assembly_file)
                Gff_Path = os.path.join(path,'annotation',gff_file)
                phytozome_version = phytozome_version.split('_')[0]
                tax_id = Species_Dict[species][version]['tax_id']
                input_params = {'fasta_file': {'path': Fa_Path},
                                'gff_file': {'path': Gff_Path},
                                'genome_name': Genome_Name,
                                'workspace_name': self.getWsName(),
                                'source': 'JGI Phytozome '+phytozome_version,
                                'source_id' : version,
                                'type': 'Reference',
                                'scientific_name': Species_Names_Dict[species],
                                'taxon_id': tax_id,
                                'genetic_code':1}
                result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
                # Load Genome Object in order to add additional data
                Genome_Result = self.dfu.get_objects({'object_refs':[self.wsName+'/'+Genome_Name]})['data'][0]
                Genome_Object = Genome_Result['data']
                ############################################################
                # Functions
                ###########################################################
                Functions_Path = os.path.join(path,'annotation',functions_file)
                Functions = self.compile_functions(Functions_Path,0,2,1)
                print("Functions compiled")
                summary_file.write(Genome_Name+'\t'+functions_file+'\t'+str(len(list(Functions)))+'\n')
                Found_Count={'features':0,'mrnas':0,'cdss':0}
                if(len(list(Functions))>0):
                    for key in Found_Count:
                        print("Searching: "+key+"\t"+Genome_Object[key][0]['id'])
                        for entity in Genome_Object[key]:
                            if(entity['id'] in Functions):
                                entity["functions"]=sorted(Functions[entity['id']].keys())
                                Found_Count[key]+=1
                # If no features were annotated, and mrnas were annotated
                # use parent_gene to do transfer annotation
                parent_feature_functions = collections.defaultdict(dict)
                if(Found_Count['features']==0 and Found_Count['mrnas']!=0):
                    #Create lookup dict
                    parent_feature_index = dict([(f['id'], i) for i, f in enumerate(Genome_Object['features'])])
                    for mrna in Genome_Object['mrnas']:
                        if('functions' in mrna):
                            parent_feature = parent_feature_index[mrna['parent_gene']]
                            for function in mrna['functions']:
                                parent_feature_functions[parent_feature][function]=1
                    for index in parent_feature_functions:
                        Genome_Object['features'][index]['functions']=sorted(parent_feature_functions[index].keys())
                        Found_Count['features']+=1
                summary_file.write(Genome_Name+'\t'+functions_file+'\t'+str(Found_Count)+'\n')
                ############################################################
                # Ontology
                ###########################################################
                #Parse Annotation File
                Annotation_Path = os.path.join(path,'annotation',ontology_file)
                Feature_Ontology = self.compile_ontology(Annotation_Path,1)
                mRNA_Ontology = self.compile_ontology(Annotation_Path,2)
                print("Ontology compiled")
                summary_file.write(Genome_Name+'\t'+ontology_file+'\t'+str(len(Feature_Ontology.keys()))+'\n')
                summary_file.write(Genome_Name+'\t'+ontology_file+'\t'+str(len(mRNA_Ontology.keys()))+'\n')
                #Retrieve OntologyDictionary
                Ontology_Dictionary = self.dfu.get_objects({'object_refs':["KBaseOntology/gene_ontology"]})['data'][0]['data']['term_hash']
                time_string = str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d_%H_%M_%S'))
                Found_Count={'features':0,'mrnas':0,'cdss':0}
                if(len(Feature_Ontology.keys())!=0 or len(mRNA_Ontology.keys())!=0):
                    for key in Found_Count:
                        print("Searching: "+key+"\t"+Genome_Object[key][0]['id'])
                        for entity in Genome_Object[key]:
                            # Attach GO terms found under the feature column.
                            if(entity['id'] in Feature_Ontology):
                                ontology_terms = dict()
                                ontology_terms["GO"]=dict()
                                for Ontology_Term in Feature_Ontology[entity["id"]].keys():
                                    if(Ontology_Term not in Ontology_Dictionary):
                                        continue
                                    if(Ontology_Term not in ontology_terms["GO"]):
                                        OntologyEvidence=[{"method":"GFF_Fasta_Genome_to_KBaseGenomes_Genome",
                                                           "timestamp":time_string,"method_version":"1.0"},
                                                          {"method":"Phytozome annotation_info.txt",
                                                           "timestamp":time_string,"method_version":"11"}]
                                        OntologyData={"id":Ontology_Term,
                                                      "ontology_ref":"KBaseOntology/gene_ontology",
                                                      "term_name":Ontology_Dictionary[Ontology_Term]["name"],
                                                      "term_lineage":[],
                                                      "evidence":OntologyEvidence}
                                        ontology_terms["GO"][Ontology_Term]=OntologyData
                                entity["ontology_terms"]=ontology_terms
                                Found_Count[key]+=1
                            # Same attachment for terms keyed by mRNA id; this
                            # overwrites any "ontology_terms" set just above.
                            if(entity['id'] in mRNA_Ontology):
                                ontology_terms = dict()
                                ontology_terms["GO"]=dict()
                                for Ontology_Term in mRNA_Ontology[entity["id"]].keys():
                                    if(Ontology_Term not in Ontology_Dictionary):
                                        continue
                                    if(Ontology_Term not in ontology_terms["GO"]):
                                        OntologyEvidence=[{"method":"GFF_Fasta_Genome_to_KBaseGenomes_Genome",
                                                           "timestamp":time_string,"method_version":"1.0"},
                                                          {"method":"Phytozome annotation_info.txt",
                                                           "timestamp":time_string,"method_version":"11"}]
                                        OntologyData={"id":Ontology_Term,
                                                      "ontology_ref":"KBaseOntology/gene_ontology",
                                                      "term_name":Ontology_Dictionary[Ontology_Term]["name"],
                                                      "term_lineage":[],
                                                      "evidence":OntologyEvidence}
                                        ontology_terms["GO"][Ontology_Term]=OntologyData
                                entity["ontology_terms"]=ontology_terms
                                Found_Count[key]+=1
                summary_file.write(Genome_Name+'\t'+ontology_file+'\t'+str(Found_Count)+'\n')
                ############################################################
                # Synonyms
                ###########################################################
                Synonyms_Path = os.path.join(path,'annotation',names_file)
                Synonyms = self.compile_synonyms(Synonyms_Path,0,1)
                print("Synonyms compiled")
                summary_file.write(Genome_Name+'\t'+names_file+'\t'+str(len(list(Synonyms)))+'\n')
                Found_Count={'features':0,'mrnas':0,'cdss':0}
                if(len(list(Synonyms))>0):
                    for key in Found_Count:
                        print("Searching: "+key+"\t"+Genome_Object[key][0]['id'])
                        for entity in Genome_Object[key]:
                            if(entity['id'] in Synonyms):
                                if("aliases" not in entity):
                                    entity["aliases"]=list()
                                for synonym in sorted(Synonyms[entity['id']]):
                                    entity["aliases"].append(["JGI",synonym])
                                Found_Count[key]+=1
                # If no features were annotated, and mrnas were annotated
                # use parent_gene to do transfer annotation
                parent_feature_synonyms = collections.defaultdict(dict)
                if(Found_Count['features']==0 and Found_Count['mrnas']!=0):
                    #Create lookup dict
                    parent_feature_index = dict([(f['id'], i) for i, f in enumerate(Genome_Object['features'])])
                    for mrna in Genome_Object['mrnas']:
                        if(mrna['id'] in Synonyms):
                            if("aliases" not in mrna):
                                mrna["aliases"]=list()
                            for synonym in sorted(Synonyms[mrna['id']]):
                                mrna["aliases"].append(["JGI",synonym])
                        if('aliases' in mrna):
                            parent_feature = parent_feature_index[mrna['parent_gene']]
                            for synonym in mrna['aliases']:
                                parent_feature_synonyms[parent_feature][synonym[1]]=1
                    for index in parent_feature_synonyms:
                        if("aliases" not in Genome_Object['features'][index]):
                            Genome_Object['features'][index]['aliases']=list()
                        for synonym in sorted(parent_feature_synonyms[index].keys()):
                            Genome_Object['features'][index]['aliases'].append(("JGI",synonym))
                        Found_Count['features']+=1
                summary_file.write(Genome_Name+'\t'+names_file+'\t'+str(Found_Count)+'\n')
                ############################################################
                # Saving
                ###########################################################
                #Save Genome Object
                #genome_string = simplejson.dumps(Genome_Object, sort_keys=True, indent=4, ensure_ascii=False)
                #genome_file = open(self.scratch+'/'+Genome_Name+'.json', 'w+')
                #genome_file.write(genome_string)
                #genome_file.close()
                #Retaining metadata
                Genome_Meta = Genome_Result['info'][10]
                Workspace_ID = Genome_Result['info'][6]
                save_result = self.getImpl().save_one_genome(self.getContext(),
                                                             {'workspace' : self.wsName,
                                                              'name' : Genome_Name,
                                                              'data' : Genome_Object,
                                                              'upgrade' : 1})
                #Saving metadata
                # Re-fetch so the saved object can be stored again with the
                # original metadata attached.
                Genome_Result = self.dfu.get_objects({'object_refs':[self.wsName+'/'+Genome_Name]})['data'][0]
                Genome_Object = Genome_Result['data']
                self.dfu.save_objects({'id':Workspace_ID,
                                       'objects' : [ {'type': 'KBaseGenomes.Genome',
                                                      'data': Genome_Object,
                                                      'meta' : Genome_Meta,
                                                      'name' : Genome_Name} ]})
        summary_file.flush()
        summary_file.close()
| [
"os.listdir",
"configparser.ConfigParser",
"GenomeFileUtil.GenomeFileUtilImpl.SDKConfig",
"os.environ.get",
"installed_clients.WorkspaceClient.Workspace",
"os.path.join",
"os.path.isfile",
"GenomeFileUtil.GenomeFileUtilServer.MethodContext",
"os.path.isdir",
"collections.defaultdict",
"time.time... | [((962, 996), 'os.environ.get', 'environ.get', (['"""KB_AUTH_TOKEN"""', 'None'], {}), "('KB_AUTH_TOKEN', None)\n", (973, 996), False, 'from os import environ\n'), ((1015, 1034), 'GenomeFileUtil.GenomeFileUtilServer.MethodContext', 'MethodContext', (['None'], {}), '(None)\n', (1028, 1034), False, 'from GenomeFileUtil.GenomeFileUtilServer import MethodContext\n'), ((1397, 1438), 'os.environ.get', 'environ.get', (['"""KB_DEPLOYMENT_CONFIG"""', 'None'], {}), "('KB_DEPLOYMENT_CONFIG', None)\n", (1408, 1438), False, 'from os import environ\n'), ((1477, 1491), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1489, 1491), False, 'from configparser import ConfigParser\n'), ((1693, 1733), 'installed_clients.WorkspaceClient.Workspace', 'workspaceService', (['cls.wsURL'], {'token': 'token'}), '(cls.wsURL, token=token)\n', (1709, 1733), True, 'from installed_clients.WorkspaceClient import Workspace as workspaceService\n'), ((1760, 1783), 'GenomeFileUtil.GenomeFileUtilImpl.GenomeFileUtil', 'GenomeFileUtil', (['cls.cfg'], {}), '(cls.cfg)\n', (1774, 1783), False, 'from GenomeFileUtil.GenomeFileUtilImpl import GenomeFileUtil\n'), ((1803, 1860), 'installed_clients.DataFileUtilClient.DataFileUtil', 'DataFileUtil', (["os.environ['SDK_CALLBACK_URL']"], {'token': 'token'}), "(os.environ['SDK_CALLBACK_URL'], token=token)\n", (1815, 1860), False, 'from installed_clients.DataFileUtilClient import DataFileUtil\n'), ((1968, 1986), 'GenomeFileUtil.GenomeFileUtilImpl.SDKConfig', 'SDKConfig', (['cls.cfg'], {}), '(cls.cfg)\n', (1977, 1986), False, 'from GenomeFileUtil.GenomeFileUtilImpl import SDKConfig\n'), ((5950, 5993), 'os.path.join', 'os.path.join', (['"""data"""', '"""Phytozome_Names.txt"""'], {}), "('data', 'Phytozome_Names.txt')\n", (5962, 5993), False, 'import os\n'), ((6231, 6297), 'os.path.join', 'os.path.join', (['"""data"""', '"""Accepted_Phytozome_Versions_GeneModels.txt"""'], {}), "('data', 'Accepted_Phytozome_Versions_GeneModels.txt')\n", (6243, 
6297), False, 'import os\n'), ((3080, 3111), 'os.path.isfile', 'os.path.isfile', (['Annotation_File'], {}), '(Annotation_File)\n', (3094, 3111), False, 'import os\n'), ((4285, 4315), 'os.path.isfile', 'os.path.isfile', (['Functions_File'], {}), '(Functions_File)\n', (4299, 4315), False, 'import os\n'), ((5228, 5257), 'os.path.isfile', 'os.path.isfile', (['Synonyms_File'], {}), '(Synonyms_File)\n', (5242, 5257), False, 'import os\n'), ((7290, 7342), 'os.path.join', 'os.path.join', (['"""data"""', '"""Phytozome_Upload_Summary.txt"""'], {}), "('data', 'Phytozome_Upload_Summary.txt')\n", (7302, 7342), False, 'import os\n'), ((2434, 2445), 'time.time', 'time.time', ([], {}), '()\n', (2443, 2445), False, 'import time\n'), ((6946, 6998), 'os.path.join', 'os.path.join', (['"""data"""', '"""Phytozome_Upload_Summary.txt"""'], {}), "('data', 'Phytozome_Upload_Summary.txt')\n", (6958, 6998), False, 'import os\n'), ((7884, 7948), 'os.path.join', 'os.path.join', (['self.dtn_root', "('Phytozome' + phytozome_version)", 'sp'], {}), "(self.dtn_root, 'Phytozome' + phytozome_version, sp)\n", (7896, 7948), False, 'import os\n'), ((8144, 8160), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (8154, 8160), False, 'import os\n'), ((9523, 9568), 'os.path.join', 'os.path.join', (['path', '"""assembly"""', 'assembly_file'], {}), "(path, 'assembly', assembly_file)\n", (9535, 9568), False, 'import os\n'), ((9594, 9636), 'os.path.join', 'os.path.join', (['path', '"""annotation"""', 'gff_file'], {}), "(path, 'annotation', gff_file)\n", (9606, 9636), False, 'import os\n'), ((10942, 10990), 'os.path.join', 'os.path.join', (['path', '"""annotation"""', 'functions_file'], {}), "(path, 'annotation', functions_file)\n", (10954, 10990), False, 'import os\n'), ((11881, 11910), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (11904, 11910), False, 'import collections\n'), ((13047, 13094), 'os.path.join', 'os.path.join', (['path', '"""annotation"""', 
'ontology_file'], {}), "(path, 'annotation', ontology_file)\n", (13059, 13094), False, 'import os\n'), ((17576, 17620), 'os.path.join', 'os.path.join', (['path', '"""annotation"""', 'names_file'], {}), "(path, 'annotation', names_file)\n", (17588, 17620), False, 'import os\n'), ((18686, 18715), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (18709, 18715), False, 'import collections\n'), ((7965, 7984), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (7978, 7984), False, 'import os\n'), ((8334, 8350), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (8344, 8350), False, 'import os\n'), ((9085, 9117), 'os.path.join', 'os.path.join', (['path', '"""annotation"""'], {}), "(path, 'annotation')\n", (9097, 9117), False, 'import os\n'), ((8797, 8827), 'os.path.join', 'os.path.join', (['path', '"""assembly"""'], {}), "(path, 'assembly')\n", (8809, 8827), False, 'import os\n'), ((8460, 8495), 'os.path.join', 'os.path.join', (['path', 'version_in_path'], {}), '(path, version_in_path)\n', (8472, 8495), False, 'import os\n'), ((13756, 13767), 'time.time', 'time.time', ([], {}), '()\n', (13765, 13767), False, 'import time\n'), ((8593, 8628), 'os.path.join', 'os.path.join', (['path', 'version_in_path'], {}), '(path, version_in_path)\n', (8605, 8628), False, 'import os\n')] |
#! /usr/bin/env python3
import sys
import random
import os
from faker import Factory as FFactory
# Output path and generation counts; both may be overridden on the command line.
OUTFILE = "samples.xmi"
NUM_SAMPLES = 10
NUM_COUNTRIES = 4
# XMI document skeleton; {0} receives the serialized persons and countries.
TEMPLATE = """<?xml version="1.0" encoding="ASCII"?>
<person:Root
xmi:version="2.0"
xmlns:xmi="http://www.omg.org/XMI"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:person="http://www.bestsolution.at/framework/grid/personsample/1.0"
xsi:schemaLocation="http://www.bestsolution.at/framework/grid/personsample/1.0 ../model/Person.xcore#/EPackage">
{0}
</person:Root>
"""
# Per-record templates; positional fields are filled by fake_xmi().
TEMPLATE_COUNTRY = """<countries name="{0}"/>"""
TEMPLATE_PERSON = """<persons firstname="{0}"
lastname="{1}"
gender="{2}"
married="{3}"
birthdate="{4}">
<address
street="{5}"
number="{6}"
zipcode="{7}"
city="{8}"
country="//@countries.{9}"/>
</persons>
"""
# Accumulators filled by fake_xmi().
COUNTRIES = []
PERSONS = []
def fake_xmi():
    """Populate PERSONS and COUNTRIES with fake records and write OUTFILE.

    Uses the module-level templates and counters; the generated XMI document
    contains all persons followed by all countries.
    """
    faker = FFactory.create()
    for _ in range(NUM_SAMPLES):
        PERSONS.append(TEMPLATE_PERSON.format(
            faker.first_name(),
            faker.last_name(),
            "MALE" if faker.boolean() is True else "FEMALE",
            faker.boolean(),
            faker.iso8601(),
            faker.street_name(),
            faker.building_number(),
            faker.postcode(),
            faker.city(),
            # Reference one of the generated countries by index.
            random.randint(0, NUM_COUNTRIES - 1),
        ))
    COUNTRIES.extend(
        TEMPLATE_COUNTRY.format(faker.country()) for _ in range(NUM_COUNTRIES)
    )
    document_body = os.linesep.join(
        [os.linesep.join(PERSONS), os.linesep.join(COUNTRIES)]
    )
    with open(OUTFILE, "w") as text_file:
        text_file.write(TEMPLATE.format(document_body))
if __name__ == "__main__":
    # Optional "-n COUNT" flag sets how many person samples to generate.
    if "-n" in sys.argv:
        position_param = sys.argv.index("-n")
        NUM_SAMPLES = int(sys.argv[position_param + 1])
        # Remove the flag and its value so only positional arguments remain.
        sys.argv.pop(position_param)
        sys.argv.pop(position_param)
    # A remaining positional argument overrides the output file name.
    if len(sys.argv) > 1:
        OUTFILE = sys.argv.pop()
    print("Writing samples to {0}.".format(OUTFILE))
    fake_xmi()
| [
"os.linesep.join",
"faker.Factory.create",
"sys.argv.index",
"random.randint",
"sys.argv.pop"
] | [((952, 969), 'faker.Factory.create', 'FFactory.create', ([], {}), '()\n', (967, 969), True, 'from faker import Factory as FFactory\n'), ((1935, 1955), 'sys.argv.index', 'sys.argv.index', (['"""-n"""'], {}), "('-n')\n", (1949, 1955), False, 'import sys\n'), ((2020, 2048), 'sys.argv.pop', 'sys.argv.pop', (['position_param'], {}), '(position_param)\n', (2032, 2048), False, 'import sys\n'), ((2057, 2085), 'sys.argv.pop', 'sys.argv.pop', (['position_param'], {}), '(position_param)\n', (2069, 2085), False, 'import sys\n'), ((2131, 2145), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (2143, 2145), False, 'import sys\n'), ((1423, 1459), 'random.randint', 'random.randint', (['(0)', '(NUM_COUNTRIES - 1)'], {}), '(0, NUM_COUNTRIES - 1)\n', (1437, 1459), False, 'import random\n'), ((1760, 1784), 'os.linesep.join', 'os.linesep.join', (['PERSONS'], {}), '(PERSONS)\n', (1775, 1784), False, 'import os\n'), ((1786, 1812), 'os.linesep.join', 'os.linesep.join', (['COUNTRIES'], {}), '(COUNTRIES)\n', (1801, 1812), False, 'import os\n')] |
from unittest import TestCase
from unittest.mock import Mock, patch
from typeseam.app import (
load_initial_data,
)
class TestModels(TestCase):
    """Unit tests for typeseam.app model/bootstrap helpers."""

    @patch('typeseam.app.os.environ.get')
    def test_load_initial_data(self, env_get):
        # Fake app whose app_context() yields a context-manager mock.
        context_manager = Mock(__enter__=Mock(), __exit__=Mock())
        fake_app = Mock(app_context=Mock(return_value=context_manager))
        load_initial_data(fake_app)
"typeseam.app.load_initial_data",
"unittest.mock.patch",
"unittest.mock.Mock"
] | [((161, 197), 'unittest.mock.patch', 'patch', (['"""typeseam.app.os.environ.get"""'], {}), "('typeseam.app.os.environ.get')\n", (166, 197), False, 'from unittest.mock import Mock, patch\n'), ((357, 378), 'unittest.mock.Mock', 'Mock', ([], {'app_context': 'ctx'}), '(app_context=ctx)\n', (361, 378), False, 'from unittest.mock import Mock, patch\n'), ((387, 409), 'typeseam.app.load_initial_data', 'load_initial_data', (['app'], {}), '(app)\n', (404, 409), False, 'from typeseam.app import load_initial_data\n'), ((304, 310), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (308, 310), False, 'from unittest.mock import Mock, patch\n'), ((334, 340), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (338, 340), False, 'from unittest.mock import Mock, patch\n')] |
import sys
sys.path.append('../')
from abc import ABCMeta, abstractmethod
# https://www.python-course.eu/python3_abstract_classes.php
import logging
import oandapyV20
from oandapyV20 import API
import oandapyV20.endpoints.orders as orders
from oandapyV20.contrib.requests import MarketOrderRequest
class ExecutionHandler(object, metaclass=ABCMeta):
    """
    Provides an abstract base class to handle all execution in the backtesting
    and live trading system.

    Fix: the original used the Python 2 idiom ``__metaclass__ = ABCMeta``,
    which is silently ignored in Python 3, so the class was never actually
    abstract.  Declaring ``metaclass=ABCMeta`` makes instantiating this class
    (or a subclass that omits ``execute_order``) raise TypeError as intended.
    """

    @abstractmethod
    def execute_order(self):
        """
        Send the order to the brokerage
        """
        raise NotImplementedError("Should implement execute_order()")
class SimulatedExecution(object):
    """
    A no-op execution environment for simulations.

    Orders handed to ``execute_order`` are simply discarded; actual fill
    handling is provided by the Portfolio object instead.  This will be
    modified in later versions.
    """

    def execute_order(self, event):
        # Intentionally a no-op: the simulated broker "accepts" every order.
        pass
class OANDAExecutionHandler(ExecutionHandler):
    """Execution handler that submits market orders to OANDA via oandapyV20."""

    def __init__(self, domain, access_token, account_id):
        # domain: OANDA API domain (e.g. practice/live) -- stored but not
        # used in this block; access_token: API token; account_id: the
        # account orders are placed against.
        self.domain = domain
        self.access_token = access_token
        self.account_id = account_id
        # NOTE(review): "OADAN" is a typo for "OANDA"; kept as-is because
        # renaming would change the class interface.
        self.client = self.create_OADAN_client()
        self.logger = logging.getLogger(__name__)

    def create_OADAN_client(self):
        # Build the oandapyV20 REST client from the stored access token.
        return API(self.access_token)

    def execute_order(self, event):
        """Translate an order event into an OANDA market order and submit it.

        Assumes ``event.instrument`` is a 6-letter pair like "EURUSD" and
        ``event.units`` is a signed unit count -- TODO confirm against the
        event producer.
        """
        print("execute order")
        # Convert "EURUSD" -> "EUR_USD", the format the OANDA API expects.
        instrument = "%s_%s" % (event.instrument[:3], event.instrument[3:])
        units = event.units
        #Market order
        mo = MarketOrderRequest(instrument=instrument, units=units)
        print(mo)
        # Create order request
        request = orders.OrderCreate(self.account_id, data=mo.data)
        print(request)
        # perform the request
        rv = self.client.request(request)
        print(rv)
        self.logger.debug(rv)
| [
"logging.getLogger",
"oandapyV20.endpoints.orders.OrderCreate",
"oandapyV20.contrib.requests.MarketOrderRequest",
"sys.path.append",
"oandapyV20.API"
] | [((12, 34), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (27, 34), False, 'import sys\n'), ((1387, 1414), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1404, 1414), False, 'import logging\n'), ((1487, 1509), 'oandapyV20.API', 'API', (['self.access_token'], {}), '(self.access_token)\n', (1490, 1509), False, 'from oandapyV20 import API\n'), ((1758, 1812), 'oandapyV20.contrib.requests.MarketOrderRequest', 'MarketOrderRequest', ([], {'instrument': 'instrument', 'units': 'units'}), '(instrument=instrument, units=units)\n', (1776, 1812), False, 'from oandapyV20.contrib.requests import MarketOrderRequest\n'), ((1883, 1932), 'oandapyV20.endpoints.orders.OrderCreate', 'orders.OrderCreate', (['self.account_id'], {'data': 'mo.data'}), '(self.account_id, data=mo.data)\n', (1901, 1932), True, 'import oandapyV20.endpoints.orders as orders\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
'''
Use dependency versions from a conda environment .yaml file to update
a recipe/meta.yaml file of a given package. Such an input file can
be created from the given environment with:
conda env export > myenv.yaml
'''
import sys, os, re

# Expect two arguments: the conda env export and the feedstock directory.
if len(sys.argv) < 3:
    print("Usage: " + os.path.basename(sys.argv[0]) + " input.yaml mypackage-feedstock")
    sys.exit(1)

inFile = sys.argv[1]
outDir = sys.argv[2]
# The recipe to update lives at <feedstock>/recipe/meta.yaml.
outFile = outDir + "/recipe/meta.yaml"
if not os.path.exists(outFile):
    print("Cannot open file: " + outFile)
    sys.exit(1)
# Parse the package versions from the conda environment file into
# conda_env: {package_name: version}.
conda_env = {}
print("Reading: " + inFile)
# Fixes: regexes are now raw strings ('\#' and '\s' in plain strings are
# invalid escape sequences and a SyntaxWarning / future SyntaxError), and
# the input handle is closed via a context manager instead of leaking.
with open(inFile, 'r') as inHandle:
    lines = inHandle.readlines()
for line in lines:
    # Wipe comments
    m = re.match(r'^(.*?)\#', line)
    if m:
        line = m.group(1)
    # Match dependency lines of the form "- package=version(=build)".
    m = re.match(r'^\s*-\s*(.*?)\s*=+\s*(.*?)(=|\s|$)', line)
    if not m:
        continue
    package = m.group(1)
    version = m.group(2)
    if re.match(r'^\s*$', package):
        continue  # ignore empty lines
    conda_env[package] = version
# Update the dependency lines in the output file using the versions
# collected in conda_env.
with open(outFile, 'r') as outHandle:
    lines = outHandle.readlines()
for it in range(len(lines)):
    line = lines[it]
    # Ignore comments
    m = re.match(r'^\#', line)
    if m:
        continue
    # Match dependency lines of the form "  - package <spaces> version".
    m = re.match(r'^(\s+-[\t ]+)([^\s]+)(\s*)(.*?)$', line)
    if not m:
        continue
    pre = m.group(1)
    package = m.group(2)
    spaces = m.group(3).rstrip("\n")
    old_version = m.group(4).rstrip("\n")
    if spaces == "":
        # Ensure there's at least one space
        spaces = " "
    if old_version == "":
        # If there was no version before, don't put one now
        continue
    if package not in conda_env:
        continue
    version = conda_env[package]
    if old_version != version:
        if ('[linux]' in old_version) or ('[osx]' in old_version):
            # In this case the user better take a closer look
            print("For package " + package + ", not replacing " +
                  old_version + " with " + version + ", a closer look is suggested.")
        else:
            print("For package " + package + ", replacing version "
                  + old_version + " with " + version)
            # Fixes: the rewrite now happens only in the branch that
            # announced a replacement (previously it also ran for the
            # "[linux]/[osx]" case despite printing "not replacing"), and
            # the stray "." that was appended before the newline is gone.
            lines[it] = pre + package + spaces + version + "\n"

# Save the updated lines to disk
print("Updating: " + outFile)
with open(outFile, "w") as outHandle:
    outHandle.writelines(lines)
| [
"os.path.exists",
"re.match",
"os.path.basename",
"sys.exit"
] | [((1179, 1190), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1187, 1190), False, 'import sys, os, re\n'), ((1285, 1308), 'os.path.exists', 'os.path.exists', (['outFile'], {}), '(outFile)\n', (1299, 1308), False, 'import sys, os, re\n'), ((1356, 1367), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1364, 1367), False, 'import sys, os, re\n'), ((1558, 1585), 're.match', 're.match', (['"""^(.*?)\\\\#"""', 'line'], {}), "('^(.*?)\\\\#', line)\n", (1566, 1585), False, 'import sys, os, re\n'), ((1662, 1719), 're.match', 're.match', (['"""^\\\\s*-\\\\s*(.*?)\\\\s*=+\\\\s*(.*?)(=|\\\\s|$)"""', 'line'], {}), "('^\\\\s*-\\\\s*(.*?)\\\\s*=+\\\\s*(.*?)(=|\\\\s|$)', line)\n", (1670, 1719), False, 'import sys, os, re\n'), ((1803, 1830), 're.match', 're.match', (['"""^\\\\s*$"""', 'package'], {}), "('^\\\\s*$', package)\n", (1811, 1830), False, 'import sys, os, re\n'), ((2125, 2147), 're.match', 're.match', (['"""^\\\\#"""', 'line'], {}), "('^\\\\#', line)\n", (2133, 2147), False, 'import sys, os, re\n'), ((2207, 2260), 're.match', 're.match', (['"""^(\\\\s+-[\t ]+)([^\\\\s]+)(\\\\s*)(.*?)$"""', 'line'], {}), "('^(\\\\s+-[\\t ]+)([^\\\\s]+)(\\\\s*)(.*?)$', line)\n", (2215, 2260), False, 'import sys, os, re\n'), ((1108, 1137), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1124, 1137), False, 'import sys, os, re\n')] |
import cv2
import ProcessWithCV2

# Load the two images to compare (BGR arrays; None if the path is missing).
img1 = cv2.imread("D:/py/chinese/7.png")
img2 = cv2.imread("D:/py/chinese/8.png")
# Compare the images via a difference hash.
# NOTE(review): the meaning of the third argument (1) is defined by
# ProcessWithCV2.dHash -- confirm against that module.
a = ProcessWithCV2.dHash(img1, img2, 1)
print(a)
"ProcessWithCV2.dHash",
"cv2.imread"
] | [((44, 77), 'cv2.imread', 'cv2.imread', (['"""D:/py/chinese/7.png"""'], {}), "('D:/py/chinese/7.png')\n", (54, 77), False, 'import cv2\n'), ((86, 119), 'cv2.imread', 'cv2.imread', (['"""D:/py/chinese/8.png"""'], {}), "('D:/py/chinese/8.png')\n", (96, 119), False, 'import cv2\n'), ((125, 160), 'ProcessWithCV2.dHash', 'ProcessWithCV2.dHash', (['img1', 'img2', '(1)'], {}), '(img1, img2, 1)\n', (145, 160), False, 'import ProcessWithCV2\n')] |