hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3a66f6b02d61600747710a4b6d9b3fe30d5a5e | 7,734 | py | Python | cogs/logging.py | poketwo/support | 207b7da2e06e2b4cd044a18b0093dc5282125630 | [
"MIT"
] | null | null | null | cogs/logging.py | poketwo/support | 207b7da2e06e2b4cd044a18b0093dc5282125630 | [
"MIT"
] | null | null | null | cogs/logging.py | poketwo/support | 207b7da2e06e2b4cd044a18b0093dc5282125630 | [
"MIT"
] | null | null | null | import logging
from datetime import datetime, timezone
import discord
from discord.ext import commands, tasks
from helpers import checks
from pymongo import UpdateOne
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s: %(message)s")
class Logging(commands.Cog):
"""For logging."""
def __init__(self, bot):
self.bot = bot
self.log = logging.getLogger(f"Support")
handler = logging.FileHandler(f"logs/support.log")
handler.setFormatter(formatter)
self.log.handlers = [handler]
dlog = logging.getLogger("discord")
dhandler = logging.FileHandler(f"logs/discord.log")
dhandler.setFormatter(formatter)
dlog.handlers = [dhandler]
self.log.setLevel(logging.DEBUG)
dlog.setLevel(logging.INFO)
self.sync_all.start()
@tasks.loop(minutes=20)
async def sync_all(self):
for guild in self.bot.guilds:
await self.full_sync_guild(guild)
@sync_all.before_loop
async def before_sync_all(self):
return await self.bot.wait_until_ready()
def serialize_role(self, role):
return {
"id": role.id,
"name": role.name,
"color": role.color.value,
"position": role.position,
}
async def full_sync_guild(self, guild):
await self.bot.mongo.db.guild.bulk_write([self.make_sync_guild(guild)])
await self.bot.mongo.db.channel.bulk_write([self.make_sync_channel(channel) for channel in guild.channels])
await self.bot.mongo.db.member.bulk_write([self.make_sync_member(member) for member in guild.members])
def make_sync_guild(self, guild):
return UpdateOne(
{"_id": guild.id},
{
"$set": {
"name": guild.name,
"icon": str(guild.icon.url),
"roles": [self.serialize_role(x) for x in guild.roles],
}
},
upsert=True,
)
def make_sync_channel(self, channel):
base = {
"guild_id": channel.guild.id,
"type": str(channel.type),
"name": channel.name,
"position": channel.position,
}
if isinstance(channel, (discord.TextChannel, discord.VoiceChannel)):
base["category_id"] = channel.category_id
if isinstance(channel, discord.TextChannel):
base["last_message_id"] = channel.last_message_id
return UpdateOne({"_id": channel.id}, {"$set": base}, upsert=True)
def make_sync_member(self, member):
return UpdateOne(
{"_id": member.id, "guild_id": member.guild.id},
{
"$set": {
"name": member.name,
"discriminator": member.discriminator,
"nick": member.nick,
"avatar": str(member.display_avatar.url),
"roles": [x.id for x in member.roles],
}
},
upsert=True,
)
@commands.Cog.listener(name="on_guild_join")
@commands.Cog.listener(name="on_guild_update")
async def on_guild_updates(self, *args):
await self.bot.mongo.db.guild.bulk_write([self.make_sync_guild(args[-1])])
@commands.Cog.listener(name="on_member_join")
@commands.Cog.listener(name="on_member_update")
async def on_member_updates(self, *args):
thing = args[-1]
await self.bot.mongo.db.member.bulk_write([self.make_sync_member(thing)])
@commands.Cog.listener()
async def on_user_update(self, _, new):
for guild in self.bot.guilds:
member = guild.get_member(new.id)
if member is None:
continue
await self.bot.mongo.db.member.bulk_write([self.make_sync_member(member)])
@commands.Cog.listener()
async def on_guild_join(self, guild):
for channel in guild.channels:
await self.bot.mongo.db.channel.bulk_write([self.make_sync_channel(channel)])
@commands.Cog.listener(name="on_guild_channel_create")
@commands.Cog.listener(name="on_guild_channel_update")
async def on_guild_channel_updates(self, *args):
await self.bot.mongo.db.channel.bulk_write([self.make_sync_channel(args[-1])])
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel):
await self.bot.mongo.db.channel.delete_one({"_id": channel.id})
@commands.Cog.listener()
async def on_message(self, message):
if message.guild is None:
return
time = int(message.created_at.replace(tzinfo=timezone.utc).timestamp() - 3600)
await self.bot.mongo.db.message.insert_one(
{
"_id": message.id,
"user_id": message.author.id,
"channel_id": message.channel.id,
"guild_id": message.guild.id,
"history": {str(time): message.content},
"attachments": [
{"id": attachment.id, "filename": attachment.filename} for attachment in message.attachments
],
"deleted_at": None,
}
)
await self.bot.mongo.db.channel.update_one(
{"_id": message.channel.id}, {"$set": {"last_message_id": message.id}}
)
@commands.Cog.listener()
async def on_raw_message_edit(self, payload):
if "content" not in payload.data:
return
time = int(datetime.now().timestamp()) - 3600
await self.bot.mongo.db.message.update_one(
{"_id": payload.message_id},
{"$set": {f"history.{time}": payload.data["content"]}},
)
@commands.Cog.listener()
async def on_raw_message_delete(self, payload):
if payload.cached_message is not None:
for attachment in payload.cached_message.attachments:
fn = f"attachments/{attachment.id}_{attachment.filename}"
self.bot.loop.create_task(attachment.save(fn, use_cached=True))
await self.bot.mongo.db.message.update_one(
{"_id": payload.message_id},
{"$set": {"deleted_at": datetime.now(timezone.utc)}},
)
@commands.Cog.listener()
async def on_raw_bulk_message_delete(self, payload):
await self.bot.mongo.db.message.update_many(
{"_id": {"$in": list(payload.message_ids)}},
{"$set": {"deleted_at": datetime.now(timezone.utc)}},
)
@commands.group(invoke_without_command=True)
@checks.is_trial_moderator()
async def logs(self, ctx, *, channel: discord.TextChannel = None):
"""Gets a link to the message logs for a channel.
You must have the Trial Moderator role to use this.
"""
channel = channel or ctx.channel
await ctx.send(f"https://admin.poketwo.net/logs/{channel.guild.id}/{channel.id}")
@logs.command()
@checks.is_community_manager()
async def restrict(self, ctx, channel: discord.TextChannel = None):
"""Restricts the logs for a channel to Admins.
You must have the Community Manager role to use this.
"""
channel = channel or ctx.channel
await self.bot.mongo.db.channel.update_one({"_id": channel.id}, {"$set": {"restricted": True}})
await ctx.send(f"Restricted logs for **#{channel}** to Admins.")
@commands.command()
async def fullsync(self, ctx):
await ctx.send("Starting full guild resync...")
await self.full_sync_guild(ctx.guild)
await ctx.send("Completed full guild resync.")
async def cog_unload(self):
self.sync_all.cancel()
async def setup(bot):
await bot.add_cog(Logging(bot))
| 35.805556 | 115 | 0.604603 | import logging
from datetime import datetime, timezone
import discord
from discord.ext import commands, tasks
from helpers import checks
from pymongo import UpdateOne
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s: %(message)s")
class Logging(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.log = logging.getLogger(f"Support")
handler = logging.FileHandler(f"logs/support.log")
handler.setFormatter(formatter)
self.log.handlers = [handler]
dlog = logging.getLogger("discord")
dhandler = logging.FileHandler(f"logs/discord.log")
dhandler.setFormatter(formatter)
dlog.handlers = [dhandler]
self.log.setLevel(logging.DEBUG)
dlog.setLevel(logging.INFO)
self.sync_all.start()
@tasks.loop(minutes=20)
async def sync_all(self):
for guild in self.bot.guilds:
await self.full_sync_guild(guild)
@sync_all.before_loop
async def before_sync_all(self):
return await self.bot.wait_until_ready()
def serialize_role(self, role):
return {
"id": role.id,
"name": role.name,
"color": role.color.value,
"position": role.position,
}
async def full_sync_guild(self, guild):
await self.bot.mongo.db.guild.bulk_write([self.make_sync_guild(guild)])
await self.bot.mongo.db.channel.bulk_write([self.make_sync_channel(channel) for channel in guild.channels])
await self.bot.mongo.db.member.bulk_write([self.make_sync_member(member) for member in guild.members])
def make_sync_guild(self, guild):
return UpdateOne(
{"_id": guild.id},
{
"$set": {
"name": guild.name,
"icon": str(guild.icon.url),
"roles": [self.serialize_role(x) for x in guild.roles],
}
},
upsert=True,
)
def make_sync_channel(self, channel):
base = {
"guild_id": channel.guild.id,
"type": str(channel.type),
"name": channel.name,
"position": channel.position,
}
if isinstance(channel, (discord.TextChannel, discord.VoiceChannel)):
base["category_id"] = channel.category_id
if isinstance(channel, discord.TextChannel):
base["last_message_id"] = channel.last_message_id
return UpdateOne({"_id": channel.id}, {"$set": base}, upsert=True)
def make_sync_member(self, member):
return UpdateOne(
{"_id": member.id, "guild_id": member.guild.id},
{
"$set": {
"name": member.name,
"discriminator": member.discriminator,
"nick": member.nick,
"avatar": str(member.display_avatar.url),
"roles": [x.id for x in member.roles],
}
},
upsert=True,
)
@commands.Cog.listener(name="on_guild_join")
@commands.Cog.listener(name="on_guild_update")
async def on_guild_updates(self, *args):
await self.bot.mongo.db.guild.bulk_write([self.make_sync_guild(args[-1])])
@commands.Cog.listener(name="on_member_join")
@commands.Cog.listener(name="on_member_update")
async def on_member_updates(self, *args):
thing = args[-1]
await self.bot.mongo.db.member.bulk_write([self.make_sync_member(thing)])
@commands.Cog.listener()
async def on_user_update(self, _, new):
for guild in self.bot.guilds:
member = guild.get_member(new.id)
if member is None:
continue
await self.bot.mongo.db.member.bulk_write([self.make_sync_member(member)])
@commands.Cog.listener()
async def on_guild_join(self, guild):
for channel in guild.channels:
await self.bot.mongo.db.channel.bulk_write([self.make_sync_channel(channel)])
@commands.Cog.listener(name="on_guild_channel_create")
@commands.Cog.listener(name="on_guild_channel_update")
async def on_guild_channel_updates(self, *args):
await self.bot.mongo.db.channel.bulk_write([self.make_sync_channel(args[-1])])
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel):
await self.bot.mongo.db.channel.delete_one({"_id": channel.id})
@commands.Cog.listener()
async def on_message(self, message):
if message.guild is None:
return
time = int(message.created_at.replace(tzinfo=timezone.utc).timestamp() - 3600)
await self.bot.mongo.db.message.insert_one(
{
"_id": message.id,
"user_id": message.author.id,
"channel_id": message.channel.id,
"guild_id": message.guild.id,
"history": {str(time): message.content},
"attachments": [
{"id": attachment.id, "filename": attachment.filename} for attachment in message.attachments
],
"deleted_at": None,
}
)
await self.bot.mongo.db.channel.update_one(
{"_id": message.channel.id}, {"$set": {"last_message_id": message.id}}
)
@commands.Cog.listener()
async def on_raw_message_edit(self, payload):
if "content" not in payload.data:
return
time = int(datetime.now().timestamp()) - 3600
await self.bot.mongo.db.message.update_one(
{"_id": payload.message_id},
{"$set": {f"history.{time}": payload.data["content"]}},
)
@commands.Cog.listener()
async def on_raw_message_delete(self, payload):
if payload.cached_message is not None:
for attachment in payload.cached_message.attachments:
fn = f"attachments/{attachment.id}_{attachment.filename}"
self.bot.loop.create_task(attachment.save(fn, use_cached=True))
await self.bot.mongo.db.message.update_one(
{"_id": payload.message_id},
{"$set": {"deleted_at": datetime.now(timezone.utc)}},
)
@commands.Cog.listener()
async def on_raw_bulk_message_delete(self, payload):
await self.bot.mongo.db.message.update_many(
{"_id": {"$in": list(payload.message_ids)}},
{"$set": {"deleted_at": datetime.now(timezone.utc)}},
)
@commands.group(invoke_without_command=True)
@checks.is_trial_moderator()
async def logs(self, ctx, *, channel: discord.TextChannel = None):
channel = channel or ctx.channel
await ctx.send(f"https://admin.poketwo.net/logs/{channel.guild.id}/{channel.id}")
@logs.command()
@checks.is_community_manager()
async def restrict(self, ctx, channel: discord.TextChannel = None):
channel = channel or ctx.channel
await self.bot.mongo.db.channel.update_one({"_id": channel.id}, {"$set": {"restricted": True}})
await ctx.send(f"Restricted logs for **#{channel}** to Admins.")
@commands.command()
async def fullsync(self, ctx):
await ctx.send("Starting full guild resync...")
await self.full_sync_guild(ctx.guild)
await ctx.send("Completed full guild resync.")
async def cog_unload(self):
self.sync_all.cancel()
async def setup(bot):
await bot.add_cog(Logging(bot))
| true | true |
1c3a67f764910581a850d71f3429daa017619b4d | 748 | py | Python | All Tracks/Core CS/Algorithms/Implementation/Between Two Sets/Solution.py | Shaikh-Nabeel/HackerRank-Solutions | b69f4e3b78d75a231008788c497edcd6ded95fa9 | [
"MIT"
] | 13 | 2019-03-24T13:56:13.000Z | 2022-03-19T23:18:42.000Z | All Tracks/Core CS/Algorithms/Implementation/Between Two Sets/Solution.py | Shaikh-Nabeel/HackerRank-Solutions | b69f4e3b78d75a231008788c497edcd6ded95fa9 | [
"MIT"
] | 2 | 2020-04-18T13:04:06.000Z | 2020-05-18T19:54:32.000Z | All Tracks/Core CS/Algorithms/Implementation/Between Two Sets/Solution.py | Shaikh-Nabeel/HackerRank-Solutions | b69f4e3b78d75a231008788c497edcd6ded95fa9 | [
"MIT"
] | 27 | 2019-03-24T13:56:16.000Z | 2022-03-19T23:18:44.000Z | #!/bin/python3
import sys
from functools import reduce
from math import gcd
'''
1) Find the gcd G of the second set
2) Find the lcm L of the first set
3) Find the divisors D of G
4) The result is the number of times that an element of D is divisible by L
'''
if __name__ == "__main__":
n, m = map(int, input().strip().split(' '))
first_set = list(map(int, input().strip().split(' ')))
second_set = list(map(int, input().strip().split(' ')))
gcd_value = reduce(gcd, second_set)
lcm_value = reduce(lambda x, y: x * int(y / gcd(x, y)), first_set)
divisors = [divisor for divisor in range(1, gcd_value + 1) if gcd_value % divisor == 0]
print(sum(1 for divisor in divisors if divisor % lcm_value == 0))
| 31.166667 | 91 | 0.641711 |
import sys
from functools import reduce
from math import gcd
if __name__ == "__main__":
n, m = map(int, input().strip().split(' '))
first_set = list(map(int, input().strip().split(' ')))
second_set = list(map(int, input().strip().split(' ')))
gcd_value = reduce(gcd, second_set)
lcm_value = reduce(lambda x, y: x * int(y / gcd(x, y)), first_set)
divisors = [divisor for divisor in range(1, gcd_value + 1) if gcd_value % divisor == 0]
print(sum(1 for divisor in divisors if divisor % lcm_value == 0))
| true | true |
1c3a682dc1fa384b56a768b52ec6da74b3c2ef6c | 2,758 | py | Python | Homework.py | q1e123/Pedagogy-CSV-Homework-Verify | aa571d3692b5343b83b0497d71420ce849b516f3 | [
"MIT"
] | null | null | null | Homework.py | q1e123/Pedagogy-CSV-Homework-Verify | aa571d3692b5343b83b0497d71420ce849b516f3 | [
"MIT"
] | null | null | null | Homework.py | q1e123/Pedagogy-CSV-Homework-Verify | aa571d3692b5343b83b0497d71420ce849b516f3 | [
"MIT"
] | null | null | null | import csv
import pandas
class Homework():
BAD_NAME_FILE_SUFIX = '-wrong-name.txt'
CHECK_FILE_NAME = 'check.txt'
FRAUD_MESSAGE = 'FRAUD'
GOOD_MESSAGE = 'GOOD'
WRONG_MESSAGE = 'WRONG'
NOT_SENT_MESSAGE = 'NOT SENT'
def __init__(self, title, student_list, path_list):
self.title = title
self.student_homework_map = dict.fromkeys(student_list)
self.student_status_map = dict.fromkeys(student_list)
self.bad_name_file_name = self.title + Homework.BAD_NAME_FILE_SUFIX
self.dataset_list = []
self.read_dataset(path_list)
def read_dataset(self, path_list):
for path in path_list:
with open(path) as csv_file:
lines = csv.reader(csv_file)
line_list = [line for line in lines]
self.dataset_list.append(line_list)
def add_student_homework(self, student, solution):
print(self.title, student, solution)
if student not in student:
with open(self.bad_name_file_name, 'a') as bad_name_file:
bad_name_file.write(student)
return
try:
with open(solution) as csv_file:
lines = csv.reader(csv_file)
line_list = [line for line in lines]
if line_list in self.dataset_list:
self.student_status_map[student] = Homework.WRONG_MESSAGE
return
if line_list in self.student_homework_map.values():
self.student_homework_map[student] = line_list
self.student_status_map[student] = Homework.FRAUD_MESSAGE
for student in self.student_homework_map:
if line_list == self.student_homework_map[student]:
self.student_homework_map[student] = line_list
self.student_status_map[student] = Homework.FRAUD_MESSAGE
return
self.student_status_map[student] = Homework.GOOD_MESSAGE
self.student_homework_map[student] = line_list
except Exception as e:
print(e)
self.student_status_map[student] = Homework.WRONG_MESSAGE
def check_not_sent(self):
for student in self.student_status_map:
if self.student_status_map[student] is None:
self.student_status_map[student] = Homework.NOT_SENT_MESSAGE
def to_csv(self):
with open(self.title, "w") as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for student in self.student_status_map:
writer.writerow([student, self.student_status_map[student]])
| 43.09375 | 86 | 0.602973 | import csv
import pandas
class Homework():
BAD_NAME_FILE_SUFIX = '-wrong-name.txt'
CHECK_FILE_NAME = 'check.txt'
FRAUD_MESSAGE = 'FRAUD'
GOOD_MESSAGE = 'GOOD'
WRONG_MESSAGE = 'WRONG'
NOT_SENT_MESSAGE = 'NOT SENT'
def __init__(self, title, student_list, path_list):
self.title = title
self.student_homework_map = dict.fromkeys(student_list)
self.student_status_map = dict.fromkeys(student_list)
self.bad_name_file_name = self.title + Homework.BAD_NAME_FILE_SUFIX
self.dataset_list = []
self.read_dataset(path_list)
def read_dataset(self, path_list):
for path in path_list:
with open(path) as csv_file:
lines = csv.reader(csv_file)
line_list = [line for line in lines]
self.dataset_list.append(line_list)
def add_student_homework(self, student, solution):
print(self.title, student, solution)
if student not in student:
with open(self.bad_name_file_name, 'a') as bad_name_file:
bad_name_file.write(student)
return
try:
with open(solution) as csv_file:
lines = csv.reader(csv_file)
line_list = [line for line in lines]
if line_list in self.dataset_list:
self.student_status_map[student] = Homework.WRONG_MESSAGE
return
if line_list in self.student_homework_map.values():
self.student_homework_map[student] = line_list
self.student_status_map[student] = Homework.FRAUD_MESSAGE
for student in self.student_homework_map:
if line_list == self.student_homework_map[student]:
self.student_homework_map[student] = line_list
self.student_status_map[student] = Homework.FRAUD_MESSAGE
return
self.student_status_map[student] = Homework.GOOD_MESSAGE
self.student_homework_map[student] = line_list
except Exception as e:
print(e)
self.student_status_map[student] = Homework.WRONG_MESSAGE
def check_not_sent(self):
for student in self.student_status_map:
if self.student_status_map[student] is None:
self.student_status_map[student] = Homework.NOT_SENT_MESSAGE
def to_csv(self):
with open(self.title, "w") as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for student in self.student_status_map:
writer.writerow([student, self.student_status_map[student]])
| true | true |
1c3a687f897f4b8336c532c52d749853e57c12dd | 3,182 | py | Python | analyze/wordstats.py | internaut/facebook-discussion-tk | 83865e8090da00ad9b2629393381db7c6953649c | [
"MIT"
] | 36 | 2016-01-01T08:58:20.000Z | 2021-09-22T18:54:51.000Z | analyze/wordstats.py | internaut/facebook-discussion-tk | 83865e8090da00ad9b2629393381db7c6953649c | [
"MIT"
] | null | null | null | analyze/wordstats.py | internaut/facebook-discussion-tk | 83865e8090da00ad9b2629393381db7c6953649c | [
"MIT"
] | 7 | 2016-09-29T20:15:33.000Z | 2019-02-17T13:25:05.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
from collections import defaultdict
import sys
import time
from pattern.text.de import split, parse
from libleipzig import Baseform
from suds import WebFault
LIBLEIPZIG_FOR_LEMMATA = True
LIBLEIPZIG_FAIL_RETRIES = 10
LIBLEIPZIG_FAIL_RETRIES_SLEEP_SEC = 1
STRINGS_STARTWITH_BLACKLIST = ()
STRINGS_EQUALS_BLACKLIST = ()
STRINGS_EQUALS_CS_BLACKLIST = () # case sensitive
# text = u"""Eine Katze liegt auf einer Matte. Viele Katzen liegen auf vielen Matten. Die Katzen schlafen,
# die Matten nicht. Die Hunde schlafen auch nicht. Man hört ihr lautes Gebell draußen vor dem Haus. In
# vielen Häusern schlafen viele Katzen. Häuser haben Türen."""
def lemma_and_type_from_leipzig(word):
base = Baseform(word)
if base and base[0].Grundform:
return base[0].Grundform.lower(), base[0].Wortart
else:
return None, None
def count_nouns_in_text(text):
parsed_text = parse(text, lemmata=True)
nouns = defaultdict(int)
for sentence in split(parsed_text):
# print('SENTENCE: %s' % sentence)
for w_i, w in enumerate(sentence.words):
# print('> WORD: %s (%s)' % (w, w.string))
if w.string and len(w.string) > 1 \
and w.string.lower() not in STRINGS_EQUALS_BLACKLIST \
and w.string not in STRINGS_EQUALS_CS_BLACKLIST \
and not any([w.string.lower().startswith(bl_word) for bl_word in STRINGS_STARTWITH_BLACKLIST]) \
and (w.type.startswith('NN') or (LIBLEIPZIG_FOR_LEMMATA and w_i > 0 and w.string[0].isupper())):
l = None
came_from_leipzig = False
if LIBLEIPZIG_FOR_LEMMATA:
libleipzig_err = True
libleipzig_retries = 0
while libleipzig_err and libleipzig_retries <= LIBLEIPZIG_FAIL_RETRIES:
try:
l, wordtype = lemma_and_type_from_leipzig(w.string)
libleipzig_err = False
except WebFault:
print('WebFault while using libleipzig (retry %d)' % libleipzig_retries, file=sys.stderr)
libleipzig_retries += 1
time.sleep(LIBLEIPZIG_FAIL_RETRIES_SLEEP_SEC)
if l and wordtype:
if wordtype != 'N': # libleipzig says this is no noun
# print('>> libleipzig: no noun')
continue
came_from_leipzig = True
else:
# print('>> libleipzig: undetermined')
pass
if not l:
l = w.lemma or w.string
came_from_leipzig = False
# print('>> NOUN: %s (%s, %s)' % (w.string, l, came_from_leipzig))
nouns[l] += 1
# print('---')
# sorted_nouns = sorted(nouns.items(), key=lambda item: item[1], reverse=True)
# for lemma, count in sorted_nouns:
# print('%s:\t\t%d' % (lemma, count))
return nouns
| 39.775 | 117 | 0.574167 |
from __future__ import print_function
from collections import defaultdict
import sys
import time
from pattern.text.de import split, parse
from libleipzig import Baseform
from suds import WebFault
LIBLEIPZIG_FOR_LEMMATA = True
LIBLEIPZIG_FAIL_RETRIES = 10
LIBLEIPZIG_FAIL_RETRIES_SLEEP_SEC = 1
STRINGS_STARTWITH_BLACKLIST = ()
STRINGS_EQUALS_BLACKLIST = ()
STRINGS_EQUALS_CS_BLACKLIST = ()
# die Matten nicht. Die Hunde schlafen auch nicht. Man hört ihr lautes Gebell draußen vor dem Haus. In
# vielen Häusern schlafen viele Katzen. Häuser haben Türen."""
def lemma_and_type_from_leipzig(word):
base = Baseform(word)
if base and base[0].Grundform:
return base[0].Grundform.lower(), base[0].Wortart
else:
return None, None
def count_nouns_in_text(text):
parsed_text = parse(text, lemmata=True)
nouns = defaultdict(int)
for sentence in split(parsed_text):
for w_i, w in enumerate(sentence.words):
if w.string and len(w.string) > 1 \
and w.string.lower() not in STRINGS_EQUALS_BLACKLIST \
and w.string not in STRINGS_EQUALS_CS_BLACKLIST \
and not any([w.string.lower().startswith(bl_word) for bl_word in STRINGS_STARTWITH_BLACKLIST]) \
and (w.type.startswith('NN') or (LIBLEIPZIG_FOR_LEMMATA and w_i > 0 and w.string[0].isupper())):
l = None
came_from_leipzig = False
if LIBLEIPZIG_FOR_LEMMATA:
libleipzig_err = True
libleipzig_retries = 0
while libleipzig_err and libleipzig_retries <= LIBLEIPZIG_FAIL_RETRIES:
try:
l, wordtype = lemma_and_type_from_leipzig(w.string)
libleipzig_err = False
except WebFault:
print('WebFault while using libleipzig (retry %d)' % libleipzig_retries, file=sys.stderr)
libleipzig_retries += 1
time.sleep(LIBLEIPZIG_FAIL_RETRIES_SLEEP_SEC)
if l and wordtype:
if wordtype != 'N':
continue
came_from_leipzig = True
else:
pass
if not l:
l = w.lemma or w.string
came_from_leipzig = False
nouns[l] += 1
return nouns
| true | true |
1c3a6a7a4d34c73b925f73b27932c4f907271fe1 | 23,157 | py | Python | gfootball/env/football_env_test.py | KanwarKelide/football | 149c03dfd90aaf652a61c656e40cefa5dc9e0454 | [
"Apache-2.0"
] | null | null | null | gfootball/env/football_env_test.py | KanwarKelide/football | 149c03dfd90aaf652a61c656e40cefa5dc9e0454 | [
"Apache-2.0"
] | null | null | null | gfootball/env/football_env_test.py | KanwarKelide/football | 149c03dfd90aaf652a61c656e40cefa5dc9e0454 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Football environment E2E test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
# `Iterable` lives in `collections.abc` since Python 3.3 and was removed from
# the top-level `collections` namespace in Python 3.10; fall back to the old
# location so Python 2 keeps working.
try:
  from collections.abc import Iterable
except ImportError:
  from collections import Iterable
from multiprocessing import pool
from multiprocessing import Queue
import gfootball
import os
import random
import threading
import zlib
from gfootball.env import config
from gfootball.env import football_action_set
from gfootball.env import football_env
from gfootball.env import wrappers
from gfootball.env import scenario_builder
import numpy as np
import psutil
from six.moves import range
import unittest
fast_run = False
def observation_hash(observation, hash_value = 0):
  """Folds a list of observation dicts into a running adler32 checksum.

  Fields are hashed in sorted key order, so the result is independent of
  dict insertion order; each key is hashed followed by the raw bytes of its
  value.

  Args:
    observation: list of observation dicts (one per controlled player).
    hash_value: checksum to continue from (0 starts a fresh one).

  Returns:
    Updated adler32 checksum as an int.
  """
  for single_observation in observation:
    for field_name in sorted(single_observation):
      field_value = single_observation[field_name]
      hash_value = zlib.adler32(field_name.encode(), hash_value)
      hash_value = zlib.adler32(np.ascontiguousarray(field_value), hash_value)
  return hash_value
def compute_hash(env, actions, extensive=False):
  """Computes hash of observations returned by environment for a given scenario.

  Resets the environment, then steps it with a cycling action id, folding
  every observation into a single running checksum.

  Args:
    env: environment
    actions: number of actions
    extensive: whether to run full episode (otherwise capped at 200 steps)

  Returns:
    hash
  """
  checksum = observation_hash(env.reset())
  episode_done = False
  step_count = 0
  while not episode_done:
    observation, _, episode_done, _ = env.step(step_count % actions)
    checksum = observation_hash(observation, checksum)
    step_count += 1
    if step_count >= 200 and not extensive:
      break
  return checksum
def run_scenario(cfg, queue, actions, render=False, validation=True):
  """Plays one scenario and streams its observations into `queue`.

  Actions come either from an indexable sequence (played until exhausted) or
  from a queue-like object (consumed until it yields None). A non-iterable
  action is duplicated for both controlled players. A trailing None put on
  `queue` marks the end of the stream.

  Args:
    cfg: config used to build the environment.
    queue: sink receiving the reset observation, then per-step observations.
    actions: sequence of actions, or queue-like provider of actions.
    render: whether to enable rendering.
    validation: whether to enable the env tracker for the whole run.
  """
  env = football_env.FootballEnv(cfg)
  if render:
    env.render()
  queue.put(env.reset())
  if validation:
    env.tracker_setup(0, 999999999999999)
  # The action source type never changes mid-run, so decide once.
  scripted = isinstance(actions, Iterable)
  step_index = 0
  while True:
    if scripted:
      if step_index >= len(actions):
        break
      current_action = actions[step_index]
    else:
      current_action = actions.get()
      if current_action is None:
        break
    step_index += 1
    if isinstance(current_action, Iterable):
      per_player_actions = current_action
    else:
      per_player_actions = [current_action, current_action]
    observation, _, episode_done, _ = env.step(per_player_actions)
    queue.put(observation)
    if episode_done:
      break
  queue.put(None)
  env.close()
def normalize_observation(o):
  """Canonicalizes zero-valued ball coordinates in place.

  Replaces any x/y entry of 'ball' and 'ball_direction' that compares equal
  to zero with the integer 0, squashing the -0.0 vs 0.0 distinction so that
  serialized observations compare equal.

  Args:
    o: single observation dict, mutated in place.
  """
  for field in ('ball', 'ball_direction'):
    for axis in (0, 1):
      if o[field][axis] == 0:
        o[field][axis] = 0
class FootballEnvTest(parameterized.TestCase):
def compare_observations(self, l1, l2):
for o1, o2 in zip(l1, l2):
if 'frame' in o1 and 'frame' not in o2:
del o1['frame']
elif 'frame' in o2 and 'frame' not in o1:
del o2['frame']
normalize_observation(o1)
normalize_observation(o2)
o1 = str(tuple(sorted(o1.items())))
o2 = str(tuple(sorted(o2.items())))
self.assertEqual(o1, o2)
  def check_determinism(self, extensive=False):
    """Check that environment is deterministic.

    Plays the deterministic 11 vs 11 scenario while cycling through all
    actions and compares the resulting observation checksum against golden
    values.

    Args:
      extensive: if True, play a single full episode; otherwise play two
        episodes capped at 200 steps each (see compute_hash).
    """
    if 'UNITTEST_IN_DOCKER' in os.environ:
      # Skipped inside the Docker-based test setup -- presumably the engine
      # is not bit-exact there; confirm before relying on this coverage.
      return
    cfg = config.Config({
        'level': 'tests.11_vs_11_hard_deterministic'
    })
    env = football_env.FootballEnv(cfg)
    actions = len(football_action_set.get_action_set(cfg))
    for episode in range(1 if extensive else 2):
      hash_value = compute_hash(env, actions, extensive)
      # Two golden hashes are accepted per case -- likely covering different
      # engine builds/platforms; TODO confirm which build produces which.
      if extensive:
        if hash_value != 1174966789:
          self.assertEqual(hash_value, 29082684)
      elif episode % 2 == 0:
        # First (even) short episode after construction has its own goldens.
        if hash_value != 2275067030:
          self.assertEqual(hash_value, 2143616507)
      else:
        if hash_value != 2045063811:
          self.assertEqual(hash_value, 1264083657)
    env.close()
  def test___control_all_players(self):
    """Validate MultiAgentToSingleAgent wrapper and control_all_players flag."""
    # Requesting 2 controlled left players in this configuration must be
    # rejected at construction time with an AssertionError.
    try:
      gfootball.env.create_environment(
          env_name='tests.multiagent_wrapper',
          rewards='checkpoints,scoring',
          number_of_left_players_agent_controls=2)
    except AssertionError:
      pass
    else:
      self.assertTrue(False)  # Construction should have raised.
    # All 22 players controlled (11 per side): one observation per player.
    env = gfootball.env.create_environment(
        env_name='tests.multiagent_wrapper',
        rewards='checkpoints,scoring',
        representation='simple115v2',
        number_of_left_players_agent_controls=11,
        number_of_right_players_agent_controls=11)
    obs = env.reset()
    self.assertLen(obs, 22)
    self.assertIn(obs, env.observation_space)
    # Only the 11 left players controlled.
    env = gfootball.env.create_environment(
        env_name='tests.multiagent_wrapper',
        rewards='checkpoints,scoring',
        number_of_left_players_agent_controls=11,
        number_of_right_players_agent_controls=0)
    obs = env.reset()
    self.assertLen(obs, 11)
    self.assertIn(obs, env.observation_space)
    # Only the 11 right players controlled.
    env = gfootball.env.create_environment(
        env_name='tests.multiagent_wrapper',
        rewards='checkpoints,scoring',
        representation='simple115v2',
        number_of_left_players_agent_controls=0,
        number_of_right_players_agent_controls=11)
    obs = env.reset()
    self.assertLen(obs, 11)
    self.assertIn(obs, env.observation_space)
    # One player per side controlled: two observations.
    env = gfootball.env.create_environment(
        env_name='tests.multiagent_wrapper',
        rewards='checkpoints,scoring',
        number_of_left_players_agent_controls=1,
        number_of_right_players_agent_controls=1)
    obs = env.reset()
    self.assertLen(obs, 2)
    self.assertIn(obs, env.observation_space)
    # Single controlled player: observation is unwrapped to a single array
    # of shape (72, 96, 4) instead of a list, both on reset and after a step
    # with a single-element action list.
    env = gfootball.env.create_environment(
        env_name='tests.multiagent_wrapper',
        rewards='checkpoints,scoring',
        number_of_left_players_agent_controls=1)
    obs = env.reset()
    self.assertEqual(np.shape(obs), (72, 96, 4))
    self.assertIn(obs, env.observation_space)
    obs, _, _, _ = env.step([football_action_set.action_left])
    self.assertEqual(np.shape(obs), (72, 96, 4))
    # Raw representation exposes per-player sticky action bits: after the
    # left/right moves below, index 0 (left) turns on for player 0 and
    # index 4 (right) for player 1.
    env = gfootball.env.create_environment(
        env_name='tests.multiagent_wrapper',
        rewards='checkpoints,scoring',
        representation='raw',
        number_of_left_players_agent_controls=1,
        number_of_right_players_agent_controls=1)
    obs = env.reset()
    self.assertLen(obs, 2)
    self.assertEqual(obs[0]['sticky_actions'][0], 0)
    self.assertEqual(obs[1]['sticky_actions'][4], 0)
    obs, _, _, _ = env.step(
        [football_action_set.action_idle, football_action_set.action_idle])
    obs, _, _, _ = env.step(
        [football_action_set.action_left, football_action_set.action_right])
    self.assertLen(obs, 2)
    self.assertEqual(obs[0]['sticky_actions'][0], 1)
    self.assertEqual(obs[1]['sticky_actions'][4], 1)
def test_score_empty_goal(self):
  """Score on an empty goal."""
  cfg = config.Config()
  env = football_env.FootballEnv(cfg)
  # NOTE(review): level is set after env construction; presumably it is only
  # read when the scenario is (re)built on reset() -- confirm.
  cfg['level'] = 'academy_empty_goal'
  last_o = env.reset()[0]
  # Hold "right"; scoring should terminate the episode within 120 steps.
  for _ in range(120):
    o, reward, done, _ = env.step(football_action_set.action_right)
    o = o[0]
    if done:
      # Scoring the goal must yield a reward of exactly 1.
      self.assertEqual(reward, 1)
      break
    self.assertFalse(done)
    # Ball and the active player should move monotonically to the right,
    # within a small numerical tolerance.
    self.assertGreaterEqual(o['ball'][0], last_o['ball'][0] - 0.01)
    self.assertGreaterEqual(
        o['left_team'][o['active']][0],
        last_o['left_team'][last_o['active']][0] - 0.01)
    last_o = o
  self.assertTrue(done)
  env.close()
def test_second_half(self):
"""Test second half feature."""
cfg = config.Config()
cfg['level'] = 'tests.second_half'
env = football_env.FootballEnv(cfg)
for _ in range(5):
o, _, done, _ = env.step(football_action_set.action_idle)
self.assertFalse(done)
self.assertAlmostEqual(o[0]['left_team'][o[0]['active']][0], 0, delta=0.1)
for _ in range(6):
self.assertFalse(done)
o, _, done, _ = env.step(football_action_set.action_idle)
self.assertAlmostEqual(
o[0]['left_team'][o[0]['active']][0], -0.5, delta=0.1)
self.assertTrue(done)
env.close()
def test___render(self):
"""Make sure rendering is not broken."""
if 'UNITTEST_IN_DOCKER' in os.environ:
# Rendering is not supported.
return
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
})
env = football_env.FootballEnv(cfg)
env.render()
o = env.reset()
hash_value = observation_hash(o)
for _ in range(10):
o, _, _, _ = env.step(football_action_set.action_right)
hash_value = observation_hash(o, hash_value)
self.assertEqual(hash_value, 18699114)
env.close()
def test_dynamic_render(self):
"""Verifies dynamic render support."""
if 'UNITTEST_IN_DOCKER' in os.environ:
# Rendering is not supported.
return
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
})
env = football_env.FootballEnv(cfg)
o = env.reset()
for _ in range(10):
o, _, _, _ = env.step(football_action_set.action_right)
self.assertNotIn('frame', o[0])
env.render()
self.assertIn('frame', env.observation()[0])
self.compare_observations(o, env.observation())
o, _, _, _ = env.step(football_action_set.action_right)
self.assertIn('frame', env.observation()[0])
env.disable_render()
self.compare_observations(o, env.observation())
env.close()
def test_different_action_formats(self):
"""Verify different action formats are accepted."""
cfg = config.Config()
env = football_env.FootballEnv(cfg)
env.reset()
env.step(football_action_set.action_right)
env.step([football_action_set.action_right])
env.step(np.array([football_action_set.action_right]))
env.step(np.array(football_action_set.action_right))
env.close()
def test_determinism_extensive(self):
self.check_determinism(extensive=True)
def test_determinism(self):
self.check_determinism()
def test_multi_instance(self):
"""Validates that two instances of the env can run in the same thread."""
tpool = pool.ThreadPool(processes=2)
run1 = tpool.apply_async(self.check_determinism)
run2 = tpool.apply_async(self.check_determinism)
run1.get()
run2.get()
def test_multi_render(self):
"""Only one rendering instance allowed at a time."""
if 'UNITTEST_IN_DOCKER' in os.environ:
# Rendering is not supported.
return
cfg = config.Config({})
env1 = football_env.FootballEnv(cfg)
env1.render()
env1.reset()
env2 = football_env.FootballEnv(cfg)
try:
env2.render()
except AssertionError:
env1.close()
env2.close()
# It is still possible to render.
env3 = football_env.FootballEnv(cfg)
env3.reset()
env3.close()
return
assert False, 'Exception expected'
def test_scenarios_are_at_least_loading(self):
cfg = config.Config()
for l in scenario_builder.all_scenarios():
cfg['level'] = l
unused_game_cfg = cfg.ScenarioConfig()
def memory_usage(self):
  """Returns the resident set size, in bytes, of the current process."""
  return psutil.Process(os.getpid()).memory_info().rss
def test__memory_usage(self):
"""Make sure memory usage is low when not recording videos."""
# This test has to go first, so that memory usage is not affected.
if 'UNITTEST_IN_DOCKER' in os.environ:
# Forge doesn't support rendering.
return
cfg = config.Config({'write_video': False})
env = football_env.FootballEnv(cfg)
env.render()
env.reset()
initial_memory = self.memory_usage()
for _ in range(100):
_, _, _, _ = env.step(football_action_set.action_right)
memory_usage = self.memory_usage() - initial_memory
env.close()
self.assertGreaterEqual(10000000, memory_usage)
def test_player_order_invariant(self):
"""Checks that environment behaves the same regardless of players order."""
players = ['agent:right_players=1', 'lazy:left_players=11']
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
'players': players
})
env = football_env.FootballEnv(cfg)
actions = len(football_action_set.get_action_set(cfg))
hash_value1 = compute_hash(env, actions)
players = [players[1], players[0]]
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
'players': players
})
env = football_env.FootballEnv(cfg)
hash_value2 = compute_hash(env, actions)
self.assertEqual(hash_value1, hash_value2)
env.close()
@parameterized.parameters(range(1))
def test_setstate(self, seed):
"""Checks setState functionality."""
cfg1 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'reverse_team_processing' : False
})
cfg2 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed + 10,
'reverse_team_processing' : False
})
env1 = football_env.FootballEnv(cfg1)
env2 = football_env.FootballEnv(cfg2)
initial_obs = env1.reset()
env2.reset()
initial_state = env1.get_state()
env2.set_state(initial_state)
random.seed(seed)
actions = len(football_action_set.get_action_set(cfg1))
first_action = random.randint(0, actions - 1)
first_obs, _, _, _ = env1.step(first_action)
_, _, _, _ = env2.step(first_action)
step = 0
limit = 10 if fast_run else 3000
while step < limit:
step += 1
action = random.randint(0, actions - 1)
if step % 10 == 0:
env2.set_state(initial_state)
self.compare_observations(initial_obs, env2.observation())
env2.step(first_action)
self.compare_observations(first_obs, env2.observation())
env2.set_state(env1.get_state())
self.compare_observations(env1.observation(), env2.observation())
_, _, done1, _ = env1.step(action)
_, _, done2, _ = env2.step(action)
self.assertEqual(done1, done2)
if done1:
break
env1.close()
env2.close()
@parameterized.parameters(range(1))
def test_symmetry(self, seed):
"""Checks game symmetry."""
processes = []
cfg1 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': False,
})
cfg2 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': True,
})
random.seed(seed)
action_cnt = len(football_action_set.get_action_set(cfg1))
actions = [random.randint(0, action_cnt - 1) for _ in range(10 if fast_run else 3000)]
queue1 = Queue()
thread1 = threading.Thread(
target=run_scenario, args=(cfg1, queue1, actions))
thread1.start()
queue2 = Queue()
thread2 = threading.Thread(
target=run_scenario, args=(cfg2, queue2, actions))
thread2.start()
while True:
o1 = queue1.get()
o2 = queue2.get()
if not o1 or not o2:
self.assertEqual(o1, o2)
break
self.compare_observations(o1[:1], o2[1:])
self.compare_observations(o2[:1], o1[1:])
thread1.join()
thread2.join()
@parameterized.parameters((1, 'left', True), (0, 'right', True),
(1, 'left', False), (0, 'right', False))
def test_offside(self, episode, team2, reverse):
cfg = config.Config({
'level': 'tests.offside_test',
'players': ['agent:{}_players=1'.format(team2)],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
env.reset()
o, _, done, _ = env.step(football_action_set.action_long_pass)
done = False
while not done and o[0]['right_team'][1][0] == 0:
o, _, done, _ = env.step(football_action_set.action_idle)
self.assertAlmostEqual(o[0]['ball'][0], 0.6, delta=0.4)
self.assertAlmostEqual(o[0]['right_team'][0][0], 0.6, delta=0.4)
self.assertAlmostEqual(o[0]['right_team'][1][0], 0.6, delta=0.4)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.6, delta=0.4)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.6, delta=0.4)
env.close()
@parameterized.parameters((0, 1, True), (1, -1, True), (0, 1, False),
(1, -1, False))
def test_corner(self, episode, factor, reverse):
cfg = config.Config({
'level': 'tests.corner_test',
'players': ['agent:left_players=1,right_players=1'],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step([football_action_set.action_left,
football_action_set.action_left])
self.assertAlmostEqual(o[0]['ball'][0], -0.95 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['ball'][1], 0.4 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][1][0], -0.95 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.95, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.9 * factor, delta=0.2)
env.close()
def test_penalty(self):
cfg = config.Config({
'level': 'tests.penalty',
'players': ['agent:left_players=1'],
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step([football_action_set.action_sliding])
self.assertAlmostEqual(o[0]['ball'][0], -0.809, delta=0.01)
self.assertAlmostEqual(o[0]['ball'][1], 0.0, delta=0.01)
self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][1][0], -0.75, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.95, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.70, delta=0.1)
env.close()
@parameterized.parameters((0, -1, True), (1, 1, True), (0, -1, False),
(1, 1, False))
def test_keeper_ball(self, episode, factor, reverse):
cfg = config.Config({
'level': 'tests.keeper_test',
'players': ['agent:left_players=1,right_players=1'],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step([football_action_set.action_right,
football_action_set.action_right])
self.assertAlmostEqual(o[0]['ball'][0], -1.0 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['ball'][1], 0.0, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][1][0], 0.4, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.9, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.33, delta=0.1)
env.close()
@parameterized.parameters((0, True), (1, True), (0, False), (1, False))
def test_goal(self, episode, reverse):
cfg = config.Config({
'level': 'tests.goal_test',
'players': ['agent:left_players=1,right_players=1'],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step(
[football_action_set.action_right, football_action_set.action_right])
self.assertAlmostEqual(o[0]['ball'][0], 0.0, delta=0.1)
self.assertEqual(o[0]['score'][episode], 1)
self.assertEqual(o[0]['score'][1 - episode], 0)
env.close()
@parameterized.parameters(range(1))
def test_render_state_equals_norender(self, seed):
"""Checks that rendering game state is the same as non-rendering."""
if 'UNITTEST_IN_DOCKER' in os.environ:
# Forge doesn't support rendering.
return
cfg1 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': False,
})
cfg2 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': False,
})
random.seed(seed)
action_cnt = len(football_action_set.get_action_set(cfg1))
actions = [random.randint(0, action_cnt - 1) for _ in range(50)]
queue1 = Queue()
thread1 = threading.Thread(
target=run_scenario, args=(cfg1, queue1, actions, False, False))
thread1.start()
queue2 = Queue()
thread2 = threading.Thread(
target=run_scenario, args=(cfg2, queue2, actions, True, False))
thread2.start()
while True:
o1 = queue1.get()
o2 = queue2.get()
if not o1 or not o2:
self.assertEqual(o1, o2)
break
self.compare_observations(o1, o2)
thread1.join()
thread2.join()
def test_get_state_wrapper(self):
env = gfootball.env.create_environment(
stacked=True,
env_name='academy_empty_goal',
rewards='checkpoints,scoring')
o = env.reset()
state = env.get_state()
reward1 = 0
hash1 = 0
while reward1 < 0.9:
o, r, _, _ = env.step(football_action_set.action_right)
reward1 += r
hash1 = zlib.adler32(o, hash1)
self.assertAlmostEqual(reward1, 0.9, delta=0.01)
env.set_state(state)
hash2 = 0
reward2 = 0
while reward2 < 0.9:
o, r, _, _ = env.step(football_action_set.action_right)
reward2 += r
hash2 = zlib.adler32(o, hash2)
self.assertAlmostEqual(reward2, 0.9, delta=0.01)
self.assertEqual(hash1, hash2)
def test_restore_after_reset(self):
  """get_state/set_state round-trips across an intervening reset()."""
  cfg = config.Config({
      'level': '11_vs_11_competition',
  })
  env = football_env.FootballEnv(cfg)
  obs = env.reset()
  state = env.get_state()
  env.reset()
  # Restoring the snapshot must reproduce the pre-reset observation and the
  # exact serialized state.
  env.set_state(state)
  obs_ = env.observation()
  state_ = env.get_state()
  env.step(0)  # Test if can take step
  self.compare_observations(obs, obs_)
  self.assertEqual(state, state_)
def test_restore_after_done(self):
  """set_state still works after the episode has finished."""
  cfg = config.Config({
      'level': 'academy_empty_goal_close',
  })
  env = football_env.FootballEnv(cfg)
  env.reset()
  state = env.get_state()
  # Go right until reaching the goal.
  done = False
  while not done:
    _, _, done, _ = env.step(5)
  # Restoring a pre-goal snapshot must leave the env in a steppable state.
  env.set_state(state)
  env.step(0)  # Test if can take step
if __name__ == '__main__':
unittest.main(failfast=True)
| 33.56087 | 90 | 0.651509 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import random
import threading
import unittest
import zlib
# `Iterable` lives in `collections.abc`; importing it from `collections`
# was deprecated in 3.3 and removed in Python 3.10.
try:
  from collections.abc import Iterable
except ImportError:  # Python 2 fallback (file still supports py2 via six).
  from collections import Iterable
from multiprocessing import pool
from multiprocessing import Queue

from absl.testing import parameterized
import gfootball
from gfootball.env import config
from gfootball.env import football_action_set
from gfootball.env import football_env
from gfootball.env import scenario_builder
from gfootball.env import wrappers
import numpy as np
import psutil
from six.moves import range

# When True, long-running determinism/symmetry loops are shortened.
fast_run = False
def observation_hash(observation, hash_value=0):
  """Folds each per-player observation dict into a running adler32 checksum.

  Keys are visited in sorted order so the result is deterministic; both the
  key text and the (contiguous) value buffer contribute to the checksum.
  """
  checksum = hash_value
  for player_obs in observation:
    for field in sorted(player_obs):
      checksum = zlib.adler32(field.encode(), checksum)
      checksum = zlib.adler32(np.ascontiguousarray(player_obs[field]), checksum)
  return checksum
def compute_hash(env, actions, extensive=False):
  """Plays a scripted episode and returns a checksum of all observations.

  Cycles through the `actions` action indices one per step.  Stops after 200
  steps unless `extensive` is set, in which case it plays until the episode
  terminates.
  """
  hash_value = observation_hash(env.reset())
  step = 0
  while True:
    obs, _, done, _ = env.step(step % actions)
    hash_value = observation_hash(obs, hash_value)
    step += 1
    if done or (not extensive and step >= 200):
      break
  return hash_value
def run_scenario(cfg, queue, actions, render=False, validation=True):
  """Plays one scenario (typically in a worker thread), streaming obs to `queue`.

  `actions` is either a sequence with one entry per step, or a queue-like
  object whose get() yields actions until it returns None.  A non-iterable
  action is applied to both controlled players.  A trailing None is put on
  `queue` to signal completion.
  """
  env = football_env.FootballEnv(cfg)
  if render:
    env.render()
  obs = env.reset()
  queue.put(obs)
  if validation:
    # Enable engine-side state tracking over (practically) the whole game.
    env.tracker_setup(0, 999999999999999)
  done = False
  step = 0
  while True:
    if isinstance(actions, Iterable):
      # Sequence mode: stop when the scripted actions run out.
      if step >= len(actions):
        break
      action = actions[step]
    else:
      # Queue mode: None is the termination sentinel.
      action = actions.get()
      if action is None:
        break
    step += 1
    if isinstance(action, Iterable):
      obs, _, done, _ = env.step(action)
    else:
      # A single action drives both controlled players.
      obs, _, done, _ = env.step([action, action])
    queue.put(obs)
    if done:
      break
  queue.put(None)
  env.close()
def normalize_observation(o):
  """Replaces negative-zero x/y coordinates with plain zero, in place.

  Only the first two components of 'ball' and 'ball_direction' are touched,
  so string-based observation comparisons do not differ on -0.0 vs 0.0.
  """
  for field in ('ball', 'ball_direction'):
    for axis in (0, 1):
      if o[field][axis] == -0:
        o[field][axis] = 0
class FootballEnvTest(parameterized.TestCase):
def compare_observations(self, l1, l2):
for o1, o2 in zip(l1, l2):
if 'frame' in o1 and 'frame' not in o2:
del o1['frame']
elif 'frame' in o2 and 'frame' not in o1:
del o2['frame']
normalize_observation(o1)
normalize_observation(o2)
o1 = str(tuple(sorted(o1.items())))
o2 = str(tuple(sorted(o2.items())))
self.assertEqual(o1, o2)
def check_determinism(self, extensive=False):
if 'UNITTEST_IN_DOCKER' in os.environ:
return
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic'
})
env = football_env.FootballEnv(cfg)
actions = len(football_action_set.get_action_set(cfg))
for episode in range(1 if extensive else 2):
hash_value = compute_hash(env, actions, extensive)
if extensive:
if hash_value != 1174966789:
self.assertEqual(hash_value, 29082684)
elif episode % 2 == 0:
if hash_value != 2275067030:
self.assertEqual(hash_value, 2143616507)
else:
if hash_value != 2045063811:
self.assertEqual(hash_value, 1264083657)
env.close()
def test___control_all_players(self):
try:
gfootball.env.create_environment(
env_name='tests.multiagent_wrapper',
rewards='checkpoints,scoring',
number_of_left_players_agent_controls=2)
except AssertionError:
pass
else:
self.assertTrue(False)
env = gfootball.env.create_environment(
env_name='tests.multiagent_wrapper',
rewards='checkpoints,scoring',
representation='simple115v2',
number_of_left_players_agent_controls=11,
number_of_right_players_agent_controls=11)
obs = env.reset()
self.assertLen(obs, 22)
self.assertIn(obs, env.observation_space)
env = gfootball.env.create_environment(
env_name='tests.multiagent_wrapper',
rewards='checkpoints,scoring',
number_of_left_players_agent_controls=11,
number_of_right_players_agent_controls=0)
obs = env.reset()
self.assertLen(obs, 11)
self.assertIn(obs, env.observation_space)
env = gfootball.env.create_environment(
env_name='tests.multiagent_wrapper',
rewards='checkpoints,scoring',
representation='simple115v2',
number_of_left_players_agent_controls=0,
number_of_right_players_agent_controls=11)
obs = env.reset()
self.assertLen(obs, 11)
self.assertIn(obs, env.observation_space)
env = gfootball.env.create_environment(
env_name='tests.multiagent_wrapper',
rewards='checkpoints,scoring',
number_of_left_players_agent_controls=1,
number_of_right_players_agent_controls=1)
obs = env.reset()
self.assertLen(obs, 2)
self.assertIn(obs, env.observation_space)
env = gfootball.env.create_environment(
env_name='tests.multiagent_wrapper',
rewards='checkpoints,scoring',
number_of_left_players_agent_controls=1)
obs = env.reset()
self.assertEqual(np.shape(obs), (72, 96, 4))
self.assertIn(obs, env.observation_space)
obs, _, _, _ = env.step([football_action_set.action_left])
self.assertEqual(np.shape(obs), (72, 96, 4))
env = gfootball.env.create_environment(
env_name='tests.multiagent_wrapper',
rewards='checkpoints,scoring',
representation='raw',
number_of_left_players_agent_controls=1,
number_of_right_players_agent_controls=1)
obs = env.reset()
self.assertLen(obs, 2)
self.assertEqual(obs[0]['sticky_actions'][0], 0)
self.assertEqual(obs[1]['sticky_actions'][4], 0)
obs, _, _, _ = env.step(
[football_action_set.action_idle, football_action_set.action_idle])
obs, _, _, _ = env.step(
[football_action_set.action_left, football_action_set.action_right])
self.assertLen(obs, 2)
self.assertEqual(obs[0]['sticky_actions'][0], 1)
self.assertEqual(obs[1]['sticky_actions'][4], 1)
def test_score_empty_goal(self):
cfg = config.Config()
env = football_env.FootballEnv(cfg)
cfg['level'] = 'academy_empty_goal'
last_o = env.reset()[0]
for _ in range(120):
o, reward, done, _ = env.step(football_action_set.action_right)
o = o[0]
if done:
self.assertEqual(reward, 1)
break
self.assertFalse(done)
self.assertGreaterEqual(o['ball'][0], last_o['ball'][0] - 0.01)
self.assertGreaterEqual(
o['left_team'][o['active']][0],
last_o['left_team'][last_o['active']][0] - 0.01)
last_o = o
self.assertTrue(done)
env.close()
def test_second_half(self):
cfg = config.Config()
cfg['level'] = 'tests.second_half'
env = football_env.FootballEnv(cfg)
for _ in range(5):
o, _, done, _ = env.step(football_action_set.action_idle)
self.assertFalse(done)
self.assertAlmostEqual(o[0]['left_team'][o[0]['active']][0], 0, delta=0.1)
for _ in range(6):
self.assertFalse(done)
o, _, done, _ = env.step(football_action_set.action_idle)
self.assertAlmostEqual(
o[0]['left_team'][o[0]['active']][0], -0.5, delta=0.1)
self.assertTrue(done)
env.close()
def test___render(self):
if 'UNITTEST_IN_DOCKER' in os.environ:
return
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
})
env = football_env.FootballEnv(cfg)
env.render()
o = env.reset()
hash_value = observation_hash(o)
for _ in range(10):
o, _, _, _ = env.step(football_action_set.action_right)
hash_value = observation_hash(o, hash_value)
self.assertEqual(hash_value, 18699114)
env.close()
def test_dynamic_render(self):
if 'UNITTEST_IN_DOCKER' in os.environ:
return
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
})
env = football_env.FootballEnv(cfg)
o = env.reset()
for _ in range(10):
o, _, _, _ = env.step(football_action_set.action_right)
self.assertNotIn('frame', o[0])
env.render()
self.assertIn('frame', env.observation()[0])
self.compare_observations(o, env.observation())
o, _, _, _ = env.step(football_action_set.action_right)
self.assertIn('frame', env.observation()[0])
env.disable_render()
self.compare_observations(o, env.observation())
env.close()
def test_different_action_formats(self):
cfg = config.Config()
env = football_env.FootballEnv(cfg)
env.reset()
env.step(football_action_set.action_right)
env.step([football_action_set.action_right])
env.step(np.array([football_action_set.action_right]))
env.step(np.array(football_action_set.action_right))
env.close()
def test_determinism_extensive(self):
self.check_determinism(extensive=True)
def test_determinism(self):
self.check_determinism()
def test_multi_instance(self):
tpool = pool.ThreadPool(processes=2)
run1 = tpool.apply_async(self.check_determinism)
run2 = tpool.apply_async(self.check_determinism)
run1.get()
run2.get()
def test_multi_render(self):
if 'UNITTEST_IN_DOCKER' in os.environ:
return
cfg = config.Config({})
env1 = football_env.FootballEnv(cfg)
env1.render()
env1.reset()
env2 = football_env.FootballEnv(cfg)
try:
env2.render()
except AssertionError:
env1.close()
env2.close()
env3 = football_env.FootballEnv(cfg)
env3.reset()
env3.close()
return
assert False, 'Exception expected'
def test_scenarios_are_at_least_loading(self):
cfg = config.Config()
for l in scenario_builder.all_scenarios():
cfg['level'] = l
unused_game_cfg = cfg.ScenarioConfig()
def memory_usage(self):
process = psutil.Process(os.getpid())
return process.memory_info().rss
def test__memory_usage(self):
if 'UNITTEST_IN_DOCKER' in os.environ:
return
cfg = config.Config({'write_video': False})
env = football_env.FootballEnv(cfg)
env.render()
env.reset()
initial_memory = self.memory_usage()
for _ in range(100):
_, _, _, _ = env.step(football_action_set.action_right)
memory_usage = self.memory_usage() - initial_memory
env.close()
self.assertGreaterEqual(10000000, memory_usage)
def test_player_order_invariant(self):
players = ['agent:right_players=1', 'lazy:left_players=11']
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
'players': players
})
env = football_env.FootballEnv(cfg)
actions = len(football_action_set.get_action_set(cfg))
hash_value1 = compute_hash(env, actions)
players = [players[1], players[0]]
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
'players': players
})
env = football_env.FootballEnv(cfg)
hash_value2 = compute_hash(env, actions)
self.assertEqual(hash_value1, hash_value2)
env.close()
@parameterized.parameters(range(1))
def test_setstate(self, seed):
cfg1 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'reverse_team_processing' : False
})
cfg2 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed + 10,
'reverse_team_processing' : False
})
env1 = football_env.FootballEnv(cfg1)
env2 = football_env.FootballEnv(cfg2)
initial_obs = env1.reset()
env2.reset()
initial_state = env1.get_state()
env2.set_state(initial_state)
random.seed(seed)
actions = len(football_action_set.get_action_set(cfg1))
first_action = random.randint(0, actions - 1)
first_obs, _, _, _ = env1.step(first_action)
_, _, _, _ = env2.step(first_action)
step = 0
limit = 10 if fast_run else 3000
while step < limit:
step += 1
action = random.randint(0, actions - 1)
if step % 10 == 0:
env2.set_state(initial_state)
self.compare_observations(initial_obs, env2.observation())
env2.step(first_action)
self.compare_observations(first_obs, env2.observation())
env2.set_state(env1.get_state())
self.compare_observations(env1.observation(), env2.observation())
_, _, done1, _ = env1.step(action)
_, _, done2, _ = env2.step(action)
self.assertEqual(done1, done2)
if done1:
break
env1.close()
env2.close()
@parameterized.parameters(range(1))
def test_symmetry(self, seed):
processes = []
cfg1 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': False,
})
cfg2 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': True,
})
random.seed(seed)
action_cnt = len(football_action_set.get_action_set(cfg1))
actions = [random.randint(0, action_cnt - 1) for _ in range(10 if fast_run else 3000)]
queue1 = Queue()
thread1 = threading.Thread(
target=run_scenario, args=(cfg1, queue1, actions))
thread1.start()
queue2 = Queue()
thread2 = threading.Thread(
target=run_scenario, args=(cfg2, queue2, actions))
thread2.start()
while True:
o1 = queue1.get()
o2 = queue2.get()
if not o1 or not o2:
self.assertEqual(o1, o2)
break
self.compare_observations(o1[:1], o2[1:])
self.compare_observations(o2[:1], o1[1:])
thread1.join()
thread2.join()
@parameterized.parameters((1, 'left', True), (0, 'right', True),
(1, 'left', False), (0, 'right', False))
def test_offside(self, episode, team2, reverse):
cfg = config.Config({
'level': 'tests.offside_test',
'players': ['agent:{}_players=1'.format(team2)],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
env.reset()
o, _, done, _ = env.step(football_action_set.action_long_pass)
done = False
while not done and o[0]['right_team'][1][0] == 0:
o, _, done, _ = env.step(football_action_set.action_idle)
self.assertAlmostEqual(o[0]['ball'][0], 0.6, delta=0.4)
self.assertAlmostEqual(o[0]['right_team'][0][0], 0.6, delta=0.4)
self.assertAlmostEqual(o[0]['right_team'][1][0], 0.6, delta=0.4)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.6, delta=0.4)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.6, delta=0.4)
env.close()
@parameterized.parameters((0, 1, True), (1, -1, True), (0, 1, False),
(1, -1, False))
def test_corner(self, episode, factor, reverse):
cfg = config.Config({
'level': 'tests.corner_test',
'players': ['agent:left_players=1,right_players=1'],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step([football_action_set.action_left,
football_action_set.action_left])
self.assertAlmostEqual(o[0]['ball'][0], -0.95 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['ball'][1], 0.4 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][1][0], -0.95 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.95, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.9 * factor, delta=0.2)
env.close()
def test_penalty(self):
cfg = config.Config({
'level': 'tests.penalty',
'players': ['agent:left_players=1'],
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step([football_action_set.action_sliding])
self.assertAlmostEqual(o[0]['ball'][0], -0.809, delta=0.01)
self.assertAlmostEqual(o[0]['ball'][1], 0.0, delta=0.01)
self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][1][0], -0.75, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.95, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.70, delta=0.1)
env.close()
@parameterized.parameters((0, -1, True), (1, 1, True), (0, -1, False),
(1, 1, False))
def test_keeper_ball(self, episode, factor, reverse):
cfg = config.Config({
'level': 'tests.keeper_test',
'players': ['agent:left_players=1,right_players=1'],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step([football_action_set.action_right,
football_action_set.action_right])
self.assertAlmostEqual(o[0]['ball'][0], -1.0 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['ball'][1], 0.0, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][1][0], 0.4, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.9, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.33, delta=0.1)
env.close()
@parameterized.parameters((0, True), (1, True), (0, False), (1, False))
def test_goal(self, episode, reverse):
cfg = config.Config({
'level': 'tests.goal_test',
'players': ['agent:left_players=1,right_players=1'],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step(
[football_action_set.action_right, football_action_set.action_right])
self.assertAlmostEqual(o[0]['ball'][0], 0.0, delta=0.1)
self.assertEqual(o[0]['score'][episode], 1)
self.assertEqual(o[0]['score'][1 - episode], 0)
env.close()
@parameterized.parameters(range(1))
def test_render_state_equals_norender(self, seed):
if 'UNITTEST_IN_DOCKER' in os.environ:
# Forge doesn't support rendering.
return
cfg1 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': False,
})
cfg2 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': False,
})
random.seed(seed)
action_cnt = len(football_action_set.get_action_set(cfg1))
actions = [random.randint(0, action_cnt - 1) for _ in range(50)]
queue1 = Queue()
thread1 = threading.Thread(
target=run_scenario, args=(cfg1, queue1, actions, False, False))
thread1.start()
queue2 = Queue()
thread2 = threading.Thread(
target=run_scenario, args=(cfg2, queue2, actions, True, False))
thread2.start()
while True:
o1 = queue1.get()
o2 = queue2.get()
if not o1 or not o2:
self.assertEqual(o1, o2)
break
self.compare_observations(o1, o2)
thread1.join()
thread2.join()
def test_get_state_wrapper(self):
env = gfootball.env.create_environment(
stacked=True,
env_name='academy_empty_goal',
rewards='checkpoints,scoring')
o = env.reset()
state = env.get_state()
reward1 = 0
hash1 = 0
while reward1 < 0.9:
o, r, _, _ = env.step(football_action_set.action_right)
reward1 += r
hash1 = zlib.adler32(o, hash1)
self.assertAlmostEqual(reward1, 0.9, delta=0.01)
env.set_state(state)
hash2 = 0
reward2 = 0
while reward2 < 0.9:
o, r, _, _ = env.step(football_action_set.action_right)
reward2 += r
hash2 = zlib.adler32(o, hash2)
self.assertAlmostEqual(reward2, 0.9, delta=0.01)
self.assertEqual(hash1, hash2)
def test_restore_after_reset(self):
cfg = config.Config({
'level': '11_vs_11_competition',
})
env = football_env.FootballEnv(cfg)
obs = env.reset()
state = env.get_state()
env.reset()
env.set_state(state)
obs_ = env.observation()
state_ = env.get_state()
env.step(0)
self.compare_observations(obs, obs_)
self.assertEqual(state, state_)
def test_restore_after_done(self):
cfg = config.Config({
'level': 'academy_empty_goal_close',
})
env = football_env.FootballEnv(cfg)
env.reset()
state = env.get_state()
done = False
while not done:
_, _, done, _ = env.step(5)
env.set_state(state)
env.step(0)
if __name__ == '__main__':
unittest.main(failfast=True)
| true | true |
1c3a6a9a3af4da22510834166c0e0f9ba885cfff | 451 | py | Python | counter/src/counter.py | roicostas/docker-orquestacion | 0b47a46f8fafdae7f3fe88fcf4bdb7dd11eff8f7 | [
"Apache-2.0"
] | 1 | 2017-12-21T11:44:05.000Z | 2017-12-21T11:44:05.000Z | counter/src/counter.py | roicostas/docker-orquestacion | 0b47a46f8fafdae7f3fe88fcf4bdb7dd11eff8f7 | [
"Apache-2.0"
] | null | null | null | counter/src/counter.py | roicostas/docker-orquestacion | 0b47a46f8fafdae7f3fe88fcf4bdb7dd11eff8f7 | [
"Apache-2.0"
] | 2 | 2016-10-31T19:25:21.000Z | 2017-02-07T08:40:10.000Z | #!/usr/bin/env python
from flask import Flask, render_template
import redis
# Flask application; DEBUG is on, so this is a development configuration.
app = Flask(__name__)
app.config['DEBUG'] = True
# Client for the Redis server reachable at hostname "redis" (e.g. a linked
# container). NOTE: this rebinding shadows the `redis` module name for the
# rest of the file.
redis = redis.Redis("redis")
@app.route("/")
def index():
    """Serve the hit-counter page, incrementing the counter atomically.

    Uses Redis INCR instead of the previous read-modify-write GET/SET
    cycle, so concurrent requests cannot lose updates. INCR creates the
    key with value 1 on first use and returns the new integer value,
    matching the old behaviour for sequential requests.

    Returns:
        The rendered ``index.html`` template with the current count.
    """
    count = redis.incr('count')
    return render_template('index.html', count=count)


if __name__ == "__main__":
    # Bind on all interfaces so the containerised app is reachable.
    app.run(host="0.0.0.0", port=5000)
| 20.5 | 53 | 0.643016 |
from flask import Flask, render_template
import redis
app = Flask(__name__)
app.config['DEBUG'] = True
redis = redis.Redis("redis")
@app.route("/")
def index():
count = 1
bcount = redis.get('count')
if bcount:
count = int(bcount.decode('utf8')) + 1
redis.set('count', str(count))
return render_template('index.html', count=count)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
| true | true |
1c3a6b5b139ff6ab035e2f38e7979eb6bdf13b68 | 2,189 | py | Python | hw/ip/otbn/dv/otbnsim/standalone.py | msfschaffner/opentitan-bak | de4cb1bb9e7b707a3ca2a6882d83af7ed2aa1ab8 | [
"Apache-2.0"
] | 3 | 2019-12-23T13:16:39.000Z | 2022-01-29T23:45:31.000Z | hw/ip/otbn/dv/otbnsim/standalone.py | msfschaffner/opentitan-bak | de4cb1bb9e7b707a3ca2a6882d83af7ed2aa1ab8 | [
"Apache-2.0"
] | 2 | 2021-11-01T15:02:37.000Z | 2022-01-17T14:34:36.000Z | hw/ip/otbn/dv/otbnsim/standalone.py | msfschaffner/opentitan-bak | de4cb1bb9e7b707a3ca2a6882d83af7ed2aa1ab8 | [
"Apache-2.0"
] | 1 | 2019-12-24T02:07:46.000Z | 2019-12-24T02:07:46.000Z | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import sys
from sim.load_elf import load_elf
from sim.standalonesim import StandaloneSim
from sim.stats import ExecutionStatAnalyzer
def main() -> int:
    """Run the standalone OTBN simulator on an ELF file.

    Parses command line arguments, loads the ELF into the simulator,
    installs fixed dummy sideload keys, runs the program to completion,
    and optionally dumps DMEM contents, register state and execution
    statistics to the requested files.

    Returns:
        0 on success; 1 if execution stopped at an unexpected PC.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('elf')
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument(
        '--dump-dmem',
        metavar="FILE",
        type=argparse.FileType('wb'),
        help=("after execution, write the data memory contents to this file. "
              "Use '-' to write to STDOUT.")
    )
    parser.add_argument(
        '--dump-regs',
        metavar="FILE",
        type=argparse.FileType('w'),
        help=("after execution, write the GPR and WDR contents to this file. "
              "Use '-' to write to STDOUT.")
    )
    parser.add_argument(
        '--dump-stats',
        metavar="FILE",
        type=argparse.FileType('w'),
        help=("after execution, write execution statistics to this file. "
              "Use '-' to write to STDOUT.")
    )
    args = parser.parse_args()
    collect_stats = args.dump_stats is not None

    sim = StandaloneSim()
    exp_end_addr = load_elf(sim, args.elf)

    # Fixed dummy sideload keys built by repeating hex patterns
    # ("deadbeef" * 12 = 96 hex digits; "badf00d" * 12 = 84 hex digits).
    # The redundant str(...) wrappers from the original are dropped.
    key0 = int("deadbeef" * 12, 16)
    key1 = int("badf00d" * 12, 16)
    sim.state.wsrs.set_sideload_keys(key0, key1)

    sim.state.ext_regs.commit()
    sim.start(collect_stats)
    sim.run(verbose=args.verbose, dump_file=args.dump_regs)

    if exp_end_addr is not None:
        if sim.state.pc != exp_end_addr:
            print('Run stopped at PC {:#x}, but _expected_end_addr was {:#x}.'
                  .format(sim.state.pc, exp_end_addr),
                  file=sys.stderr)
            return 1

    if args.dump_dmem is not None:
        args.dump_dmem.write(sim.dump_data())

    if collect_stats:
        assert sim.stats is not None
        stat_analyzer = ExecutionStatAnalyzer(sim.stats, args.elf)
        args.dump_stats.write(stat_analyzer.dump())

    return 0


if __name__ == "__main__":
    sys.exit(main())
| 29.186667 | 78 | 0.628141 |
import argparse
import sys
from sim.load_elf import load_elf
from sim.standalonesim import StandaloneSim
from sim.stats import ExecutionStatAnalyzer
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument('elf')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument(
'--dump-dmem',
metavar="FILE",
type=argparse.FileType('wb'),
help=("after execution, write the data memory contents to this file. "
"Use '-' to write to STDOUT.")
)
parser.add_argument(
'--dump-regs',
metavar="FILE",
type=argparse.FileType('w'),
help=("after execution, write the GPR and WDR contents to this file. "
"Use '-' to write to STDOUT.")
)
parser.add_argument(
'--dump-stats',
metavar="FILE",
type=argparse.FileType('w'),
help=("after execution, write execution statistics to this file. "
"Use '-' to write to STDOUT.")
)
args = parser.parse_args()
collect_stats = args.dump_stats is not None
sim = StandaloneSim()
exp_end_addr = load_elf(sim, args.elf)
key0 = int((str("deadbeef") * 12), 16)
key1 = int((str("badf00d") * 12), 16)
sim.state.wsrs.set_sideload_keys(key0, key1)
sim.state.ext_regs.commit()
sim.start(collect_stats)
sim.run(verbose=args.verbose, dump_file=args.dump_regs)
if exp_end_addr is not None:
if sim.state.pc != exp_end_addr:
print('Run stopped at PC {:#x}, but _expected_end_addr was {:#x}.'
.format(sim.state.pc, exp_end_addr),
file=sys.stderr)
return 1
if args.dump_dmem is not None:
args.dump_dmem.write(sim.dump_data())
if collect_stats:
assert sim.stats is not None
stat_analyzer = ExecutionStatAnalyzer(sim.stats, args.elf)
args.dump_stats.write(stat_analyzer.dump())
return 0
if __name__ == "__main__":
sys.exit(main())
| true | true |
1c3a6c34a77463d37ec1c647937a7811b2ba42e0 | 1,168 | py | Python | music/migrations/0001_initial.py | Ashutoshkrs/music_app | 45a6a52be0232c8de99136dedd9a9c0e7fdfef3c | [
"MIT"
] | 1 | 2018-11-01T03:36:17.000Z | 2018-11-01T03:36:17.000Z | music/migrations/0001_initial.py | Ashutoshkrs/music_app | 45a6a52be0232c8de99136dedd9a9c0e7fdfef3c | [
"MIT"
] | null | null | null | music/migrations/0001_initial.py | Ashutoshkrs/music_app | 45a6a52be0232c8de99136dedd9a9c0e7fdfef3c | [
"MIT"
] | 20 | 2018-10-26T14:46:35.000Z | 2020-10-18T05:27:18.000Z | # Generated by Django 2.1.1 on 2018-10-22 19:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: create the ``Album`` and ``Song`` tables."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('artist', models.CharField(max_length=250)),
                ('title', models.CharField(max_length=200)),
                ('genre', models.CharField(max_length=100)),
                ('albums_logo', models.CharField(max_length=1000)),
            ],
        ),
        migrations.CreateModel(
            name='Song',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('file_type', models.CharField(max_length=10)),
                ('song_title', models.CharField(max_length=250)),
                # Deleting an Album cascades to its Songs.
                ('album', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE,
                    to='music.Album')),
            ],
        ),
    ]
| 33.371429 | 114 | 0.57363 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('artist', models.CharField(max_length=250)),
('title', models.CharField(max_length=200)),
('genre', models.CharField(max_length=100)),
('albums_logo', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file_type', models.CharField(max_length=10)),
('song_title', models.CharField(max_length=250)),
('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='music.Album')),
],
),
]
| true | true |
1c3a6e1dd73a95a1331e02a93814d1f2befb97ed | 495 | py | Python | Question25.py | Schrodinger73/PracticalJournal_Class11 | cf098a87bcba807858ea7e904e8ac57742f19e70 | [
"MIT"
] | 13 | 2020-10-22T05:12:27.000Z | 2022-01-10T10:59:10.000Z | Question25.py | Schrodinger73/PracticalJournal_Class11 | cf098a87bcba807858ea7e904e8ac57742f19e70 | [
"MIT"
] | 2 | 2020-10-25T19:34:18.000Z | 2020-11-03T19:02:22.000Z | Question25.py | Schrodinger73/PracticalJournal_Class11 | cf098a87bcba807858ea7e904e8ac57742f19e70 | [
"MIT"
] | 7 | 2020-10-25T18:53:09.000Z | 2020-12-25T03:15:03.000Z | # Question
# 25. Write a program to reverse a list of integers
# Code
x = []
while True:
print("int",str(len(x)+1)+ ':')
i = input()
if i != 'n' and i.strip().replace(' ','') != '' and i.isdigit():
x.append(int(i))
else: break
y = []
for e in x: y.insert(0, e)
print("New list:",y)
# Input
# int 1:
# 2
# int 2:
# 3
# int 3:
# 4
# int 4:
# None
# New list: [4, 3, 2]
# Additional Comments
# if no digit is entered or letter is entered then end list | 17.678571 | 68 | 0.537374 |
x = []
while True:
print("int",str(len(x)+1)+ ':')
i = input()
if i != 'n' and i.strip().replace(' ','') != '' and i.isdigit():
x.append(int(i))
else: break
y = []
for e in x: y.insert(0, e)
print("New list:",y)
| true | true |
1c3a6e7165a7f68e70a893ebe3220424c741e6f8 | 597 | py | Python | main/wsgi.py | yosukesuzuki/djangae-template | 7bd1f2c72b18ed00d977e5f407fcf9307eeaeb87 | [
"Apache-2.0"
] | null | null | null | main/wsgi.py | yosukesuzuki/djangae-template | 7bd1f2c72b18ed00d977e5f407fcf9307eeaeb87 | [
"Apache-2.0"
] | null | null | null | main/wsgi.py | yosukesuzuki/djangae-template | 7bd1f2c72b18ed00d977e5f407fcf9307eeaeb87 | [
"Apache-2.0"
] | null | null | null | """
WSGI config for main project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
from main.boot import fix_path
# NOTE(review): fix_path() deliberately runs before the Django/Djangae
# imports below -- presumably it puts vendored dependencies on sys.path.
# Confirm against main.boot before reordering these imports.
fix_path()
import os
from django.core.wsgi import get_wsgi_application
from djangae.wsgi import DjangaeApplication
from djangae.utils import on_production
# Select production vs. development settings based on the runtime
# environment reported by Djangae.
settings = "main.settings_live" if on_production() else "main.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings)
# The WSGI entry point served by the app server.
application = DjangaeApplication(get_wsgi_application())
| 25.956522 | 78 | 0.802345 |
from main.boot import fix_path
fix_path()
import os
from django.core.wsgi import get_wsgi_application
from djangae.wsgi import DjangaeApplication
from djangae.utils import on_production
settings = "main.settings_live" if on_production() else "main.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings)
application = DjangaeApplication(get_wsgi_application())
| true | true |
1c3a70203a0e47efd5e2100f9a6d2d8afa00b562 | 285 | py | Python | src/outpost/django/research/urls.py | medunigraz/outpost.django.research | 4336835663b9e1a8984c08ca709860ead8791d32 | [
"BSD-2-Clause"
] | null | null | null | src/outpost/django/research/urls.py | medunigraz/outpost.django.research | 4336835663b9e1a8984c08ca709860ead8791d32 | [
"BSD-2-Clause"
] | null | null | null | src/outpost/django/research/urls.py | medunigraz/outpost.django.research | 4336835663b9e1a8984c08ca709860ead8791d32 | [
"BSD-2-Clause"
] | null | null | null | from django.conf.urls import include, url
from . import views
app_name = "research"
urlpatterns = [
url(r"^search/(?P<database>[\w\-.]+)/(?P<schema>\w+)/$", views.SearchView.as_view()),
url(r"^detail/(?P<database>[\w\-.]+)/(?P<schema>\w+)/$", views.DetailView.as_view()),
]
| 25.909091 | 89 | 0.617544 | from django.conf.urls import include, url
from . import views
app_name = "research"
urlpatterns = [
url(r"^search/(?P<database>[\w\-.]+)/(?P<schema>\w+)/$", views.SearchView.as_view()),
url(r"^detail/(?P<database>[\w\-.]+)/(?P<schema>\w+)/$", views.DetailView.as_view()),
]
| true | true |
1c3a720c40fef0a082fb59e93c915bf4bc93a166 | 2,394 | py | Python | tests/test_core/test_alfa_client.py | stas12312/aioalfacrm | 1501634fa5ef4591936be2e6147827565e4a0b36 | [
"MIT"
] | null | null | null | tests/test_core/test_alfa_client.py | stas12312/aioalfacrm | 1501634fa5ef4591936be2e6147827565e4a0b36 | [
"MIT"
] | 49 | 2021-11-11T16:00:40.000Z | 2021-11-24T15:37:34.000Z | tests/test_core/test_alfa_client.py | stas12312/aioalfacrm | 1501634fa5ef4591936be2e6147827565e4a0b36 | [
"MIT"
] | null | null | null | import pytest
try:
from unittest.mock import AsyncMock as CoroutineMock, patch
except ImportError:
from asynctest import CoroutineMock, patch
from aioalfacrm import AlfaClient
def add_auth_request(aresponses):
    """Queue a fake successful login response on the aresponses mock."""
    token_payload = {'token': 'api-token'}
    aresponses.add(
        'demo.s20.online', '/v2api/auth/login', 'POST', token_payload)
@pytest.mark.asyncio
async def test_init(session):
    """The constructor must store every argument unchanged."""
    client = AlfaClient(
        hostname='demo.s20.online',
        email='test@test.example',
        api_key='api-key',
        branch_id=1,
        session=session,
    )
    expected = {
        'hostname': 'demo.s20.online',
        'email': 'test@test.example',
        'api_key': 'api-key',
        'branch_id': 1,
        '_session': session,
    }
    for attribute, value in expected.items():
        assert getattr(client, attribute) == value
@pytest.mark.asyncio
async def test_close(session):
    """close() must close the underlying aiohttp session."""
    client = AlfaClient(
        hostname='demo.s20.online',
        email='test@test.example',
        api_key='api_key',
        branch_id=1,
        session=session,
    )
    patched_close = patch(
        'aiohttp.ClientSession.close', new_callable=CoroutineMock)
    with patched_close as mocked_close:
        await client.close()
        mocked_close.assert_awaited()
@pytest.mark.asyncio
async def test_auth(aresponses):
    """auth() must fetch a token and store it on the auth manager."""
    add_auth_request(aresponses)
    client = AlfaClient(
        hostname='demo.s20.online',
        email='test@test.example',
        api_key='api_key',
        branch_id=1,
    )
    await client.auth()
    assert client.auth_manager.token.value == 'api-token'
    await client.close()
@pytest.mark.asyncio
async def test_correct_check_auth(aresponses, session):
    """check_auth() must return False when the API answers 403.

    NOTE(review): despite its name, this test covers *rejected*
    credentials; its name looks swapped with test_incorrect_check_auth.
    """
    forbidden = aresponses.Response(
        status=403,
        body="{'name': 'Forbidden', 'message': 'Not Authorized', "
             "'code': 0, 'status': 403}",
    )
    aresponses.add('auth.s20.online', '/v2api/auth/login', 'POST',
                   response=forbidden)
    client = AlfaClient(
        hostname='auth.s20.online',
        email='test@test.example',
        api_key='api_key',
        branch_id=1,
        session=session,
    )
    assert await client.check_auth() is False
@pytest.mark.asyncio
async def test_incorrect_check_auth(aresponses, session):
    """check_auth() must return True for a successful login.

    NOTE(review): name and behaviour look swapped with
    test_correct_check_auth.
    """
    add_auth_request(aresponses)
    client = AlfaClient(
        hostname='demo.s20.online',
        email='test@test.example',
        api_key='api_key',
        branch_id=1,
        session=session,
    )
    assert await client.check_auth() is True
| 26.021739 | 118 | 0.639515 | import pytest
try:
from unittest.mock import AsyncMock as CoroutineMock, patch
except ImportError:
from asynctest import CoroutineMock, patch
from aioalfacrm import AlfaClient
def add_auth_request(aresponses):
aresponses.add('demo.s20.online', '/v2api/auth/login', 'POST', {'token': 'api-token'})
@pytest.mark.asyncio
async def test_init(session):
client = AlfaClient(
hostname='demo.s20.online',
email='test@test.example',
api_key='api-key',
branch_id=1,
session=session
)
assert client.hostname == 'demo.s20.online'
assert client.email == 'test@test.example'
assert client.api_key == 'api-key'
assert client.branch_id == 1
assert client._session == session
@pytest.mark.asyncio
async def test_close(session):
client = AlfaClient(
hostname='demo.s20.online',
email='test@test.example',
api_key='api_key',
branch_id=1,
session=session,
)
with patch('aiohttp.ClientSession.close', new_callable=CoroutineMock) as mocked_close:
await client.close()
mocked_close.assert_awaited()
@pytest.mark.asyncio
async def test_auth(aresponses):
add_auth_request(aresponses)
client = AlfaClient(
hostname='demo.s20.online',
email='test@test.example',
api_key='api_key',
branch_id=1,
)
await client.auth()
assert client.auth_manager.token.value == 'api-token'
await client.close()
@pytest.mark.asyncio
async def test_correct_check_auth(aresponses, session):
aresponses.add('auth.s20.online', '/v2api/auth/login', 'POST',
response=aresponses.Response(
status=403, body="{'name': 'Forbidden', 'message': 'Not Authorized', 'code': 0, 'status': 403}"
)
)
client = AlfaClient(
hostname='auth.s20.online',
email='test@test.example',
api_key='api_key',
branch_id=1,
session=session,
)
assert await client.check_auth() is False
@pytest.mark.asyncio
async def test_incorrect_check_auth(aresponses, session):
add_auth_request(aresponses)
client = AlfaClient(
hostname='demo.s20.online',
email='test@test.example',
api_key='api_key',
branch_id=1,
session=session,
)
assert await client.check_auth() is True
| true | true |
1c3a72806bd68dbefacd821759f72754e6c94a7f | 905 | py | Python | rastervision2/core/rv_pipeline/chip_classification.py | alexchunet/raster-vision | 76e2965557fc8380e2ffc4aa7ab1f5dc45f79033 | [
"Apache-2.0"
] | 1 | 2020-05-27T07:07:58.000Z | 2020-05-27T07:07:58.000Z | rastervision2/core/rv_pipeline/chip_classification.py | alexchunet/raster-vision | 76e2965557fc8380e2ffc4aa7ab1f5dc45f79033 | [
"Apache-2.0"
] | null | null | null | rastervision2/core/rv_pipeline/chip_classification.py | alexchunet/raster-vision | 76e2965557fc8380e2ffc4aa7ab1f5dc45f79033 | [
"Apache-2.0"
] | null | null | null | import logging
import numpy as np
from rastervision2.core.rv_pipeline.rv_pipeline import RVPipeline
from rastervision2.core.box import Box
# Module-level logger named after this module.
log = logging.getLogger(__name__)
def get_train_windows(scene, chip_size):
    """Return non-overlapping training windows for *scene*.

    Slides a ``chip_size`` x ``chip_size`` grid over the raster extent
    (stride equals the chip size, so windows do not overlap), optionally
    clips the grid to the scene's AOI polygons, and keeps only windows
    whose chip values sum to a positive number.
    """
    raster_source = scene.raster_source
    candidate_windows = raster_source.get_extent().get_windows(
        chip_size, chip_size)
    if scene.aoi_polygons:
        candidate_windows = Box.filter_by_aoi(
            candidate_windows, scene.aoi_polygons)
    # Reading the chip is the expensive part; it is done once per window.
    return [
        window for window in candidate_windows
        if np.sum(raster_source.get_chip(window).ravel()) > 0
    ]
class ChipClassification(RVPipeline):
    """RVPipeline for chip classification.

    Window generation is delegated to the module-level
    ``get_train_windows`` helper; labels come from the scene's ground
    truth label source.
    """

    def get_train_labels(self, window, scene):
        """Return the ground-truth labels for *window* in *scene*."""
        return scene.ground_truth_label_source.get_labels(window=window)

    def get_train_windows(self, scene):
        """Return training windows for *scene* at the configured chip size."""
        return get_train_windows(scene, self.config.train_chip_sz)
| 29.193548 | 72 | 0.738122 | import logging
import numpy as np
from rastervision2.core.rv_pipeline.rv_pipeline import RVPipeline
from rastervision2.core.box import Box
log = logging.getLogger(__name__)
def get_train_windows(scene, chip_size):
train_windows = []
extent = scene.raster_source.get_extent()
stride = chip_size
windows = extent.get_windows(chip_size, stride)
if scene.aoi_polygons:
windows = Box.filter_by_aoi(windows, scene.aoi_polygons)
for window in windows:
chip = scene.raster_source.get_chip(window)
if np.sum(chip.ravel()) > 0:
train_windows.append(window)
return train_windows
class ChipClassification(RVPipeline):
def get_train_windows(self, scene):
return get_train_windows(scene, self.config.train_chip_sz)
def get_train_labels(self, window, scene):
return scene.ground_truth_label_source.get_labels(window=window)
| true | true |
1c3a74dca3d84a11503553d986e556a6ac3ec2ed | 2,877 | py | Python | s3pypi/__main__.py | andrei-shabanski/s3pypi | 48718149cf43d6e3252712d072d5b0de850bac55 | [
"MIT"
] | null | null | null | s3pypi/__main__.py | andrei-shabanski/s3pypi | 48718149cf43d6e3252712d072d5b0de850bac55 | [
"MIT"
] | null | null | null | s3pypi/__main__.py | andrei-shabanski/s3pypi | 48718149cf43d6e3252712d072d5b0de850bac55 | [
"MIT"
] | null | null | null | from __future__ import print_function
import argparse
import logging
import sys
from pathlib import Path
from typing import Dict
from s3pypi import __prog__, __version__, core
# Configure the root logger with library defaults; the per-run verbosity
# level is set later in main().
logging.basicConfig()
log = logging.getLogger(__prog__)
def string_dict(text: str) -> Dict[str, str]:
    """Parse a ``"k1=v1,k2=v2"`` string into a dict.

    Each comma-separated item is stripped of surrounding whitespace and
    split on its first ``=``; an item without ``=`` raises ValueError.
    """
    pairs: Dict[str, str] = {}
    for item in text.split(","):
        key, value = item.strip().split("=", 1)
        pairs[key] = value
    return pairs
def get_arg_parser():
    """Build and return the argparse parser for the s3pypi CLI."""
    parser = argparse.ArgumentParser(prog=__prog__)
    parser.add_argument(
        "dist",
        nargs="+",
        type=Path,
        help="The distribution files to upload to S3. Usually `dist/*`.",
    )
    parser.add_argument(
        "-b", "--bucket", required=True, help="The S3 bucket to upload to.")
    parser.add_argument("--profile", help="Optional AWS profile to use.")
    parser.add_argument("--region", help="Optional AWS region to target.")
    parser.add_argument("--prefix", help="Optional prefix to use for S3 object names.")
    parser.add_argument("--acl", help="Optional canned ACL to use for S3 objects.")
    parser.add_argument("--s3-endpoint-url", help="Optional custom S3 endpoint URL.")
    parser.add_argument(
        "--s3-put-args",
        type=string_dict,
        help=(
            "Optional extra arguments to S3 PutObject calls. Example: "
            "'ServerSideEncryption=aws:kms,SSEKMSKeyId=1234...'"
        ),
    )
    parser.add_argument(
        "--unsafe-s3-website",
        action="store_true",
        help=(
            "Store the index as an S3 object named `<package>/index.html` instead of `<package>/`. "
            "This option is provided for backwards compatibility with S3 website endpoints, "
            "the use of which is discouraged because they require the bucket to be publicly accessible. "
            "It's recommended to instead use a private S3 bucket with a CloudFront Origin Access Identity."
        ),
    )
    parser.add_argument(
        "-l",
        "--lock-indexes",
        action="store_true",
        help=(
            "Lock index objects in S3 using a DynamoDB table named `<bucket>-locks`. "
            "This ensures that concurrent invocations of s3pypi do not overwrite each other's changes."
        ),
    )
    parser.add_argument(
        "--put-root-index",
        action="store_true",
        help="Write a root index that lists all available package names.",
    )
    parser.add_argument("-f", "--force", action="store_true", help="Overwrite files.")
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output.")
    parser.add_argument("-V", "--version", action="version", version=__version__)
    return parser
def main(*args):
    """CLI entry point: parse arguments and upload the packages.

    Exits with an error message on any S3PyPiError from the core layer.
    """
    options = vars(get_arg_parser().parse_args(args or sys.argv[1:]))
    verbose = options.pop("verbose")
    log.setLevel(logging.DEBUG if verbose else logging.INFO)
    try:
        core.upload_packages(**options)
    except core.S3PyPiError as e:
        sys.exit(f"ERROR: {e}")


if __name__ == "__main__":
    main()
| 34.662651 | 107 | 0.635732 | from __future__ import print_function
import argparse
import logging
import sys
from pathlib import Path
from typing import Dict
from s3pypi import __prog__, __version__, core
logging.basicConfig()
log = logging.getLogger(__prog__)
def string_dict(text: str) -> Dict[str, str]:
return dict(tuple(item.strip().split("=", 1)) for item in text.split(","))
def get_arg_parser():
p = argparse.ArgumentParser(prog=__prog__)
p.add_argument(
"dist",
nargs="+",
type=Path,
help="The distribution files to upload to S3. Usually `dist/*`.",
)
p.add_argument("-b", "--bucket", required=True, help="The S3 bucket to upload to.")
p.add_argument("--profile", help="Optional AWS profile to use.")
p.add_argument("--region", help="Optional AWS region to target.")
p.add_argument("--prefix", help="Optional prefix to use for S3 object names.")
p.add_argument("--acl", help="Optional canned ACL to use for S3 objects.")
p.add_argument("--s3-endpoint-url", help="Optional custom S3 endpoint URL.")
p.add_argument(
"--s3-put-args",
type=string_dict,
help=(
"Optional extra arguments to S3 PutObject calls. Example: "
"'ServerSideEncryption=aws:kms,SSEKMSKeyId=1234...'"
),
)
p.add_argument(
"--unsafe-s3-website",
action="store_true",
help=(
"Store the index as an S3 object named `<package>/index.html` instead of `<package>/`. "
"This option is provided for backwards compatibility with S3 website endpoints, "
"the use of which is discouraged because they require the bucket to be publicly accessible. "
"It's recommended to instead use a private S3 bucket with a CloudFront Origin Access Identity."
),
)
p.add_argument(
"-l",
"--lock-indexes",
action="store_true",
help=(
"Lock index objects in S3 using a DynamoDB table named `<bucket>-locks`. "
"This ensures that concurrent invocations of s3pypi do not overwrite each other's changes."
),
)
p.add_argument(
"--put-root-index",
action="store_true",
help="Write a root index that lists all available package names.",
)
p.add_argument("-f", "--force", action="store_true", help="Overwrite files.")
p.add_argument("-v", "--verbose", action="store_true", help="Verbose output.")
p.add_argument("-V", "--version", action="version", version=__version__)
return p
def main(*args):
kwargs = vars(get_arg_parser().parse_args(args or sys.argv[1:]))
log.setLevel(logging.DEBUG if kwargs.pop("verbose") else logging.INFO)
try:
core.upload_packages(**kwargs)
except core.S3PyPiError as e:
sys.exit(f"ERROR: {e}")
if __name__ == "__main__":
main()
| true | true |
1c3a75ba491a649ac5339b068c1a2c8baaf61bf3 | 4,048 | py | Python | psda/demo.py | aswart/PSDA | 2bdd071e6a3dee89827900553185a98a38292843 | [
"MIT"
] | null | null | null | psda/demo.py | aswart/PSDA | 2bdd071e6a3dee89827900553185a98a38292843 | [
"MIT"
] | null | null | null | psda/demo.py | aswart/PSDA | 2bdd071e6a3dee89827900553185a98a38292843 | [
"MIT"
] | null | null | null | import numpy as np
from numpy.random import randn, randint
import matplotlib.pyplot as plt
from psda import VMF, PSDA, decompose, atleast2
from pyllr import quick_eval
"""
This demo uses a quick-and-dirty data simulator, using Gaussians, not VMF.
It does not work for high dimensions. But you can play with dim = 2 or 3
if you like.
"""
dim = 20
# b: between-speaker (prior) concentration, w: within-speaker concentration.
b, w = 10, 50 # between, within concentrations
ns = 100 # number of training speakers
n = 1000 # number of training examples
# set up model to sample from
norm, mu = decompose(randn(dim))
model0 = PSDA(w, VMF(mu, b))
Z = model0.sample_speakers(ns)
labels = randint(ns,size=(n,))
uu, labels, counts = np.unique(labels, return_inverse=True, return_counts=True)
# sample training data
Xtrain = model0.sample(Z, labels)
# Scatter the raw embeddings only when they are 2-dimensional.
if dim == 2:
plt.figure()
plt.scatter(Xtrain[:,0],Xtrain[:,1])
plt.axis('square')
plt.xlim(-1.2,1.2)
plt.ylim(-1.2,1.2)
plt.grid()
plt.title('Embeddings')
plt.show()
# one hot label matrix
L = np.full((n,len(counts)),False) # (n, ns)
L[np.arange(n),labels] = True
# these are the 1st-order stats (per-speaker means) required by EM training
means = (L.T @ Xtrain) / counts.reshape(-1,1)
# filter out singleton speakers
means, counts = atleast2(means, counts)
# train the model!
model, obj = PSDA.em(means, counts, niters=10)
# Plot the EM objective to check convergence.
plt.figure()
plt.plot(obj,'-*')
plt.grid()
plt.title('PSDA EM algorithm')
plt.xlabel('iteration')
plt.ylabel('marginal likelihood')
plt.show()
# generate some test data
nt = 10000
Z1 = model0.sample_speakers(nt)
Z2 = model0.sample_speakers(nt)
Enroll = model0.sample(Z1, np.arange(nt)) # enrollment embeddings
Test1 = model0.sample(Z1, np.arange(nt)) # target test embeddings
Test2 = model0.sample(Z2, np.arange(nt)) # nontarget test embeddings
# compute PSDA scores
E = model.prep(Enroll)
T1 = model.prep(Test1)
T2 = model.prep(Test2)
tar = E.llr_vector(T1)
non = E.llr_vector(T2)
# compute cosine scores
tarc = (Enroll*Test1).sum(axis=-1)
nonc = (Enroll*Test2).sum(axis=-1)
# Compare PSDA vs cosine scores for target and nontarget trials.
plt.figure()
plt.plot(non,nonc,'.',label='non')
plt.plot(tar,tarc,'.',label='tar')
plt.grid()
plt.xlabel('PSDA score')
plt.ylabel('cosine score')
plt.legend()
plt.show()
# compute double-enroll PSDA scores
Enroll2 = model0.sample(Z1, np.arange(nt)) # 2nd enrollment embeddings
E2 = model.prep(Enroll + Enroll2)
tar2 = E2.llr_vector(T1)
non2 = E2.llr_vector(T2)
# compute double-enroll cosine scores (length-normalized sum of enrollments)
E2c = decompose(Enroll + Enroll2)[1]
tar2c = (E2c*Test1).sum(axis=-1)
non2c = (E2c*Test2).sum(axis=-1)
# Pool single- and double-enroll trials for the "mixed" condition.
tar12 = np.hstack([tar,tar2])
non12 = np.hstack([non,non2])
tar12c = np.hstack([tarc,tar2c])
non12c = np.hstack([nonc,non2c])
# Evaluate EER / Cllr / minCllr for every scoring condition.
eer_p, cllr_p, mincllr_p = quick_eval.tarnon_2_eer_cllr_mincllr(tar, non)
eer_p2, cllr_p2, mincllr_p2 = quick_eval.tarnon_2_eer_cllr_mincllr(tar2, non2)
eer_c, cllr_c, mincllr_c = quick_eval.tarnon_2_eer_cllr_mincllr(tarc, nonc)
eer_c2, cllr_c2, mincllr_c2 = quick_eval.tarnon_2_eer_cllr_mincllr(tar2c, non2c)
eer_p12, cllr_p12, mincllr_p12 = quick_eval.tarnon_2_eer_cllr_mincllr(tar12, non12)
eer_c12, cllr_c12, mincllr_c12 = quick_eval.tarnon_2_eer_cllr_mincllr(tar12c, non12c)
print("\n\nCosine scoring, single enroll:")
print(f"  EER: {eer_c*100:.1f}%")
print(f"  Cllr: {cllr_c:.3f}")
print(f"  minCllr: {mincllr_c:.3f}")
print("\nPSDA scoring, single enroll:")
print(f"  EER: {eer_p*100:.1f}%")
print(f"  Cllr: {cllr_p:.3f}")
print(f"  minCllr: {mincllr_p:.3f}")
print("\nCosine scoring, double enroll:")
print(f"  EER: {eer_c2*100:.1f}%")
print(f"  Cllr: {cllr_c2:.3f}")
print(f"  minCllr: {mincllr_c2:.3f}")
print("\nPSDA scoring, double enroll:")
print(f"  EER: {eer_p2*100:.1f}%")
print(f"  Cllr: {cllr_p2:.3f}")
print(f"  minCllr: {mincllr_p2:.3f}")
print("\nCosine scoring, mixed enroll:")
print(f"  EER: {eer_c12*100:.1f}%")
print(f"  Cllr: {cllr_c12:.3f}")
print(f"  minCllr: {mincllr_c12:.3f}")
print("\nPSDA scoring, mixed enroll:")
print(f"  EER: {eer_p12*100:.1f}%")
print(f"  Cllr: {cllr_p12:.3f}")
print(f"  minCllr: {mincllr_p12:.3f}")
| 26.457516 | 85 | 0.693182 | import numpy as np
from numpy.random import randn, randint
import matplotlib.pyplot as plt
from psda import VMF, PSDA, decompose, atleast2
from pyllr import quick_eval
dim = 20
b, w = 10, 50
ns = 100
n = 1000
norm, mu = decompose(randn(dim))
model0 = PSDA(w, VMF(mu, b))
Z = model0.sample_speakers(ns)
labels = randint(ns,size=(n,))
uu, labels, counts = np.unique(labels, return_inverse=True, return_counts=True)
Xtrain = model0.sample(Z, labels)
if dim == 2:
plt.figure()
plt.scatter(Xtrain[:,0],Xtrain[:,1])
plt.axis('square')
plt.xlim(-1.2,1.2)
plt.ylim(-1.2,1.2)
plt.grid()
plt.title('Embeddings')
plt.show()
L = np.full((n,len(counts)),False)
L[np.arange(n),labels] = True
means = (L.T @ Xtrain) / counts.reshape(-1,1)
means, counts = atleast2(means, counts)
model, obj = PSDA.em(means, counts, niters=10)
plt.figure()
plt.plot(obj,'-*')
plt.grid()
plt.title('PSDA EM algorithm')
plt.xlabel('iteration')
plt.ylabel('marginal likelihood')
plt.show()
nt = 10000
Z1 = model0.sample_speakers(nt)
Z2 = model0.sample_speakers(nt)
Enroll = model0.sample(Z1, np.arange(nt))
Test1 = model0.sample(Z1, np.arange(nt))
Test2 = model0.sample(Z2, np.arange(nt))
E = model.prep(Enroll)
T1 = model.prep(Test1)
T2 = model.prep(Test2)
tar = E.llr_vector(T1)
non = E.llr_vector(T2)
tarc = (Enroll*Test1).sum(axis=-1)
nonc = (Enroll*Test2).sum(axis=-1)
plt.figure()
plt.plot(non,nonc,'.',label='non')
plt.plot(tar,tarc,'.',label='tar')
plt.grid()
plt.xlabel('PSDA score')
plt.ylabel('cosine score')
plt.legend()
plt.show()
Enroll2 = model0.sample(Z1, np.arange(nt))
E2 = model.prep(Enroll + Enroll2)
tar2 = E2.llr_vector(T1)
non2 = E2.llr_vector(T2)
E2c = decompose(Enroll + Enroll2)[1]
tar2c = (E2c*Test1).sum(axis=-1)
non2c = (E2c*Test2).sum(axis=-1)
tar12 = np.hstack([tar,tar2])
non12 = np.hstack([non,non2])
tar12c = np.hstack([tarc,tar2c])
non12c = np.hstack([nonc,non2c])
eer_p, cllr_p, mincllr_p = quick_eval.tarnon_2_eer_cllr_mincllr(tar, non)
eer_p2, cllr_p2, mincllr_p2 = quick_eval.tarnon_2_eer_cllr_mincllr(tar2, non2)
eer_c, cllr_c, mincllr_c = quick_eval.tarnon_2_eer_cllr_mincllr(tarc, nonc)
eer_c2, cllr_c2, mincllr_c2 = quick_eval.tarnon_2_eer_cllr_mincllr(tar2c, non2c)
eer_p12, cllr_p12, mincllr_p12 = quick_eval.tarnon_2_eer_cllr_mincllr(tar12, non12)
eer_c12, cllr_c12, mincllr_c12 = quick_eval.tarnon_2_eer_cllr_mincllr(tar12c, non12c)
print("\n\nCosine scoring, single enroll:")
print(f" EER: {eer_c*100:.1f}%")
print(f" Cllr: {cllr_c:.3f}")
print(f" minCllr: {mincllr_c:.3f}")
print("\nPSDA scoring, single enroll:")
print(f" EER: {eer_p*100:.1f}%")
print(f" Cllr: {cllr_p:.3f}")
print(f" minCllr: {mincllr_p:.3f}")
print("\nCosine scoring, double enroll:")
print(f" EER: {eer_c2*100:.1f}%")
print(f" Cllr: {cllr_c2:.3f}")
print(f" minCllr: {mincllr_c2:.3f}")
print("\nPSDA scoring, double enroll:")
print(f" EER: {eer_p2*100:.1f}%")
print(f" Cllr: {cllr_p2:.3f}")
print(f" minCllr: {mincllr_p2:.3f}")
print("\nCosine scoring, mixed enroll:")
print(f" EER: {eer_c12*100:.1f}%")
print(f" Cllr: {cllr_c12:.3f}")
print(f" minCllr: {mincllr_c12:.3f}")
print("\nPSDA scoring, mixed enroll:")
print(f" EER: {eer_p12*100:.1f}%")
print(f" Cllr: {cllr_p12:.3f}")
print(f" minCllr: {mincllr_p12:.3f}")
| true | true |
1c3a763bfa27750340ecc34c91ff0be3c4d675bc | 815 | py | Python | ontask/migrations/0023_auto_20191230_0837.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 33 | 2017-12-02T04:09:24.000Z | 2021-11-07T08:41:57.000Z | ontask/migrations/0023_auto_20191230_0837.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 189 | 2017-11-16T04:06:29.000Z | 2022-03-11T23:35:59.000Z | ontask/migrations/0023_auto_20191230_0837.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 30 | 2017-11-30T03:35:44.000Z | 2022-01-31T03:08:08.000Z | # Generated by Django 2.2.8 on 2019-12-29 22:07
from django.db import migrations
def move_excluded_items_to_payload(apps, schema_editor):
    """Copy each ScheduledOperation's exclude_values into its payload.

    Data-migration helper: for every scheduled operation with a
    non-empty ``exclude_values``, store that value under the
    ``'exclude_values'`` key of its ``payload`` dict and save it.
    Runs only against the default database alias.
    """
    if schema_editor.connection.alias != 'default':
        return

    scheduled_op_model = apps.get_model('ontask', 'ScheduledOperation')
    for operation in scheduled_op_model.objects.all():
        if not operation.exclude_values:
            continue
        operation.payload['exclude_values'] = operation.exclude_values
        operation.save()
class Migration(migrations.Migration):
    """Move excluded_items to the payload."""
    dependencies = [
        ('ontask', '0022_auto_20191222_0931'),
    ]
    operations = [
        # Forward-only data migration: no reverse_code is supplied, so the
        # migration cannot be unapplied.
        migrations.RunPython(move_excluded_items_to_payload),
    ]
| 28.103448 | 74 | 0.694479 |
from django.db import migrations
def move_excluded_items_to_payload(apps, schema_editor):
if schema_editor.connection.alias != 'default':
return
ScheduledOperation = apps.get_model('ontask', 'ScheduledOperation')
for item in ScheduledOperation.objects.all():
if item.exclude_values:
item.payload['exclude_values'] = item.exclude_values
item.save()
class Migration(migrations.Migration):
dependencies = [
('ontask', '0022_auto_20191222_0931'),
]
operations = [
migrations.RunPython(move_excluded_items_to_payload),
]
| true | true |
1c3a7688b9e25e9cb16c6b395e6dfd222433496f | 5,442 | py | Python | pyrodash/geometrics/parallelepiped.py | Raudcu/pyrodash | 3671086ef57c097fa055a908a65401eb6648c69a | [
"MIT"
] | 1 | 2021-05-19T10:27:59.000Z | 2021-05-19T10:27:59.000Z | pyrodash/geometrics/parallelepiped.py | Raudcu/pyrodash | 3671086ef57c097fa055a908a65401eb6648c69a | [
"MIT"
] | null | null | null | pyrodash/geometrics/parallelepiped.py | Raudcu/pyrodash | 3671086ef57c097fa055a908a65401eb6648c69a | [
"MIT"
] | null | null | null | import numpy as np
from itertools import product
import plotly.graph_objects as go
class Parallelepiped:
    """
    Class to build and draw a Parallelepiped.
    ...
    Attributes
    ----------
    L : float or numpy array
        x, y, z lengths of the parallelepiped sides. If float, a cube of
        side L is built
    initial_vertex_position : numpy array
        x, y, z coordinates of the initial vertex position.
    vertices : numpy array
        vertices coordinates of the parallelepiped.
    face_vertices : numpy array
        vertices coordinates of each face.
    faces : list of plotly go
        scatter 3d plotly objects of the parallelepiped faces.
    """

    def __init__(
        self,
        L,
        initial_vertex_position=(0, 0, 0),
        edge_color="black",
        edge_width=2,
        face_opacity=0,
    ):
        """
        Parameters
        ----------
        L : float or list of float or numpy array
            x, y, z lengths of the parallelepiped sides. If float, a cube
            of side L is built.
        initial_vertex_position : sequence of float or numpy array, optional
            x, y, z coordinates of the initial vertex position,
            by default (0, 0, 0).
        edge_color : str, optional
            rgb, rgba, hex, hsl, hsv, or named color string for the edge
            color, by default "black".
        edge_width : float, optional
            edge width, by default 2.
        face_opacity : int between or equal to 0 and 1, optional
            opacity of the faces, by default 0.
        """
        # Normalize list input to a numpy array; a scalar stays scalar so
        # callers can request a cube with a single number.
        if isinstance(L, (list, np.ndarray)):
            self.L = np.array(L)
        else:
            self.L = L

        self.initial_vertex_position = np.array(initial_vertex_position)

        # Side lengths along x, y, z (a scalar L means a cube).
        if isinstance(self.L, np.ndarray):
            lengths = self.L
        else:
            lengths = (self.L, self.L, self.L)

        # The 8 vertices are all combinations of {0, Lx} x {0, Ly} x {0, Lz}
        # translated by the initial vertex position.
        self.vertices = self.initial_vertex_position + np.array(
            list(product([0, lengths[0]], [0, lengths[1]], [0, lengths[2]]))
        )

        self.face_vertices = self._face_vertices_calculation()
        self.faces = self._draw_faces(edge_color, edge_width, face_opacity)

    def _face_vertices_calculation(self):
        """Calculates the vertices coordinates of each parallelepiped face.

        Returns
        -------
        face_vertices : numpy array
            vertices coordinates of each face, shape (6, 4, 3).
        """
        face_vertices = []

        # The six ways of grabbing four points between the eight
        # parallelepiped vertices (indices into self.vertices), ordered so
        # consecutive points trace the face perimeter.
        faces = [
            (2, 0, 1, 3),
            (4, 6, 7, 5),
            (6, 2, 3, 7),
            (0, 4, 5, 1),
            (0, 4, 6, 2),
            (1, 5, 7, 3),
        ]

        for face in faces:
            # The x, y, z coordinates of each of the four face vertices.
            vert_x = self.vertices[face, 0]
            vert_y = self.vertices[face, 1]
            vert_z = self.vertices[face, 2]
            face_vertices.append(
                [np.array(vert) for vert in zip(vert_x, vert_y, vert_z)]
            )

        return np.array(face_vertices)

    def _draw_faces(self, edge_color, edge_width, face_opacity):
        """Generates the plotly scatter 3d for the parallelepiped faces.

        It builds each face from the face vertices by generating two
        scatters: one to ensure the edges with the proper color and width,
        and the other one for the faces.

        Parameters
        ----------
        edge_color : str
            rgb, rgba, hex, hsl, hsv, or named color string for the edge
            color.
        edge_width : float
            edge width.
        face_opacity : int between or equal to 0 and 1
            opacity of the faces.

        Returns
        -------
        faces : list of plotly go
            scatter 3d plotly objects of the parallelepiped faces
            (two traces per face: edges, then filled surface).
        """
        faces = []

        for i, vert in enumerate(self.face_vertices):
            # Edge trace: visible outline of the face.
            faces.append(
                go.Scatter3d(
                    x=vert[:, 0],
                    y=vert[:, 1],
                    z=vert[:, 2],
                    mode="lines",
                    line=dict(color=edge_color, width=edge_width),
                    hoverinfo="none",
                    showlegend=False,
                )
            )
            # Surface trace: zero-width lines with surfaceaxis filling the
            # face. i // 2 maps face pairs to their normal axis (0, 1, 2).
            faces.append(
                go.Scatter3d(
                    x=vert[:, 0],
                    y=vert[:, 1],
                    z=vert[:, 2],
                    mode="lines",
                    line=dict(color="gray", width=0),
                    opacity=face_opacity,
                    surfaceaxis=i // 2,
                    surfacecolor="gray",
                    hoverinfo="none",
                    showlegend=False,
                )
            )

        return faces
if __name__ == "__main__":
    # Standalone demo: render one parallelepiped inside a Dash app.
    import dash
    import dash_core_components as dcc
    import dash_html_components as html
    import plotly.graph_objects as go
    # Box of sides 1 x 2 x 3 anchored at (1, 1, 1), orange edges of
    # width 10, faces drawn at 20% opacity.
    p = Parallelepiped([1, 2, 3], [1, 1, 1], "orange", 10, 0.2)
    fig = go.Figure(data=p.faces)
    external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
    app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
    app.layout = html.Div(dcc.Graph(figure=fig))
    # Blocking call: serves the demo until interrupted.
    app.run_server(debug=True)
| 29.901099 | 77 | 0.534546 | import numpy as np
from itertools import product
import plotly.graph_objects as go
class Parallelepiped:
def __init__(
self,
L,
initial_vertex_position=[0, 0, 0],
edge_color="black",
edge_width=2,
face_opacity=0,
):
if isinstance(L, (list, np.ndarray)):
self.L = np.array(L)
else:
self.L = L
self.initial_vertex_position = np.array(initial_vertex_position)
if isinstance(self.L, np.ndarray):
self.vertices = self.initial_vertex_position + np.array(
list(product([0, self.L[0]], [0, self.L[1]], [0, self.L[2]]))
)
else:
self.vertices = self.initial_vertex_position + np.array(
list(product([0, self.L], [0, self.L], [0, self.L]))
)
self.face_vertices = self._face_vertices_calculation()
self.faces = self._draw_faces(edge_color, edge_width, face_opacity)
def _face_vertices_calculation(self):
face_vertices = []
faces = [
(2, 0, 1, 3),
(4, 6, 7, 5),
(6, 2, 3, 7),
(0, 4, 5, 1),
(0, 4, 6, 2),
(1, 5, 7, 3),
]
for face in faces:
vert_x = self.vertices[face, 0]
vert_y = self.vertices[face, 1]
vert_z = self.vertices[face, 2]
face_vertices.append(
[np.array(vert) for vert in zip(vert_x, vert_y, vert_z)]
)
return np.array(face_vertices)
def _draw_faces(self, edge_color, edge_width, face_opacity):
faces = []
for i, vert in enumerate(self.face_vertices):
faces.append(
go.Scatter3d(
x=vert[:, 0],
y=vert[:, 1],
z=vert[:, 2],
mode="lines",
line=dict(color=edge_color, width=edge_width),
hoverinfo="none",
showlegend=False,
)
)
faces.append(
go.Scatter3d(
x=vert[:, 0],
y=vert[:, 1],
z=vert[:, 2],
mode="lines",
line=dict(color="gray", width=0),
opacity=face_opacity,
surfaceaxis=i // 2,
surfacecolor="gray",
hoverinfo="none",
showlegend=False,
)
)
return faces
if __name__ == "__main__":
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
p = Parallelepiped([1, 2, 3], [1, 1, 1], "orange", 10, 0.2)
fig = go.Figure(data=p.faces)
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(dcc.Graph(figure=fig))
app.run_server(debug=True)
| true | true |
1c3a76a7ee7ba27b5033d6a016d79217d35b84e6 | 1,265 | py | Python | src/tests/unit/test_booking_service.py | jorzel/opentable | 712182ae7ca39cda601d0c9e14a28b56794dc2dd | [
"MIT"
] | 9 | 2021-02-23T13:49:11.000Z | 2022-03-27T20:52:16.000Z | src/tests/unit/test_booking_service.py | jorzel/opentable | 712182ae7ca39cda601d0c9e14a28b56794dc2dd | [
"MIT"
] | null | null | null | src/tests/unit/test_booking_service.py | jorzel/opentable | 712182ae7ca39cda601d0c9e14a28b56794dc2dd | [
"MIT"
] | null | null | null | from unittest.mock import ANY, Mock
import pytest
from application.services.booking_table import BookingTableApplicationService
from domain.commands import BookTableCommand
from domain.events.table import BookedTableEvent
from infrastructure.db.memory.repository import MemoryRestaurantRepository
from infrastructure.db.memory.uow import FakeUnitOfWork
@pytest.fixture
def mocked_event_publisher():
    """Return a Mock standing in for the domain event publisher."""
    return Mock()
def test_booking_service_book_table_should_pass_when_table_in_restaurant_is_available(
    restaurant_factory,
    table_factory,
    mocked_event_publisher,
):
    """Booking an open table closes it and publishes a BookedTableEvent."""
    # Arrange: one restaurant holding a single open table for up to 5 people.
    repository = MemoryRestaurantRepository()
    booking_service = BookingTableApplicationService(
        repository, FakeUnitOfWork(), mocked_event_publisher
    )
    table = table_factory(table_id=1, max_persons=5, is_open=True)
    restaurant = restaurant_factory(
        restaurant_id=1, tables=[table], repository=repository
    )
    command = BookTableCommand(restaurant.id, persons=2)

    # Act.
    booking_service.book_table(command)

    # Assert: the table is no longer available, and exactly one event was
    # published. booked_at is time-dependent, hence matched with ANY.
    assert table.is_open is False
    mocked_event_publisher.publish.assert_called_once_with(
        [
            BookedTableEvent(
                table_id=table.id, restaurant_id=restaurant.id, booked_at=ANY
            )
        ]
    )
| 30.119048 | 86 | 0.765217 | from unittest.mock import ANY, Mock
import pytest
from application.services.booking_table import BookingTableApplicationService
from domain.commands import BookTableCommand
from domain.events.table import BookedTableEvent
from infrastructure.db.memory.repository import MemoryRestaurantRepository
from infrastructure.db.memory.uow import FakeUnitOfWork
@pytest.fixture
def mocked_event_publisher():
return Mock()
def test_booking_service_book_table_should_pass_when_table_in_restaurant_is_available(
restaurant_factory,
table_factory,
mocked_event_publisher,
):
repository = MemoryRestaurantRepository()
booking_service = BookingTableApplicationService(
repository, FakeUnitOfWork(), mocked_event_publisher
)
table = table_factory(table_id=1, max_persons=5, is_open=True)
restaurant = restaurant_factory(
restaurant_id=1, tables=[table], repository=repository
)
command = BookTableCommand(restaurant.id, persons=2)
booking_service.book_table(command)
assert table.is_open is False
mocked_event_publisher.publish.assert_called_once_with(
[
BookedTableEvent(
table_id=table.id, restaurant_id=restaurant.id, booked_at=ANY
)
]
)
| true | true |
1c3a76aa4aed1ee5ba205b4f9f05ac25c089bb24 | 1,259 | py | Python | tests/test_template_with_from_stdout.py | tsvetkov-ii/airflow-declarative | 7524030e333b05d3caf09cf5f069fbe1106397c1 | [
"Apache-2.0"
] | 126 | 2017-07-27T16:19:56.000Z | 2021-11-26T22:10:49.000Z | tests/test_template_with_from_stdout.py | tsvetkov-ii/airflow-declarative | 7524030e333b05d3caf09cf5f069fbe1106397c1 | [
"Apache-2.0"
] | 26 | 2017-07-30T11:38:40.000Z | 2021-09-11T15:24:04.000Z | tests/test_template_with_from_stdout.py | tsvetkov-ii/airflow-declarative | 7524030e333b05d3caf09cf5f069fbe1106397c1 | [
"Apache-2.0"
] | 9 | 2018-04-03T22:07:35.000Z | 2020-04-30T18:21:32.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2019, Rambler Digital Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import airflow_declarative
from airflow_declarative.operators import GenericOperator
@pytest.fixture()
def dag(good_dag_path):
    """Load and return the single DAG from the template-with-from-stdout file."""
    path = good_dag_path("template-with-from-stdout")
    dags = airflow_declarative.from_path(path)
    # The declarative file is expected to define exactly one DAG.
    assert len(dags) == 1
    dag = dags[0]
    return dag
def test_callback_params(dag):
    """The operator callback receives the param rendered via from_stdout."""
    foo = dag.task_dict["operator_foo"]
    assert isinstance(foo, GenericOperator)
    foo.execute({})
    # Non-ASCII value on purpose: exercises decoding of the stdout template.
    assert foo._callback_instance.param == {"hello": ["привет"]}
    assert set(dag.task_dict) == {"operator_foo"}
| 27.977778 | 82 | 0.743447 |
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import airflow_declarative
from airflow_declarative.operators import GenericOperator
@pytest.fixture()
def dag(good_dag_path):
path = good_dag_path("template-with-from-stdout")
dags = airflow_declarative.from_path(path)
assert len(dags) == 1
dag = dags[0]
return dag
def test_callback_params(dag):
foo = dag.task_dict["operator_foo"]
assert isinstance(foo, GenericOperator)
foo.execute({})
assert foo._callback_instance.param == {"hello": ["привет"]}
assert set(dag.task_dict) == {"operator_foo"}
| true | true |
1c3a77fad5ceaaf2111e691cb0200c1c25872ebc | 1,339 | py | Python | tests/schemas/fields.py | brettcannon/vibora | 1933b631d4df62e7d748016f7463ab746d4695cc | [
"MIT"
] | 1 | 2021-01-03T00:57:53.000Z | 2021-01-03T00:57:53.000Z | tests/schemas/fields.py | brettcannon/vibora | 1933b631d4df62e7d748016f7463ab746d4695cc | [
"MIT"
] | null | null | null | tests/schemas/fields.py | brettcannon/vibora | 1933b631d4df62e7d748016f7463ab746d4695cc | [
"MIT"
] | 1 | 2019-06-14T10:40:54.000Z | 2019-06-14T10:40:54.000Z | from unittest import TestCase
from vibora.schemas.fields import String
from vibora.schemas.exceptions import ValidationError
class StringTestCase(TestCase):
    """Behavioral tests for the ``String`` schema field."""

    def test_default__expects_successful(self):
        """A string value is loaded back unchanged."""
        field_under_test = String()
        self.assertEqual('Test', field_under_test.load('Test'))

    def test_default_with_integer__expects_casting(self):
        """A numeric-looking string is accepted and returned as-is."""
        field_under_test = String()
        self.assertEqual('1', field_under_test.load('1'))

    def test_strict_true__expects_failure_with_integer(self):
        """Strict mode rejects a non-string value with ValidationError."""
        field_under_test = String(strict=True)
        try:
            field_under_test.load(1)
            self.fail('Missing exception')
        except ValidationError:
            pass

    def test_strict_true__expects_successful(self):
        """Strict mode still accepts genuine strings."""
        field_under_test = String(strict=True)
        self.assertEqual('TestString', field_under_test.load('TestString'))

    def test_default__expects_default_instead_of_none(self):
        """Loading None yields the configured default value."""
        fallback = 'Test'
        field_under_test = String(default=fallback)
        self.assertEqual(field_under_test.load(None), fallback)

    def test_default_with_empty_string__expects_default(self):
        """An empty string also falls back to the default value."""
        fallback = 'Test'
        field_under_test = String(default=fallback)
        self.assertEqual(field_under_test.load(''), fallback)

    def test_default_with_non_empty_string__expects_ignored(self):
        """A non-empty value takes precedence over the default."""
        fallback = 'Test'
        field_under_test = String(default=fallback)
        self.assertEqual(field_under_test.load('A'), 'A')
| 31.880952 | 66 | 0.681105 | from unittest import TestCase
from vibora.schemas.fields import String
from vibora.schemas.exceptions import ValidationError
class StringTestCase(TestCase):
def test_default__expects_successful(self):
field = String()
self.assertEqual('Test', field.load('Test'))
def test_default_with_integer__expects_casting(self):
field = String()
self.assertEqual('1', field.load('1'))
def test_strict_true__expects_failure_with_integer(self):
field = String(strict=True)
try:
field.load(1)
self.fail('Missing exception')
except ValidationError:
pass
def test_strict_true__expects_successful(self):
field = String(strict=True)
self.assertEqual('TestString', field.load('TestString'))
def test_default__expects_default_instead_of_none(self):
default = 'Test'
field = String(default=default)
self.assertEqual(field.load(None), default)
def test_default_with_empty_string__expects_default(self):
default = 'Test'
field = String(default=default)
self.assertEqual(field.load(''), default)
def test_default_with_non_empty_string__expects_ignored(self):
default = 'Test'
field = String(default=default)
self.assertEqual(field.load('A'), 'A')
| true | true |
1c3a787bd0e947f8f10a5d680d6a48e3995cf9bd | 3,241 | py | Python | examples/src/main/python/als.py | dongwang218/spark | 48fc38f5844f6c12bf440f2990b6d7f1630fafac | [
"Apache-2.0",
"MIT"
] | 5 | 2017-04-13T01:00:13.000Z | 2020-01-02T06:38:49.000Z | examples/src/main/python/als.py | newscred/spark | 257236c3e17906098f801cbc2059e7a9054e8cab | [
"Apache-2.0",
"MIT"
] | 9 | 2015-09-25T20:21:52.000Z | 2020-02-04T18:25:21.000Z | examples/src/main/python/als.py | newscred/spark | 257236c3e17906098f801cbc2059e7a9054e8cab | [
"Apache-2.0",
"MIT"
] | 15 | 2017-01-12T10:41:50.000Z | 2019-04-19T08:28:15.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This is an example implementation of ALS for learning how to use Spark. Please refer to
ALS in pyspark.mllib.recommendation for more conventional use.
This example requires numpy (http://www.numpy.org/)
"""
from __future__ import print_function
import sys
import numpy as np
from numpy.random import rand
from numpy import matrix
from pyspark import SparkContext
LAMBDA = 0.01 # regularization
np.random.seed(42)
def rmse(R, ms, us):
    """Return the root-mean-square error between ``R`` and ``ms * us.T``.

    Parameters
    ----------
    R : numpy.matrix of shape (M, U)
        The ratings matrix being approximated.
    ms, us : numpy.matrix
        Current movie (M x F) and user (U x F) factor matrices.

    Notes
    -----
    Fixes two defects in the original:
    * ``/ M * U`` parsed as ``(sum / M) * U`` due to operator precedence;
      the mean must divide by the total element count ``M * U``.
    * the dimensions are now taken from ``R.shape`` instead of relying on
      module-level globals ``M`` and ``U``.
    """
    diff = R - ms * us.T
    num_rows, num_cols = R.shape
    return np.sqrt(np.sum(np.power(diff, 2)) / (num_rows * num_cols))
def update(i, vec, mat, ratings):
    """Solve the regularized least-squares update for row ``i``.

    Builds the normal equations ``(mat.T * mat + LAMBDA * n * I) x = mat.T * r_i``
    where ``n`` is the number of rows of ``mat`` and ``r_i`` is row ``i`` of
    ``ratings``, and returns their solution. ``vec`` is unused and kept only
    for interface compatibility with the driver.
    """
    num_rows, num_features = mat.shape
    gram = mat.T * mat
    rhs = mat.T * ratings[i, :].T
    # Ridge regularization: add LAMBDA * n along the Gram matrix diagonal.
    for diag_idx in range(num_features):
        gram[diag_idx, diag_idx] += LAMBDA * num_rows
    return np.linalg.solve(gram, rhs)
if __name__ == "__main__":

    """
    Usage: als [M] [U] [F] [iterations] [partitions]"
    """

    print("""WARN: This is a naive implementation of ALS and is given as an
      example. Please use the ALS method found in pyspark.mllib.recommendation for more
      conventional use.""", file=sys.stderr)

    sc = SparkContext(appName="PythonALS")
    # Problem size: M movies, U users, F latent factors.
    M = int(sys.argv[1]) if len(sys.argv) > 1 else 100
    U = int(sys.argv[2]) if len(sys.argv) > 2 else 500
    F = int(sys.argv[3]) if len(sys.argv) > 3 else 10
    ITERATIONS = int(sys.argv[4]) if len(sys.argv) > 4 else 5
    partitions = int(sys.argv[5]) if len(sys.argv) > 5 else 2

    print("Running ALS with M=%d, U=%d, F=%d, iters=%d, partitions=%d\n" %
          (M, U, F, ITERATIONS, partitions))

    # Synthetic rank-F ratings matrix R (M x U) to be recovered as ms * us.T.
    R = matrix(rand(M, F)) * matrix(rand(U, F).T)
    ms = matrix(rand(M, F))
    us = matrix(rand(U, F))

    Rb = sc.broadcast(R)
    msb = sc.broadcast(ms)
    usb = sc.broadcast(us)

    # Alternating least squares: fix us and solve each row of ms, then fix
    # ms and solve each row of us, re-broadcasting after every half-step.
    for i in range(ITERATIONS):
        ms = sc.parallelize(range(M), partitions) \
               .map(lambda x: update(x, msb.value[x, :], usb.value, Rb.value)) \
               .collect()
        # collect() returns a list, so array ends up being
        # a 3-d array, we take the first 2 dims for the matrix
        ms = matrix(np.array(ms)[:, :, 0])
        msb = sc.broadcast(ms)

        us = sc.parallelize(range(U), partitions) \
               .map(lambda x: update(x, usb.value[x, :], msb.value, Rb.value.T)) \
               .collect()
        us = matrix(np.array(us)[:, :, 0])
        usb = sc.broadcast(us)

        error = rmse(R, ms, us)
        print("Iteration %d:" % i)
        print("\nRMSE: %5.4f\n" % error)

    sc.stop()
| 31.466019 | 87 | 0.636223 |
from __future__ import print_function
import sys
import numpy as np
from numpy.random import rand
from numpy import matrix
from pyspark import SparkContext
LAMBDA = 0.01
np.random.seed(42)
def rmse(R, ms, us):
diff = R - ms * us.T
return np.sqrt(np.sum(np.power(diff, 2)) / M * U)
def update(i, vec, mat, ratings):
uu = mat.shape[0]
ff = mat.shape[1]
XtX = mat.T * mat
Xty = mat.T * ratings[i, :].T
for j in range(ff):
XtX[j, j] += LAMBDA * uu
return np.linalg.solve(XtX, Xty)
if __name__ == "__main__":
print("""WARN: This is a naive implementation of ALS and is given as an
example. Please use the ALS method found in pyspark.mllib.recommendation for more
conventional use.""", file=sys.stderr)
sc = SparkContext(appName="PythonALS")
M = int(sys.argv[1]) if len(sys.argv) > 1 else 100
U = int(sys.argv[2]) if len(sys.argv) > 2 else 500
F = int(sys.argv[3]) if len(sys.argv) > 3 else 10
ITERATIONS = int(sys.argv[4]) if len(sys.argv) > 4 else 5
partitions = int(sys.argv[5]) if len(sys.argv) > 5 else 2
print("Running ALS with M=%d, U=%d, F=%d, iters=%d, partitions=%d\n" %
(M, U, F, ITERATIONS, partitions))
R = matrix(rand(M, F)) * matrix(rand(U, F).T)
ms = matrix(rand(M, F))
us = matrix(rand(U, F))
Rb = sc.broadcast(R)
msb = sc.broadcast(ms)
usb = sc.broadcast(us)
for i in range(ITERATIONS):
ms = sc.parallelize(range(M), partitions) \
.map(lambda x: update(x, msb.value[x, :], usb.value, Rb.value)) \
.collect()
ms = matrix(np.array(ms)[:, :, 0])
msb = sc.broadcast(ms)
us = sc.parallelize(range(U), partitions) \
.map(lambda x: update(x, usb.value[x, :], msb.value, Rb.value.T)) \
.collect()
us = matrix(np.array(us)[:, :, 0])
usb = sc.broadcast(us)
error = rmse(R, ms, us)
print("Iteration %d:" % i)
print("\nRMSE: %5.4f\n" % error)
sc.stop()
| true | true |
1c3a793e81bc992bc5a1a1ed4247323dd86194f0 | 37,914 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/aio/operations_async/_disks_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/aio/operations_async/_disks_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/aio/operations_async/_disks_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DisksOperations:
"""DisksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to format URLs and send HTTP requests.
        self._client = client
        # msrest-style serializer/deserializer pair for request and
        # response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "models.Disk",
        **kwargs
    ) -> "models.Disk":
        """Send the initial PUT request of the create-or-update LRO.

        Returns the Disk deserialized from the 200 or 202 response; polling
        to completion is handled by :meth:`begin_create_or_update`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Disk"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(disk, 'Disk')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 = completed synchronously, 202 = accepted (LRO in progress);
        # both carry a Disk body.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Disk', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Disk', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "models.Disk",
        **kwargs
    ) -> "models.Disk":
        """Creates or updates a disk.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the managed disk that is being created. The name can't be changed
         after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
         maximum name length is 80 characters.
        :type disk_name: str
        :param disk: Disk object supplied in the body of the Put disk operation.
        :type disk: ~azure.mgmt.compute.v2019_07_01.models.Disk
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns Disk, or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_07_01.models.Disk]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Disk"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller
        # state; with a continuation token the poller re-attaches instead.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                disk=disk,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Deserializes the terminal LRO response into a Disk (or cls result).
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('Disk', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}  # type: ignore
    async def _update_initial(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "models.DiskUpdate",
        **kwargs
    ) -> "models.Disk":
        """Send the initial PATCH request of the update LRO.

        Returns the Disk deserialized from the 200 or 202 response; polling
        to completion is handled by :meth:`begin_update`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Disk"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self._update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(disk, 'DiskUpdate')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 = completed synchronously, 202 = accepted (LRO in progress);
        # both carry a Disk body.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Disk', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Disk', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}  # type: ignore
    async def begin_update(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "models.DiskUpdate",
        **kwargs
    ) -> "models.Disk":
        """Updates (patches) a disk.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the managed disk that is being created. The name can't be changed
         after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
         maximum name length is 80 characters.
        :type disk_name: str
        :param disk: Disk object supplied in the body of the Patch disk operation.
        :type disk: ~azure.mgmt.compute.v2019_07_01.models.DiskUpdate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns Disk, or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_07_01.models.Disk]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Disk"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PATCH when not resuming from a saved poller
        # state; with a continuation token the poller re-attaches instead.
        if cont_token is None:
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                disk=disk,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Deserializes the terminal LRO response into a Disk (or cls result).
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('Disk', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}  # type: ignore
async def get(
self,
resource_group_name: str,
disk_name: str,
**kwargs
) -> "models.Disk":
"""Gets information about a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Disk, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2019_07_01.models.Disk
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Disk"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
disk_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
disk_name: str,
**kwargs
) -> None:
"""Deletes a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: None, or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["models.DiskList"]:
        """Lists all the disks under a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DiskList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_07_01.models.DiskList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.DiskList"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        def prepare_request(next_link=None):
            # First page: expand the route template; later pages: follow the
            # service-provided next_link verbatim (it already embeds the query string).
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('DiskList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page; non-200 responses are mapped to typed errors.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks'}  # type: ignore
    def list(
        self,
        **kwargs
    ) -> AsyncIterable["models.DiskList"]:
        """Lists all the disks under a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DiskList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_07_01.models.DiskList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.DiskList"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        def prepare_request(next_link=None):
            # First page: expand the route template; later pages: follow the
            # service-provided next_link verbatim (it already embeds the query string).
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('DiskList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page; non-200 responses are mapped to typed errors.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks'}  # type: ignore
async def _grant_access_initial(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: "models.GrantAccessData",
**kwargs
) -> "models.AccessUri":
cls = kwargs.pop('cls', None) # type: ClsType["models.AccessUri"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._grant_access_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(grant_access_data, 'GrantAccessData')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_grant_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'} # type: ignore
async def begin_grant_access(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: "models.GrantAccessData",
**kwargs
) -> "models.AccessUri":
"""Grants access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param grant_access_data: Access data object supplied in the body of the get disk access
operation.
:type grant_access_data: ~azure.mgmt.compute.v2019_07_01.models.GrantAccessData
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: AccessUri, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2019_07_01.models.AccessUri
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.AccessUri"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._grant_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
grant_access_data=grant_access_data,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'} # type: ignore
async def _revoke_access_initial(
self,
resource_group_name: str,
disk_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._revoke_access_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_revoke_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'} # type: ignore
async def begin_revoke_access(
self,
resource_group_name: str,
disk_name: str,
**kwargs
) -> None:
"""Revokes access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: None, or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._revoke_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'} # type: ignore
| 47.098137 | 190 | 0.65638 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
# Signature of the optional ``cls`` response-transform callback accepted by every operation:
# (pipeline_response, deserialized_result, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DisksOperations:
models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers shared by all operations."""
        self._client = client          # pipeline client used to build and send requests
        self._serialize = serializer   # request serializer
        self._deserialize = deserializer  # response deserializer
        self._config = config          # client configuration (subscription id, polling interval, ...)
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "models.Disk",
        **kwargs
    ) -> "models.Disk":
        """Send the initial PUT request of the create-or-update long-running operation.

        Returns the deserialized ``Disk`` for 200/202 responses; raises
        ``HttpResponseError`` for anything else.
        """
        cls = kwargs.pop('cls', None)  # optional response-transform callback
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Expand the route template with the serialized path arguments.
        url = self._create_or_update_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Query and header parameters.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Serialize the Disk model as the request body and send the PUT.
        body_content_kwargs = {}
        body_content = self._serialize.body(disk, 'Disk')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Disk', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Disk', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "models.Disk",
        **kwargs
    ) -> "models.Disk":
        """Begin a long-running create-or-update of a managed disk.

        Despite the ``"models.Disk"`` annotation, this returns an
        :class:`~azure.core.polling.AsyncLROPoller`; the poller's final result
        is the Disk (or ``cls(response)``).
        """
        polling = kwargs.pop('polling', True)  # True -> ARM polling, False -> none, else a custom method
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # resume a saved poller if provided
        if cont_token is None:
            # Kick off the initial PUT; pass the raw response through to the poller.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                disk=disk,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into a Disk model.
            deserialized = self._deserialize('Disk', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
    async def _update_initial(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "models.DiskUpdate",
        **kwargs
    ) -> "models.Disk":
        """Send the initial PATCH request of the update long-running operation.

        Returns the deserialized ``Disk`` for 200/202 responses; raises
        ``HttpResponseError`` for anything else.
        """
        cls = kwargs.pop('cls', None)  # optional response-transform callback
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Expand the route template with the serialized path arguments.
        url = self._update_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Query and header parameters.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Serialize the DiskUpdate model as the request body and send the PATCH.
        body_content_kwargs = {}
        body_content = self._serialize.body(disk, 'DiskUpdate')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Disk', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Disk', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
    async def begin_update(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "models.DiskUpdate",
        **kwargs
    ) -> "models.Disk":
        """Begin a long-running update (patch) of a managed disk.

        Despite the ``"models.Disk"`` annotation, this returns an
        :class:`~azure.core.polling.AsyncLROPoller`; the poller's final result
        is the Disk (or ``cls(response)``).
        """
        polling = kwargs.pop('polling', True)  # True -> ARM polling, False -> none, else a custom method
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # resume a saved poller if provided
        if cont_token is None:
            # Kick off the initial PATCH; pass the raw response through to the poller.
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                disk=disk,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into a Disk model.
            deserialized = self._deserialize('Disk', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
    async def get(
        self,
        resource_group_name: str,
        disk_name: str,
        **kwargs
    ) -> "models.Disk":
        """Get information about a managed disk.

        Returns the deserialized ``Disk`` (or ``cls(response)``); raises
        ``HttpResponseError`` on any non-200 status.
        """
        cls = kwargs.pop('cls', None)  # optional response-transform callback
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        # Expand the route template with the serialized path arguments.
        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Query and header parameters.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        # Send the GET through the client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Disk', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
    async def _delete_initial(
        self,
        resource_group_name: str,
        disk_name: str,
        **kwargs
    ) -> None:
        """Send the initial DELETE request of the delete long-running operation.

        Accepts 200/202/204; raises ``HttpResponseError`` for anything else.
        """
        cls = kwargs.pop('cls', None)  # optional response-transform callback
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        # Expand the route template with the serialized path arguments.
        url = self._delete_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Query and header parameters.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        # Send the DELETE through the client pipeline.
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
    async def begin_delete(
        self,
        resource_group_name: str,
        disk_name: str,
        **kwargs
    ) -> None:
        """Begin a long-running delete of a managed disk.

        Despite the ``None`` annotation, this returns an
        :class:`~azure.core.polling.AsyncLROPoller` whose final result is None
        (or ``cls(response)``).
        """
        polling = kwargs.pop('polling', True)  # True -> ARM polling, False -> none, else a custom method
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # resume a saved poller if provided
        if cont_token is None:
            # Kick off the initial DELETE; pass the raw response through to the poller.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize; only honor a custom cls hook.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["models.DiskList"]:
        """Lists all the disks under a resource group.

        :param resource_group_name: The name of the resource group.
        :return: An async pager over DiskList results.
        :raises HttpResponseError: on any non-200 page response.
        """
        cls = kwargs.pop('cls', None)
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages follow the service's next link as-is.
            if not next_link:
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) for the pager.
            deserialized = self._deserialize('DiskList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks'}
    def list(
        self,
        **kwargs
    ) -> AsyncIterable["models.DiskList"]:
        """Lists all the disks under the subscription.

        :return: An async pager over DiskList results.
        :raises HttpResponseError: on any non-200 page response.
        """
        cls = kwargs.pop('cls', None)
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages follow the service's next link as-is.
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) for the pager.
            deserialized = self._deserialize('DiskList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks'}
    async def _grant_access_initial(
        self,
        resource_group_name: str,
        disk_name: str,
        grant_access_data: "models.GrantAccessData",
        **kwargs
    ) -> "models.AccessUri":
        """Send the initial POST that starts the grant-access long-running operation.

        :param resource_group_name: The name of the resource group.
        :param disk_name: The name of the managed disk.
        :param grant_access_data: Access request payload (access level, duration).
        :return: The deserialized AccessUri on 200, or None on 202 (operation still running).
        :raises HttpResponseError: on any status other than 200 or 202.
        """
        cls = kwargs.pop('cls', None)
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL from the metadata template and the path parameters.
        url = self._grant_access_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers.
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Serialize the request body.
        body_content_kwargs = {}
        body_content = self._serialize.body(grant_access_data, 'GrantAccessData')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('AccessUri', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _grant_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'}
    async def begin_grant_access(
        self,
        resource_group_name: str,
        disk_name: str,
        grant_access_data: "models.GrantAccessData",
        **kwargs
    ) -> "models.AccessUri":
        """Grants access to a disk. Returns an async poller whose result is an AccessUri.

        :param resource_group_name: The name of the resource group.
        :param disk_name: The name of the managed disk.
        :param grant_access_data: Access request payload (access level, duration).
        :keyword polling: True (default) for ARM polling, False for no polling, or a custom polling object.
        :keyword continuation_token: Token to rebuild a poller from a previously saved state.
        :return: An AsyncLROPoller that returns AccessUri.
        :raises HttpResponseError: propagated from the initial request or polling.
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No saved state: issue the initial request; keep the raw response for the poller.
            raw_result = await self._grant_access_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                grant_access_data=grant_access_data,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final AccessUri once polling completes.
            deserialized = self._deserialize('AccessUri', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'}
    async def _revoke_access_initial(
        self,
        resource_group_name: str,
        disk_name: str,
        **kwargs
    ) -> None:
        """Send the initial POST that starts the revoke-access long-running operation.

        :param resource_group_name: The name of the resource group.
        :param disk_name: The name of the managed disk.
        :return: None, or the result of ``cls(pipeline_response, None, {})`` if supplied.
        :raises HttpResponseError: on any status other than 200 or 202.
        """
        cls = kwargs.pop('cls', None)
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        # Construct URL from the metadata template and the path parameters.
        url = self._revoke_access_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'diskName': self._serialize.url("disk_name", disk_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers (no body, so no Content-Type).
        header_parameters = {}
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _revoke_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'}
    async def begin_revoke_access(
        self,
        resource_group_name: str,
        disk_name: str,
        **kwargs
    ) -> None:
        """Revokes access to a disk. Returns an async poller for the long-running operation.

        :param resource_group_name: The name of the resource group.
        :param disk_name: The name of the managed disk.
        :keyword polling: True (default) for ARM polling, False for no polling, or a custom polling object.
        :keyword continuation_token: Token to rebuild a poller from a previously saved state.
        :return: An AsyncLROPoller whose result is None.
        :raises HttpResponseError: propagated from the initial request or polling.
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No saved state: issue the initial request; keep the raw response for the poller.
            raw_result = await self._revoke_access_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Apply the caller-supplied response transform, if any; revoke has no body to deserialize.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'}
| true | true |
1c3a796b2699601f424e3f80a5837fc28a6713db | 16,434 | py | Python | project3-mlops/06-Model-Registry.py | LiuxyEric/dscc202-402-spring2022 | f3877c2dde64656f9d84e3f913340f3fcefdc11b | [
"MIT"
] | null | null | null | project3-mlops/06-Model-Registry.py | LiuxyEric/dscc202-402-spring2022 | f3877c2dde64656f9d84e3f913340f3fcefdc11b | [
"MIT"
] | null | null | null | project3-mlops/06-Model-Registry.py | LiuxyEric/dscc202-402-spring2022 | f3877c2dde64656f9d84e3f913340f3fcefdc11b | [
"MIT"
] | 53 | 2022-01-11T19:06:06.000Z | 2022-03-25T19:27:48.000Z | # Databricks notebook source
# MAGIC %md
# MAGIC # Model Registry
# MAGIC
# MAGIC MLflow Model Registry is a collaborative hub where teams can share ML models, work together from experimentation to online testing and production, integrate with approval and governance workflows, and monitor ML deployments and their performance. This lesson explores how to manage models using the MLflow model registry.
# MAGIC
# MAGIC ##  In this lesson you:<br>
# MAGIC - Register a model using MLflow
# MAGIC - Deploy that model into production
# MAGIC - Update a model in production to new version including a staging phase for testing
# MAGIC - Archive and delete models
# MAGIC
# MAGIC ## Prerequisites
# MAGIC - Web browser: Chrome
# MAGIC - A cluster configured with **8 cores** and **DBR 7.0 ML**
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Classroom-Setup
# MAGIC
# MAGIC For each lesson to execute correctly, please make sure to run the **`Classroom-Setup`** cell at the<br/>
# MAGIC start of each lesson (see the next cell) and the **`Classroom-Cleanup`** cell at the end of each lesson.
# COMMAND ----------
# MAGIC %run "./Includes/Classroom-Setup"
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ### Model Registry
# MAGIC
# MAGIC The MLflow Model Registry component is a centralized model store, set of APIs, and UI, to collaboratively manage the full lifecycle of an MLflow Model. It provides model lineage (which MLflow Experiment and Run produced the model), model versioning, stage transitions (e.g. from staging to production), annotations (e.g. with comments, tags), and deployment management (e.g. which production jobs have requested a specific model version).
# MAGIC
# MAGIC Model registry has the following features:<br><br>
# MAGIC
# MAGIC * **Central Repository:** Register MLflow models with the MLflow Model Registry. A registered model has a unique name, version, stage, and other metadata.
# MAGIC * **Model Versioning:** Automatically keep track of versions for registered models when updated.
# MAGIC * **Model Stage:** Assigned preset or custom stages to each model version, like “Staging” and “Production” to represent the lifecycle of a model.
# MAGIC * **Model Stage Transitions:** Record new registration events or changes as activities that automatically log users, changes, and additional metadata such as comments.
# MAGIC * **CI/CD Workflow Integration:** Record stage transitions, request, review and approve changes as part of CI/CD pipelines for better control and governance.
# MAGIC
# MAGIC <div><img src="https://files.training.databricks.com/images/eLearning/ML-Part-4/model-registry.png" style="height: 400px; margin: 20px"/></div>
# MAGIC
# MAGIC <img alt="Side Note" title="Side Note" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)" src="https://files.training.databricks.com/static/images/icon-note.webp"/> See <a href="https://mlflow.org/docs/latest/registry.html" target="_blank">the MLflow docs</a> for more details on the model registry.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ### Registering a Model
# MAGIC
# MAGIC The following workflow will work with either the UI or in pure Python. This notebook will use pure Python.
# MAGIC
# MAGIC <img alt="Side Note" title="Side Note" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)" src="https://files.training.databricks.com/static/images/icon-note.webp"/> Explore the UI throughout this lesson by clicking the "Models" tab on the left-hand side of the screen.
# COMMAND ----------
# MAGIC %md
# MAGIC Confirm that you are using `mlflow` version 1.7 or higher.
# COMMAND ----------
from distutils.version import LooseVersion, StrictVersion
import mlflow
# Guard: the model-registry APIs used in this notebook were introduced in MLflow 1.7.
assert StrictVersion(mlflow.__version__) >= StrictVersion("1.7.0"), "Update MLflow to version 1.7.0+"
# COMMAND ----------
# MAGIC %md
# MAGIC Train a model and log it to MLflow.
# COMMAND ----------
import mlflow
import mlflow.sklearn
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split

# Load the cleaned Airbnb listings dataset and split features from the price target.
df = pd.read_csv("/dbfs/mnt/training/airbnb/sf-listings/airbnb-cleaned-mlflow.csv")
X_train, X_test, y_train, y_test = train_test_split(df.drop(["price"], axis=1), df[["price"]].values.ravel(), random_state=42)

rf = RandomForestRegressor(n_estimators=100, max_depth=5)
rf.fit(X_train, y_train)

# Log the fitted model and its test-set MSE under a named MLflow run.
with mlflow.start_run(run_name="RF Model") as run:
    mlflow.sklearn.log_model(rf, "model")
    mlflow.log_metric("mse", mean_squared_error(y_test, rf.predict(X_test)))
    # `run_uuid` is deprecated in MLflow 1.x; `run_id` is the supported alias with the same value.
    runID = run.info.run_id
# COMMAND ----------
# MAGIC %md
# MAGIC Create a unique model name so you don't clash with other workspace users.
# COMMAND ----------
import uuid

# Build a unique registered-model name so workspace users don't collide in the registry.
_suffix = uuid.uuid4().hex[:10]
model_name = "airbnb_rf_model_" + _suffix
model_name
# COMMAND ----------
# MAGIC %md
# MAGIC Register the model.
# COMMAND ----------
# Register the logged model artifact under the new name; this creates version 1.
model_uri = "runs:/{run_id}/model".format(run_id=runID)
model_details = mlflow.register_model(model_uri=model_uri, name=model_name)
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC **Open the *Models* tab on the left of the screen to explore the registered model.** Note the following:<br><br>
# MAGIC
# MAGIC * It logged who trained the model and what code was used
# MAGIC * It logged a history of actions taken on this model
# MAGIC * It logged this model as a first version
# MAGIC
# MAGIC <div><img src="https://files.training.databricks.com/images/eLearning/ML-Part-4/model-registry-1.png" style="height: 400px; margin: 20px"/></div>
# COMMAND ----------
# MAGIC %md
# MAGIC Check the status. It will initially be in `PENDING_REGISTRATION` status.
# COMMAND ----------
from mlflow.tracking.client import MlflowClient
client = MlflowClient()
# Newly registered versions may briefly report PENDING_REGISTRATION before READY.
model_version_details = client.get_model_version(name=model_name, version=1)
model_version_details.status
# COMMAND ----------
# MAGIC %md
# MAGIC Now add a model description
# COMMAND ----------
# Attach a description to the registered model as a whole (applies to all versions).
client.update_registered_model(
    name=model_details.name,
    description="This model forecasts Airbnb housing list prices based on various listing inputs."
)
# COMMAND ----------
# MAGIC %md
# MAGIC Add a version-specific description.
# COMMAND ----------
# Attach a description specific to version 1 only.
client.update_model_version(
    name=model_details.name,
    version=model_details.version,
    description="This model version was built using sklearn."
)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Deploying a Model
# MAGIC
# MAGIC The MLflow Model Registry defines several model stages: `None`, `Staging`, `Production`, and `Archived`. Each stage has a unique meaning. For example, `Staging` is meant for model testing, while `Production` is for models that have completed the testing or review processes and have been deployed to applications.
# MAGIC
# MAGIC Users with appropriate permissions can transition models between stages. In private preview, any user can transition a model to any stage. In the near future, administrators in your organization will be able to control these permissions on a per-user and per-model basis.
# MAGIC
# MAGIC If you have permission to transition a model to a particular stage, you can make the transition directly by using the `MlflowClient.update_model_version()` function. If you do not have permission, you can request a stage transition using the REST API; for example: ```%sh curl -i -X POST -H "X-Databricks-Org-Id: <YOUR_ORG_ID>" -H "Authorization: Bearer <YOUR_ACCESS_TOKEN>" https://<YOUR_DATABRICKS_WORKSPACE_URL>/api/2.0/preview/mlflow/transition-requests/create -d '{"comment": "Please move this model into production!", "model_version": {"version": 1, "registered_model": {"name": "power-forecasting-model"}}, "stage": "Production"}'
# MAGIC ```
# COMMAND ----------
# MAGIC %md
# MAGIC Now that you've learned about stage transitions, transition the model to the `Production` stage.
# COMMAND ----------
import time
time.sleep(10) # In case the registration is still pending
# COMMAND ----------
# Promote version 1 of the model straight to the Production stage.
client.transition_model_version_stage(
    name=model_details.name,
    version=model_details.version,
    stage='Production',
)
# COMMAND ----------
# MAGIC %md
# MAGIC Fetch the model's current status.
# COMMAND ----------
# Re-fetch the version record to confirm the stage transition took effect.
model_version_details = client.get_model_version(
    name=model_details.name,
    version=model_details.version,
)
print("The current model stage is: '{stage}'".format(stage=model_version_details.current_stage))
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC Fetch the latest model using a `pyfunc`. Loading the model in this way allows us to use the model regardless of the package that was used to train it.
# MAGIC
# MAGIC <img alt="Side Note" title="Side Note" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)" src="https://files.training.databricks.com/static/images/icon-note.webp"/> You can load a specific version of the model too.
# COMMAND ----------
import mlflow.pyfunc
# models:/<name>/<version> URIs resolve through the registry; pyfunc gives a
# framework-agnostic predict() regardless of how the model was trained.
model_version_uri = "models:/{model_name}/1".format(model_name=model_name)
print("Loading registered model version from URI: '{model_uri}'".format(model_uri=model_version_uri))
model_version_1 = mlflow.pyfunc.load_model(model_version_uri)
# COMMAND ----------
# MAGIC %md
# MAGIC Apply the model.
# COMMAND ----------
model_version_1.predict(X_test)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Deploying a New Model Version
# MAGIC
# MAGIC The MLflow Model Registry enables you to create multiple model versions corresponding to a single registered model. By performing stage transitions, you can seamlessly integrate new model versions into your staging or production environments.
# COMMAND ----------
# MAGIC %md
# MAGIC Create a new model version and register that model when it's logged.
# COMMAND ----------
import mlflow
import mlflow.sklearn
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split

# Retrain with a larger forest; same data split as version 1 (random_state=42).
df = pd.read_csv("/dbfs/mnt/training/airbnb/sf-listings/airbnb-cleaned-mlflow.csv")
X_train, X_test, y_train, y_test = train_test_split(df.drop(["price"], axis=1), df[["price"]].values.ravel(), random_state=42)

rf = RandomForestRegressor(n_estimators=300, max_depth=10)
rf.fit(X_train, y_train)

with mlflow.start_run(run_name="RF Model") as run:
    # Specify the `registered_model_name` parameter of the `mlflow.sklearn.log_model()`
    # function to register the model with the MLflow Model Registry. This automatically
    # creates a new model version
    mlflow.sklearn.log_model(
        sk_model=rf,
        artifact_path="sklearn-model",
        registered_model_name=model_name,
    )
    mlflow.log_metric("mse", mean_squared_error(y_test, rf.predict(X_test)))
    # `run_uuid` is deprecated in MLflow 1.x; `run_id` is the supported alias with the same value.
    runID = run.info.run_id
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC Check the UI to see the new model version.
# MAGIC
# MAGIC <div><img src="https://files.training.databricks.com/images/eLearning/ML-Part-4/model-registry-2.png" style="height: 400px; margin: 20px"/></div>
# COMMAND ----------
# MAGIC %md
# MAGIC Use the search functionality to grab the latest model version.
# COMMAND ----------
# Find the newest version of this registered model.
model_version_infos = client.search_model_versions(f"name = '{model_name}'")
# Version numbers come back as strings; compare numerically so e.g. "10" ranks
# above "9" (a plain max() would compare lexicographically and get this wrong).
new_model_version = max([model_version_info.version for model_version_info in model_version_infos], key=int)
# COMMAND ----------
# MAGIC %md
# MAGIC Add a description to this new version.
# COMMAND ----------
# Document the new version's hyperparameters in the registry.
client.update_model_version(
    name=model_name,
    version=new_model_version,
    description="This model version is a random forest containing 300 decision trees and a max depth of 10 that was trained in scikit-learn."
)
# COMMAND ----------
# MAGIC %md
# MAGIC Put this new model version into `Staging`
# COMMAND ----------
import time
time.sleep(10) # In case the registration is still pending
# COMMAND ----------
# Move the new version into Staging for testing before production rollout.
client.transition_model_version_stage(
    name=model_name,
    version=new_model_version,
    stage="Staging",
)
# COMMAND ----------
# MAGIC %md
# MAGIC Since this model is now in staging, you can execute an automated CI/CD pipeline against it to test it before going into production. Once that is completed, you can push that model into production.
# COMMAND ----------
# After staging validation, promote the new version to Production.
client.transition_model_version_stage(
    name=model_name,
    version=new_model_version,
    stage="Production",
)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Archiving and Deleting
# MAGIC
# MAGIC You can now archive and delete old versions of the model.
# COMMAND ----------
# Archive version 1; a version must be archived before it can be deleted.
client.transition_model_version_stage(
    name=model_name,
    version=1,
    stage="Archived",
)
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC Delete version 1.
# MAGIC
# MAGIC <img alt="Side Note" title="Side Note" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)" src="https://files.training.databricks.com/static/images/icon-note.webp"/> You cannot delete a model that is not first archived.
# COMMAND ----------
# Permanently delete the (already archived) version 1.
client.delete_model_version(
    name=model_name,
    version=1
)
# COMMAND ----------
# MAGIC %md
# MAGIC Archive version 2 of the model too.
# COMMAND ----------
# Archive version 2 as well, so the whole registered model can be deleted next.
client.transition_model_version_stage(
    name=model_name,
    version=2,
    stage="Archived",
)
# COMMAND ----------
# MAGIC %md
# MAGIC Now delete the entire registered model.
# COMMAND ----------
client.delete_registered_model(model_name)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review
# MAGIC **Question:** How does MLflow tracking differ from the model registry?
# MAGIC **Answer:** Tracking is meant for experimentation and development. The model registry is designed to take a model from tracking and put it through staging and into production. This is often the point that a data engineer or a machine learning engineer takes responsibility for the deployment process.
# MAGIC
# MAGIC **Question:** Why do I need a model registry?
# MAGIC **Answer:** Just as MLflow tracking provides end-to-end reproducibility for the machine learning training process, a model registry provides reproducibility and governance for the deployment process. Since production systems are mission critical, components can be isolated with ACL's so only specific individuals can alter production models. Version control and CI/CD workflow integration is also a critical dimension of deploying models into production.
# MAGIC
# MAGIC **Question:** What can I do programatically versus using the UI?
# MAGIC **Answer:** Most operations can be done using the UI or in pure Python. A model must be tracked using Python, but from that point on everything can be done either way. For instance, a model logged using the MLflow tracking API can then be registered using the UI and can then be pushed into production.
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Classroom-Cleanup<br>
# MAGIC
# MAGIC Run the **`Classroom-Cleanup`** cell below to remove any artifacts created by this lesson.
# COMMAND ----------
# MAGIC %run "./Includes/Classroom-Cleanup"
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Next Steps
# MAGIC
# MAGIC Start the labs for this lesson, [Model Management Lab]($./Labs/05-Lab)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Additional Topics & Resources
# MAGIC
# MAGIC **Q:** Where can I find out more information on MLflow Model Registry?
# MAGIC **A:** Check out <a href="https://mlflow.org/docs/latest/registry.html" target="_blank">the MLflow documentation</a>
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2020 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="http://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="http://help.databricks.com/">Support</a>
| 37.692661 | 645 | 0.73141 |
in_test_split(df.drop(["price"], axis=1), df[["price"]].values.ravel(), random_state=42)
rf = RandomForestRegressor(n_estimators=100, max_depth=5)
rf.fit(X_train, y_train)
with mlflow.start_run(run_name="RF Model") as run:
mlflow.sklearn.log_model(rf, "model")
mlflow.log_metric("mse", mean_squared_error(y_test, rf.predict(X_test)))
runID = run.info.run_uuid
# COMMAND ----------
import uuid
model_name = f"airbnb_rf_model_{uuid.uuid4().hex[:10]}"
model_name
# COMMAND ----------
# MAGIC %md
# MAGIC Register the model.
# COMMAND ----------
model_uri = "runs:/{run_id}/model".format(run_id=runID)
model_details = mlflow.register_model(model_uri=model_uri, name=model_name)
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC **Open the *Models* tab on the left of the screen to explore the registered model.** Note the following:<br><br>
# MAGIC
# MAGIC * It logged who trained the model and what code was used
# MAGIC * It logged a history of actions taken on this model
# MAGIC * It logged this model as a first version
# MAGIC
# MAGIC <div><img src="https://files.training.databricks.com/images/eLearning/ML-Part-4/model-registry-1.png" style="height: 400px; margin: 20px"/></div>
# COMMAND ----------
# MAGIC %md
# MAGIC Check the status. It will initially be in `PENDING_REGISTRATION` status.
# COMMAND ----------
from mlflow.tracking.client import MlflowClient
client = MlflowClient()
model_version_details = client.get_model_version(name=model_name, version=1)
model_version_details.status
# COMMAND ----------
# MAGIC %md
# MAGIC Now add a model description
# COMMAND ----------
client.update_registered_model(
name=model_details.name,
description="This model forecasts Airbnb housing list prices based on various listing inputs."
)
# COMMAND ----------
# MAGIC %md
# MAGIC Add a version-specific description.
# COMMAND ----------
client.update_model_version(
name=model_details.name,
version=model_details.version,
description="This model version was built using sklearn."
)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Deploying a Model
# MAGIC
# MAGIC The MLflow Model Registry defines several model stages: `None`, `Staging`, `Production`, and `Archived`. Each stage has a unique meaning. For example, `Staging` is meant for model testing, while `Production` is for models that have completed the testing or review processes and have been deployed to applications.
# MAGIC
# MAGIC Users with appropriate permissions can transition models between stages. In private preview, any user can transition a model to any stage. In the near future, administrators in your organization will be able to control these permissions on a per-user and per-model basis.
# MAGIC
# MAGIC If you have permission to transition a model to a particular stage, you can make the transition directly by using the `MlflowClient.update_model_version()` function. If you do not have permission, you can request a stage transition using the REST API; for example: ```%sh curl -i -X POST -H "X-Databricks-Org-Id: <YOUR_ORG_ID>" -H "Authorization: Bearer <YOUR_ACCESS_TOKEN>" https://<YOUR_DATABRICKS_WORKSPACE_URL>/api/2.0/preview/mlflow/transition-requests/create -d '{"comment": "Please move this model into production!", "model_version": {"version": 1, "registered_model": {"name": "power-forecasting-model"}}, "stage": "Production"}'
# MAGIC ```
# COMMAND ----------
# MAGIC %md
# MAGIC Now that you've learned about stage transitions, transition the model to the `Production` stage.
import time
time.sleep(10)
client.transition_model_version_stage(
name=model_details.name,
version=model_details.version,
stage='Production',
)
# COMMAND ----------
model_version_details = client.get_model_version(
name=model_details.name,
version=model_details.version,
)
print("The current model stage is: '{stage}'".format(stage=model_version_details.current_stage))
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC Fetch the latest model using a `pyfunc`. Loading the model in this way allows us to use the model regardless of the package that was used to train it.
# MAGIC
# MAGIC <img alt="Side Note" title="Side Note" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)" src="https://files.training.databricks.com/static/images/icon-note.webp"/> You can load a specific version of the model too.
# COMMAND ----------
import mlflow.pyfunc
model_version_uri = "models:/{model_name}/1".format(model_name=model_name)
print("Loading registered model version from URI: '{model_uri}'".format(model_uri=model_version_uri))
model_version_1 = mlflow.pyfunc.load_model(model_version_uri)
# COMMAND ----------
# MAGIC %md
# MAGIC Apply the model.
# COMMAND ----------
model_version_1.predict(X_test)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Deploying a New Model Version
# MAGIC
# MAGIC The MLflow Model Registry enables you to create multiple model versions corresponding to a single registered model. By performing stage transitions, you can seamlessly integrate new model versions into your staging or production environments.
# COMMAND ----------
# MAGIC %md
# MAGIC Create a new model version and register that model when it's logged.
import mlflow
import mlflow.sklearn
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
df = pd.read_csv("/dbfs/mnt/training/airbnb/sf-listings/airbnb-cleaned-mlflow.csv")
X_train, X_test, y_train, y_test = train_test_split(df.drop(["price"], axis=1), df[["price"]].values.ravel(), random_state=42)
rf = RandomForestRegressor(n_estimators=300, max_depth=10)
rf.fit(X_train, y_train)
with mlflow.start_run(run_name="RF Model") as run:
mlflow.sklearn.log_model(
sk_model=rf,
artifact_path="sklearn-model",
registered_model_name=model_name,
)
mlflow.log_metric("mse", mean_squared_error(y_test, rf.predict(X_test)))
runID = run.info.run_uuid
model_version_infos = client.search_model_versions(f"name = '{model_name}'")
new_model_version = max([model_version_info.version for model_version_info in model_version_infos])
client.update_model_version(
name=model_name,
version=new_model_version,
description="This model version is a random forest containing 300 decision trees and a max depth of 10 that was trained in scikit-learn."
)
import time
time.sleep(10)
client.transition_model_version_stage(
name=model_name,
version=new_model_version,
stage="Staging",
)
client.transition_model_version_stage(
name=model_name,
version=new_model_version,
stage="Production",
)
,
stage="Archived",
)
client.delete_model_version(
name=model_name,
version=1
)
client.transition_model_version_stage(
name=model_name,
version=2,
stage="Archived",
)
client.delete_registered_model(model_name)
MAGIC **Question:** What can I do programatically versus using the UI?
# MAGIC **Answer:** Most operations can be done using the UI or in pure Python. A model must be tracked using Python, but from that point on everything can be done either way. For instance, a model logged using the MLflow tracking API can then be registered using the UI and can then be pushed into production.
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Classroom-Cleanup<br>
# MAGIC
# MAGIC Run the **`Classroom-Cleanup`** cell below to remove any artifacts created by this lesson.
# COMMAND ----------
# MAGIC %run "./Includes/Classroom-Cleanup"
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Next Steps
# MAGIC
# MAGIC Start the labs for this lesson, [Model Management Lab]($./Labs/05-Lab)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Additional Topics & Resources
# MAGIC
# MAGIC **Q:** Where can I find out more information on MLflow Model Registry?
# MAGIC **A:** Check out <a href="https://mlflow.org/docs/latest/registry.html" target="_blank">the MLflow documentation</a>
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2020 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="http://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="http://help.databricks.com/">Support</a>
| true | true |
1c3a7ac01ca66454f09348069e9fb311bffde81f | 2,350 | py | Python | tests/integration/models/test_ranking_metric.py | Remorax/COMET | e0a9c4116edb58fd2ddd2078329e06978e15b3b2 | [
"Apache-2.0"
] | 138 | 2020-09-22T14:59:52.000Z | 2022-03-30T18:43:41.000Z | tests/integration/models/test_ranking_metric.py | Remorax/COMET | e0a9c4116edb58fd2ddd2078329e06978e15b3b2 | [
"Apache-2.0"
] | 58 | 2020-11-19T11:41:21.000Z | 2022-03-31T17:54:46.000Z | tests/integration/models/test_ranking_metric.py | Remorax/COMET | e0a9c4116edb58fd2ddd2078329e06978e15b3b2 | [
"Apache-2.0"
] | 24 | 2020-09-28T02:35:55.000Z | 2022-03-14T12:51:40.000Z | # -*- coding: utf-8 -*-
import multiprocessing
import os
import shutil
import unittest
import torch
from comet.models import RankingMetric
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer.trainer import Trainer
from scipy.stats import pearsonr
from tests.data import DATA_PATH
from torch.utils.data import DataLoader
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["OMP_NUM_THREADS"] = "1"
class TestRankingMetric(unittest.TestCase):
@classmethod
def tearDownClass(cls):
shutil.rmtree(os.path.join(DATA_PATH, "checkpoints"))
def test_training(self):
seed_everything(12)
trainer = Trainer(
gpus=0,
max_epochs=4,
deterministic=True,
checkpoint_callback=True,
default_root_dir=DATA_PATH,
logger=False,
weights_summary=None,
progress_bar_refresh_rate=0,
)
model = RankingMetric(
encoder_model="BERT",
pretrained_model="google/bert_uncased_L-2_H-128_A-2",
train_data=os.path.join(DATA_PATH, "test_ranking_data.csv"),
validation_data=os.path.join(DATA_PATH, "test_ranking_data.csv"),
layerwise_decay=0.95,
batch_size=32,
learning_rate=1e-04,
encoder_learning_rate=1e-04,
)
trainer.fit(model)
self.assertTrue(
os.path.exists(
os.path.join(DATA_PATH, "checkpoints", "epoch=3-step=15.ckpt")
)
)
saved_model = RankingMetric.load_from_checkpoint(
os.path.join(DATA_PATH, "checkpoints", "epoch=3-step=15.ckpt")
)
dataset = saved_model.read_csv(
os.path.join(DATA_PATH, "test_regression_data.csv"), regression=True
)
y = [s["score"] for s in dataset]
dataloader = DataLoader(
dataset=dataset,
batch_size=256,
collate_fn=lambda x: saved_model.prepare_sample(x, inference=True),
num_workers=multiprocessing.cpu_count(),
)
y_hat = (
torch.cat(
trainer.predict(dataloaders=dataloader, return_predictions=True), dim=0
)
.cpu()
.tolist()
)
# This shouldn't break!
pearsonr(y_hat, y)[0]
| 31.756757 | 87 | 0.611064 |
import multiprocessing
import os
import shutil
import unittest
import torch
from comet.models import RankingMetric
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer.trainer import Trainer
from scipy.stats import pearsonr
from tests.data import DATA_PATH
from torch.utils.data import DataLoader
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["OMP_NUM_THREADS"] = "1"
class TestRankingMetric(unittest.TestCase):
@classmethod
def tearDownClass(cls):
shutil.rmtree(os.path.join(DATA_PATH, "checkpoints"))
def test_training(self):
seed_everything(12)
trainer = Trainer(
gpus=0,
max_epochs=4,
deterministic=True,
checkpoint_callback=True,
default_root_dir=DATA_PATH,
logger=False,
weights_summary=None,
progress_bar_refresh_rate=0,
)
model = RankingMetric(
encoder_model="BERT",
pretrained_model="google/bert_uncased_L-2_H-128_A-2",
train_data=os.path.join(DATA_PATH, "test_ranking_data.csv"),
validation_data=os.path.join(DATA_PATH, "test_ranking_data.csv"),
layerwise_decay=0.95,
batch_size=32,
learning_rate=1e-04,
encoder_learning_rate=1e-04,
)
trainer.fit(model)
self.assertTrue(
os.path.exists(
os.path.join(DATA_PATH, "checkpoints", "epoch=3-step=15.ckpt")
)
)
saved_model = RankingMetric.load_from_checkpoint(
os.path.join(DATA_PATH, "checkpoints", "epoch=3-step=15.ckpt")
)
dataset = saved_model.read_csv(
os.path.join(DATA_PATH, "test_regression_data.csv"), regression=True
)
y = [s["score"] for s in dataset]
dataloader = DataLoader(
dataset=dataset,
batch_size=256,
collate_fn=lambda x: saved_model.prepare_sample(x, inference=True),
num_workers=multiprocessing.cpu_count(),
)
y_hat = (
torch.cat(
trainer.predict(dataloaders=dataloader, return_predictions=True), dim=0
)
.cpu()
.tolist()
)
pearsonr(y_hat, y)[0]
| true | true |
1c3a7b6a2dbdfcc7a6cb2655d83c8b91264b42c6 | 2,072 | py | Python | models/xray_train.py | rahatsantosh/ipf_severity_detection | e08f72db344a6dd54868c83a2484c78f7ec7a6fe | [
"MIT"
] | null | null | null | models/xray_train.py | rahatsantosh/ipf_severity_detection | e08f72db344a6dd54868c83a2484c78f7ec7a6fe | [
"MIT"
] | 5 | 2021-06-08T22:20:20.000Z | 2022-03-12T00:46:52.000Z | models/xray_train.py | rahatsantosh/ipf_severity_detection | e08f72db344a6dd54868c83a2484c78f7ec7a6fe | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torchvision
import matplotlib.pyplot as plt
from shallow_autoenc import Autoencoder
from autoencoder_dataset import Dataset
# CUDA for PyTorch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
if use_cuda:
print(torch.cuda.get_device_name())
cudnn.benchmark = True
tf = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(p=0.5),
torchvision.transforms.RandomRotation(45),
torchvision.transforms.RandomVerticalFlip(p=0.5),
torchvision.transforms.Resize((1024, 1024)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Lambda(lambda x:torch.reshape(x, (-1, x.shape[0], x.shape[1], x.shape[2])))
])
root = "../../data/external/chest_xray/img"
training_set = torchvision.datasets.ImageFolder(root, transform=tf)
training_generator = DataLoader(training_set, batch_size = 32)
model = Autoencoder()
if use_cuda:
model.to(device)
def train_model(model,train_loader,optimizer,n_epochs=10,gpu=True):
loss_list=[]
for epoch in range(n_epochs):
for x, _ in train_loader:
if gpu:
# Transfer to GPU
x = x.to(device)
model.train()
optimizer.zero_grad()
y = model(x)
loss = criterion(y, x)
loss.backward()
optimizer.step()
loss_list.append(loss.data)
print('Epoch : ',epoch,' Loss : ',loss.data)
torch.save(model, '')
return loss_list
criterion = nn.CosineSimilarity()
optimizer = torch.optim.Adam(model.parameters())
loss_list = train_model(
model=model,
train_loader=training_generator,
optimizer=optimizer,
n_epochs=100,
gpu=True
)
print("-------------Done--------------")
plt.plot(np.arange(len(loss_list)),loss_list)
plt.savefig('../../reports/figures/xray_autoenc_loss.png')
model_path = "../../models/autoenc.pt"
torch.save(model, model_path)
| 28.383562 | 102 | 0.681467 | import numpy as np
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torchvision
import matplotlib.pyplot as plt
from shallow_autoenc import Autoencoder
from autoencoder_dataset import Dataset
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
if use_cuda:
print(torch.cuda.get_device_name())
cudnn.benchmark = True
tf = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(p=0.5),
torchvision.transforms.RandomRotation(45),
torchvision.transforms.RandomVerticalFlip(p=0.5),
torchvision.transforms.Resize((1024, 1024)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Lambda(lambda x:torch.reshape(x, (-1, x.shape[0], x.shape[1], x.shape[2])))
])
root = "../../data/external/chest_xray/img"
training_set = torchvision.datasets.ImageFolder(root, transform=tf)
training_generator = DataLoader(training_set, batch_size = 32)
model = Autoencoder()
if use_cuda:
model.to(device)
def train_model(model,train_loader,optimizer,n_epochs=10,gpu=True):
loss_list=[]
for epoch in range(n_epochs):
for x, _ in train_loader:
if gpu:
x = x.to(device)
model.train()
optimizer.zero_grad()
y = model(x)
loss = criterion(y, x)
loss.backward()
optimizer.step()
loss_list.append(loss.data)
print('Epoch : ',epoch,' Loss : ',loss.data)
torch.save(model, '')
return loss_list
criterion = nn.CosineSimilarity()
optimizer = torch.optim.Adam(model.parameters())
loss_list = train_model(
model=model,
train_loader=training_generator,
optimizer=optimizer,
n_epochs=100,
gpu=True
)
print("-------------Done--------------")
plt.plot(np.arange(len(loss_list)),loss_list)
plt.savefig('../../reports/figures/xray_autoenc_loss.png')
model_path = "../../models/autoenc.pt"
torch.save(model, model_path)
| true | true |
1c3a7c87a6d25531d0ab33c8faa31b4f10e373f5 | 1,089 | py | Python | utils/request.py | saptarshibasu15/API | f83ce39e46fb058e33ff5c6a36671fa004363de9 | [
"MIT"
] | 1 | 2021-05-01T02:25:27.000Z | 2021-05-01T02:25:27.000Z | utils/request.py | saptarshibasu15/API | f83ce39e46fb058e33ff5c6a36671fa004363de9 | [
"MIT"
] | null | null | null | utils/request.py | saptarshibasu15/API | f83ce39e46fb058e33ff5c6a36671fa004363de9 | [
"MIT"
] | null | null | null | from quart import Request as BaseRequest
from typing import Optional
from api.models import User
class Request(BaseRequest):
"""Custom request class to implement authorization."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._user = None
self.__has_fetched = False
@property
def is_authorized(self) -> bool:
return isinstance(self.jwt, dict)
@property
async def user(self) -> Optional[User]:
"""
The User object is no longer fetched in Authorization Middleware.
This is to reduce avg. response time.
"""
if not self.is_authorized:
return None
if not self.__has_fetched:
self._user = await User.fetch(id=self.user_id)
self.__has_fetched = True
return self._user
@property
def user_id(self) -> Optional[int]:
if not self.is_authorized:
return None
return int(self.jwt["uid"])
@property
def jwt(self) -> Optional[dict]:
return self.scope.get("jwt")
| 24.2 | 73 | 0.615243 | from quart import Request as BaseRequest
from typing import Optional
from api.models import User
class Request(BaseRequest):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._user = None
self.__has_fetched = False
@property
def is_authorized(self) -> bool:
return isinstance(self.jwt, dict)
@property
async def user(self) -> Optional[User]:
if not self.is_authorized:
return None
if not self.__has_fetched:
self._user = await User.fetch(id=self.user_id)
self.__has_fetched = True
return self._user
@property
def user_id(self) -> Optional[int]:
if not self.is_authorized:
return None
return int(self.jwt["uid"])
@property
def jwt(self) -> Optional[dict]:
return self.scope.get("jwt")
| true | true |
1c3a7c92dba7f63865dc2773147c67d25a7863b8 | 1,320 | py | Python | data_loader/data_generator.py | jiuney/XAI606-EEGNet | 45ff28630ed1b09d0853f2cfb148a5dd2693e5ab | [
"MIT"
] | null | null | null | data_loader/data_generator.py | jiuney/XAI606-EEGNet | 45ff28630ed1b09d0853f2cfb148a5dd2693e5ab | [
"MIT"
] | null | null | null | data_loader/data_generator.py | jiuney/XAI606-EEGNet | 45ff28630ed1b09d0853f2cfb148a5dd2693e5ab | [
"MIT"
] | null | null | null | from torch.utils.data import DataLoader
from data_loader.dataset.bcic4_2a import BCIC4_2A
class DataGenerator:
def __init__(self, args):
print("[Load Data]")
if args.mode == 'train':
self.train_loader = self.__data_loader(args, 'train')
self.val_loader = self.__data_loader(args, 'val')
self.train_loader.dataset.X = self.train_loader.dataset.X.reshape(-1, 22,500)
self.val_loader.dataset.X = self.val_loader.dataset.X.reshape(-1, 22,500)
print(f"train size: {self.train_loader.dataset.X.shape}")
print(f"val size: {self.val_loader.dataset.X.shape}")
mini_batch_shape = list(self.train_loader.dataset.X.shape)
mini_batch_shape[0] = args.batch_size
args.cfg.input_shape = mini_batch_shape
print("")
else:
self.test_loader = self.__data_loader(args, 'test')
self.test_loader.dataset.X = self.test_loader.dataset.X.reshape(-1, 22,500)
print("")
def __data_loader(self, args, phase):
return DataLoader(BCIC4_2A(args, phase),
batch_size=args.batch_size,
shuffle=True if phase == 'train' else False,
drop_last=True if phase == 'train' else False)
| 42.580645 | 89 | 0.609091 | from torch.utils.data import DataLoader
from data_loader.dataset.bcic4_2a import BCIC4_2A
class DataGenerator:
def __init__(self, args):
print("[Load Data]")
if args.mode == 'train':
self.train_loader = self.__data_loader(args, 'train')
self.val_loader = self.__data_loader(args, 'val')
self.train_loader.dataset.X = self.train_loader.dataset.X.reshape(-1, 22,500)
self.val_loader.dataset.X = self.val_loader.dataset.X.reshape(-1, 22,500)
print(f"train size: {self.train_loader.dataset.X.shape}")
print(f"val size: {self.val_loader.dataset.X.shape}")
mini_batch_shape = list(self.train_loader.dataset.X.shape)
mini_batch_shape[0] = args.batch_size
args.cfg.input_shape = mini_batch_shape
print("")
else:
self.test_loader = self.__data_loader(args, 'test')
self.test_loader.dataset.X = self.test_loader.dataset.X.reshape(-1, 22,500)
print("")
def __data_loader(self, args, phase):
return DataLoader(BCIC4_2A(args, phase),
batch_size=args.batch_size,
shuffle=True if phase == 'train' else False,
drop_last=True if phase == 'train' else False)
| true | true |
1c3a7e2f81ccaf710e9ab7d0d3e2a280f179d8b8 | 1,394 | py | Python | model-optimizer/extensions/front/tf/gather_ext.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | 3 | 2020-02-09T23:25:37.000Z | 2021-01-19T09:44:12.000Z | model-optimizer/extensions/front/tf/gather_ext.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | null | null | null | model-optimizer/extensions/front/tf/gather_ext.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | 2 | 2020-04-18T16:24:39.000Z | 2021-01-19T09:42:19.000Z | """
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from extensions.ops.gather import Gather
from mo.front.extractor import FrontExtractorOp
class GatherFrontExtractor(FrontExtractorOp):
op = 'Gather'
enabled = True
@staticmethod
def extract(node):
attrs = {}
Gather.update_node_stat(node, attrs)
return __class__.enabled
class ResourceGatherFrontExtractor(FrontExtractorOp):
op = 'ResourceGather'
enabled = True
@staticmethod
def extract(node):
attrs = {}
Gather.update_node_stat(node, attrs)
return __class__.enabled
class GatherV2FrontExtractor(FrontExtractorOp):
op = 'GatherV2'
enabled = True
@staticmethod
def extract(node):
attrs = {}
Gather.update_node_stat(node, attrs)
return __class__.enabled
| 23.233333 | 73 | 0.713056 |
import numpy as np
from extensions.ops.gather import Gather
from mo.front.extractor import FrontExtractorOp
class GatherFrontExtractor(FrontExtractorOp):
op = 'Gather'
enabled = True
@staticmethod
def extract(node):
attrs = {}
Gather.update_node_stat(node, attrs)
return __class__.enabled
class ResourceGatherFrontExtractor(FrontExtractorOp):
op = 'ResourceGather'
enabled = True
@staticmethod
def extract(node):
attrs = {}
Gather.update_node_stat(node, attrs)
return __class__.enabled
class GatherV2FrontExtractor(FrontExtractorOp):
op = 'GatherV2'
enabled = True
@staticmethod
def extract(node):
attrs = {}
Gather.update_node_stat(node, attrs)
return __class__.enabled
| true | true |
1c3a7e35403f792a0cf1f5ff099da7a89b4cec8c | 4,974 | py | Python | backend/env/lib/python3.8/site-packages/prompt_toolkit/contrib/telnet/protocol.py | lubitelpospat/CFM-source | 4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03 | [
"MIT"
] | 17 | 2020-06-10T23:33:11.000Z | 2021-01-02T20:38:25.000Z | backend/env/lib/python3.8/site-packages/prompt_toolkit/contrib/telnet/protocol.py | lubitelpospat/CFM-source | 4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03 | [
"MIT"
] | 24 | 2020-03-25T19:35:43.000Z | 2022-02-10T11:46:50.000Z | backend/env/lib/python3.8/site-packages/prompt_toolkit/contrib/telnet/protocol.py | lubitelpospat/CFM-source | 4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03 | [
"MIT"
] | 11 | 2019-01-21T17:51:48.000Z | 2021-08-10T07:04:33.000Z | """
Parser for the Telnet protocol. (Not a complete implementation of the telnet
specification, but sufficient for a command line interface.)
Inspired by `Twisted.conch.telnet`.
"""
import struct
from typing import Callable, Generator
from .log import logger
__all__ = [
"TelnetProtocolParser",
]
def int2byte(number: int) -> bytes:
return bytes((number,))
# Telnet constants.
NOP = int2byte(0)
SGA = int2byte(3)
IAC = int2byte(255)
DO = int2byte(253)
DONT = int2byte(254)
LINEMODE = int2byte(34)
SB = int2byte(250)
WILL = int2byte(251)
WONT = int2byte(252)
MODE = int2byte(1)
SE = int2byte(240)
ECHO = int2byte(1)
NAWS = int2byte(31)
LINEMODE = int2byte(34)
SUPPRESS_GO_AHEAD = int2byte(3)
DM = int2byte(242)
BRK = int2byte(243)
IP = int2byte(244)
AO = int2byte(245)
AYT = int2byte(246)
EC = int2byte(247)
EL = int2byte(248)
GA = int2byte(249)
class TelnetProtocolParser:
"""
Parser for the Telnet protocol.
Usage::
def data_received(data):
print(data)
def size_received(rows, columns):
print(rows, columns)
p = TelnetProtocolParser(data_received, size_received)
p.feed(binary_data)
"""
def __init__(
self,
data_received_callback: Callable[[bytes], None],
size_received_callback: Callable[[int, int], None],
) -> None:
self.data_received_callback = data_received_callback
self.size_received_callback = size_received_callback
self._parser = self._parse_coroutine()
self._parser.send(None) # type: ignore
def received_data(self, data: bytes) -> None:
self.data_received_callback(data)
def do_received(self, data: bytes) -> None:
""" Received telnet DO command. """
logger.info("DO %r", data)
def dont_received(self, data: bytes) -> None:
""" Received telnet DONT command. """
logger.info("DONT %r", data)
def will_received(self, data: bytes) -> None:
""" Received telnet WILL command. """
logger.info("WILL %r", data)
def wont_received(self, data: bytes) -> None:
""" Received telnet WONT command. """
logger.info("WONT %r", data)
def command_received(self, command: bytes, data: bytes) -> None:
if command == DO:
self.do_received(data)
elif command == DONT:
self.dont_received(data)
elif command == WILL:
self.will_received(data)
elif command == WONT:
self.wont_received(data)
else:
logger.info("command received %r %r", command, data)
def naws(self, data: bytes) -> None:
"""
Received NAWS. (Window dimensions.)
"""
if len(data) == 4:
# NOTE: the first parameter of struct.unpack should be
# a 'str' object. Both on Py2/py3. This crashes on OSX
# otherwise.
columns, rows = struct.unpack(str("!HH"), data)
self.size_received_callback(rows, columns)
else:
logger.warning("Wrong number of NAWS bytes")
def negotiate(self, data: bytes) -> None:
"""
Got negotiate data.
"""
command, payload = data[0:1], data[1:]
if command == NAWS:
self.naws(payload)
else:
logger.info("Negotiate (%r got bytes)", len(data))
def _parse_coroutine(self) -> Generator[None, bytes, None]:
"""
Parser state machine.
Every 'yield' expression returns the next byte.
"""
while True:
d = yield
if d == int2byte(0):
pass # NOP
# Go to state escaped.
elif d == IAC:
d2 = yield
if d2 == IAC:
self.received_data(d2)
# Handle simple commands.
elif d2 in (NOP, DM, BRK, IP, AO, AYT, EC, EL, GA):
self.command_received(d2, b"")
# Handle IAC-[DO/DONT/WILL/WONT] commands.
elif d2 in (DO, DONT, WILL, WONT):
d3 = yield
self.command_received(d2, d3)
# Subnegotiation
elif d2 == SB:
# Consume everything until next IAC-SE
data = []
while True:
d3 = yield
if d3 == IAC:
d4 = yield
if d4 == SE:
break
else:
data.append(d4)
else:
data.append(d3)
self.negotiate(b"".join(data))
else:
self.received_data(d)
def feed(self, data: bytes) -> None:
"""
Feed data to the parser.
"""
for b in data:
self._parser.send(int2byte(b))
| 26.31746 | 76 | 0.533374 | import struct
from typing import Callable, Generator
from .log import logger
__all__ = [
"TelnetProtocolParser",
]
def int2byte(number: int) -> bytes:
return bytes((number,))
NOP = int2byte(0)
SGA = int2byte(3)
IAC = int2byte(255)
DO = int2byte(253)
DONT = int2byte(254)
LINEMODE = int2byte(34)
SB = int2byte(250)
WILL = int2byte(251)
WONT = int2byte(252)
MODE = int2byte(1)
SE = int2byte(240)
ECHO = int2byte(1)
NAWS = int2byte(31)
LINEMODE = int2byte(34)
SUPPRESS_GO_AHEAD = int2byte(3)
DM = int2byte(242)
BRK = int2byte(243)
IP = int2byte(244)
AO = int2byte(245)
AYT = int2byte(246)
EC = int2byte(247)
EL = int2byte(248)
GA = int2byte(249)
class TelnetProtocolParser:
def __init__(
self,
data_received_callback: Callable[[bytes], None],
size_received_callback: Callable[[int, int], None],
) -> None:
self.data_received_callback = data_received_callback
self.size_received_callback = size_received_callback
self._parser = self._parse_coroutine()
self._parser.send(None)
def received_data(self, data: bytes) -> None:
self.data_received_callback(data)
def do_received(self, data: bytes) -> None:
logger.info("DO %r", data)
def dont_received(self, data: bytes) -> None:
logger.info("DONT %r", data)
def will_received(self, data: bytes) -> None:
logger.info("WILL %r", data)
def wont_received(self, data: bytes) -> None:
logger.info("WONT %r", data)
def command_received(self, command: bytes, data: bytes) -> None:
if command == DO:
self.do_received(data)
elif command == DONT:
self.dont_received(data)
elif command == WILL:
self.will_received(data)
elif command == WONT:
self.wont_received(data)
else:
logger.info("command received %r %r", command, data)
def naws(self, data: bytes) -> None:
if len(data) == 4:
columns, rows = struct.unpack(str("!HH"), data)
self.size_received_callback(rows, columns)
else:
logger.warning("Wrong number of NAWS bytes")
def negotiate(self, data: bytes) -> None:
command, payload = data[0:1], data[1:]
if command == NAWS:
self.naws(payload)
else:
logger.info("Negotiate (%r got bytes)", len(data))
def _parse_coroutine(self) -> Generator[None, bytes, None]:
while True:
d = yield
if d == int2byte(0):
pass
elif d == IAC:
d2 = yield
if d2 == IAC:
self.received_data(d2)
elif d2 in (NOP, DM, BRK, IP, AO, AYT, EC, EL, GA):
self.command_received(d2, b"")
elif d2 in (DO, DONT, WILL, WONT):
d3 = yield
self.command_received(d2, d3)
elif d2 == SB:
data = []
while True:
d3 = yield
if d3 == IAC:
d4 = yield
if d4 == SE:
break
else:
data.append(d4)
else:
data.append(d3)
self.negotiate(b"".join(data))
else:
self.received_data(d)
def feed(self, data: bytes) -> None:
for b in data:
self._parser.send(int2byte(b))
| true | true |
1c3a7e4c47599ec3fd2482ae33f5943bbbc4669d | 480 | py | Python | appengine/chromium_try_flakes/issue_tracker/utils.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/chromium_try_flakes/issue_tracker/utils.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/third_party/monorail_api/utils.py | NDevTK/chromium-infra | d38e088e158d81f7f2065a38aa1ea1894f735ec4 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions"""
import datetime
def parseDateTime(dt_str):
if dt_str is None:
return None
dt, _, us = dt_str.partition(".")
dt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
if us:
us = int(us.rstrip("Z"), 10)
return dt + datetime.timedelta(microseconds=us)
else:
return dt
| 25.263158 | 72 | 0.68125 |
import datetime
def parseDateTime(dt_str):
if dt_str is None:
return None
dt, _, us = dt_str.partition(".")
dt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
if us:
us = int(us.rstrip("Z"), 10)
return dt + datetime.timedelta(microseconds=us)
else:
return dt
| true | true |
1c3a7ffbe112c0b6aa9e4322f1d64df0ef549c64 | 20,637 | py | Python | acc.py | gamestop-tech-brands/py-acc | c10cd3f1cdb2a6752a49aacdf8113cc93644724b | [
"Apache-2.0"
] | 4 | 2018-12-02T20:37:44.000Z | 2019-01-15T17:43:29.000Z | acc.py | gamestop-tech-brands/py-acc | c10cd3f1cdb2a6752a49aacdf8113cc93644724b | [
"Apache-2.0"
] | null | null | null | acc.py | gamestop-tech-brands/py-acc | c10cd3f1cdb2a6752a49aacdf8113cc93644724b | [
"Apache-2.0"
] | null | null | null | #######################################################################################################################
#
# AppleCare Connect API Library Python 3.x Module
#
# Overview
# The purpose of this Python module is to provide a standard Python module to interact with the AppleCare Connect API.
# Each method in this function interacts seamlessly with the API and either returns data from the method call or a
# status message indicating the result of the API call
#
# API methods not yet implemented are:
# - Get POC Content
# - Get Consolidated POC
# - Device Configuration
# - Update Failed Authorization Status
#
# Dependencies
# - Python 3.x
# - 'requests' module
#
# Credits
# Big thanks to the folks that wrote the Meraki 'dashboard-api-python' module. This module borrowed a lot of from them.
#######################################################################################################################
import json
import os
import requests
def acc_credentials():
"""
:usage: Defines the AppleCare Settings for the API calls
:return: Requests Session w/ headers and ACC cert, ACC ShipTo, and endpoint base URL
"""
# Begin a Requests Session for all API Calls
session = requests.Session()
session.headers.update({'Content-Type': "application/json;charset=utf-8"})
# Get ACC Connection Details from Environment Variables
acc_ship_to = os.environ['ACC_SHIPTO'] # AppleCare Connect 10 Digit SHIPTO Number
acc_env = os.environ['ACC_ENV'] # AppleCare Connect Environment: UAT or PROD
# Set the base_url of the AppleCare Connect endpoint and SSL cert
# Default to ACC Application IPT Sandbox w/ UAT Cert
base_url = "https://acc-ipt.apple.com/order-service/1.0"
session.cert = (
os.environ['ACC_UAT_CERT'], # Path to AppleCare Connect UAT Cert .PEM File
os.environ['ACC_UAT_PRIVATE_KEY'] # Path to AppleCare Connect UAT Private Key .PEM File
)
if acc_env == 'UAT':
# Joint UAT environment
session.cert = (
os.environ['ACC_UAT_CERT'], # Path to AppleCare Connect UAT Cert .PEM File
os.environ['ACC_UAT_PRIVATE_KEY'] # Path to AppleCare Connect UAT Private Key .PEM File
)
if (int(acc_ship_to) % 2) == 0:
base_url = "https://api-applecareconnect-ept.apple.com/order-service/1.0"
else:
base_url = "https://api-applecareconnect-ept2.apple.com/order-service/1.0"
elif acc_env == 'PROD':
# Production environment
session.cert = (
os.environ['ACC_PROD_CERT'], # Path to AppleCare Connect PROD Cert .PEM File
os.environ['ACC_PROD_PRIVATE_KEY'] # Path to AppleCare Connect PROD Private Key .PEM File
)
if (int(acc_ship_to) % 2) == 0:
base_url = "https://api-applecareconnect.apple.com/order-service/1.0"
else:
base_url = "https://api-applecareconnect2.apple.com/order-service/1.0"
return session, acc_ship_to, base_url
def is_json(json_array):
"""
:param json_array: String variable to be validated if it is JSON
:return: True if json_array is valid, False if not
"""
try:
json_object = json.loads(json_array)
except ValueError:
return False
return True
def _collect_api_errors(api_errors):
    """Normalize an ACC error payload into parallel code/message lists.

    :param api_errors: Either a single error dict or a list of error dicts,
                       each carrying 'errorCode' and 'errorMessage'.
    :return: Tuple of (error codes list, error messages list)
    """
    if isinstance(api_errors, dict):
        # Single Error
        return [api_errors["errorCode"]], [api_errors["errorMessage"]]
    # Multiple Errors
    error_code = []
    error_message = []
    for error in api_errors:
        error_code.append(error["errorCode"])
        error_message.append(error["errorMessage"])
    return error_code, error_message


def response_handler(full_response, suppress_print):
    """
    :param full_response: Raw response body (JSON string) from the ACC API
    :param suppress_print: Suppresses status/error printing when True
    :return: Tuple of (parsed response, error codes, error messages).
             On valid JSON the parsed response is the decoded object and the
             error lists are empty for a successful call. On invalid JSON the
             raw text is returned with a single sentinel code/message
             (previously this path crashed by re-parsing the bad body).
    """
    # Parse once up front; ValueError means the body is not JSON at all.
    try:
        json_response = json.loads(full_response)
    except ValueError:
        error_code = "ACC_ERR_0001"
        error_message = "JSON is invalid - Inspect full response for errors"
        if suppress_print is False:
            print('{}\n'.format(error_message))
            print(full_response)
        return full_response, error_code, error_message

    error_code = []
    error_message = []
    # Locate the error payload. Membership is tested on the RAW body text
    # (matching the original behaviour) in the original precedence order.
    if "orderErrorResponse" in full_response:
        # Verify/Create/Cancel Order Error
        api_errors = json_response["orderErrorResponse"]
    elif "deviceErrorResponse" in full_response:
        # Device Enrollment Error (nested inside the order details)
        api_errors = json_response["orderDetailsResponses"]["deviceEligibility"]["deviceErrorResponse"]
    elif "lookupErrorResponse" in full_response:
        # Order Lookup Error
        api_errors = json_response["lookupErrorResponse"]
    elif "pocErrorResponse" in full_response:
        # POC Content Error
        api_errors = json_response["pocErrorResponse"]
    elif "deviceConfigErrorResponse" in full_response:
        # Device Configuration Error
        api_errors = json_response["deviceConfigErrorResponse"]
    elif "failedAuthErrorResponse" in full_response:
        # Failed Auth Error
        api_errors = json_response["failedAuthErrorResponse"]
    elif "errorResponse" in full_response:
        # Consolidated POC Error
        api_errors = json_response["errorResponse"]
    elif "error_code" in full_response:
        # Ship-to Error. NOTE(review): the membership test looks for the
        # snake_case text "error_code" but the payload is indexed with the
        # camelCase keys — preserved as-is; confirm against the API docs.
        api_errors = {
            "errorCode": json_response["errorCode"],
            "errorMessage": json_response["errorMessage"],
        }
    else:
        # No Errors
        api_errors = None

    if api_errors is None:
        if suppress_print is False:
            print('REST Operation Successful - See full response for details\n')
    else:
        error_code, error_message = _collect_api_errors(api_errors)
        if suppress_print is False:
            print('API Error: {0}'.format(error_code))
    return json_response, error_code, error_message
def verify_order(invoice_number, first_name, last_name, company_name, email_address, address_line1, address_line2, city,
                 state, zip_code, device_id, secondary_serial, purchase_date, suppress_print=False):
    """
    :usage: Pre-flight validation of an AppleCare enrollment: submits the same payload
            create_order would, so problems surface before an order is actually placed.
    :param invoice_number: Invoice number from POS
    :param first_name: Customer's first name as shown on invoice
    :param last_name: Customer's last name as shown on invoice
    :param company_name: Customer's company as shown on invoice (optional)
    :param email_address: Customer's email address as shown on invoice
    :param address_line1: First line of Customer's street address as shown on invoice (optional)
    :param address_line2: Second line of Customer's street address as shown on invoice (optional)
    :param city: City of Customer's street address as shown on invoice (optional)
    :param state: State of Customer's street address as shown on invoice (optional)
    :param zip_code: Zip Code of Customer's street address as shown on invoice (optional)
    :param device_id: Serial number of the AppleCare eligible device sold
    :param secondary_serial: Serial number the secondary AppleCare eligible device sold (Thunderbolt Display, optional)
    :param purchase_date: Purchase date of the AppleCare eligible device
    :param suppress_print: Suppress any print output from function (Default: False)
    :return: JSON formatted strings of the complete API request, response, and any error codes
    """
    # Session / endpoint setup
    session, acc_ship_to, base_url = acc_credentials()
    post_url = '{0}/verify-order/'.format(str(base_url))
    call_type = 'verify_order'
    # Common request context for every ACC call
    request_context = dict(shipTo=acc_ship_to, timeZone="420", langCode="en")
    # Customer details from the invoice
    customer_request = dict(
        customerEmailId=email_address, address_line1=address_line1, address_line2=address_line2, city=city,
        stateCode=state, countryCode="US", zipCode=zip_code
    )
    # The first/last name fields are limited to 34 characters combined;
    # longer names are shifted into the company-name field instead.
    full_name = '{0} {1}'.format(str(first_name), str(last_name))
    name_too_long = len(full_name) > 34
    customer_request['company_name'] = full_name if name_too_long else company_name
    customer_request['customerFirstName'] = "" if name_too_long else first_name
    customer_request['customerLastName'] = "" if name_too_long else last_name
    # Device being enrolled
    device_request = dict(
        deviceId=device_id.upper(), secondarySerialNumber=secondary_serial,
        hardwareDateOfPurchase=purchase_date, verifyMPN="", nsPart=""
    )
    # Assemble the full request body
    post_data = dict(
        requestContext=request_context, customerRequest=customer_request, deviceRequest=device_request,
        appleCareSalesDate=purchase_date, pocLanguage="ENG", pocDeliveryPreference="E",
        purchaseOrderNumber=invoice_number, marketID="", overridePocFlag="", emailFlag="1"
    )
    # Submit and hand the raw body to the shared response parser
    response = session.post(post_url, data=json.dumps(post_data))
    full_response, error_code, error_message = response_handler(response.text, suppress_print)
    return post_data, full_response, error_code, error_message, call_type
def create_order(invoice_number, first_name, last_name, company_name, email_address, address_line1, address_line2, city,
                 state, zip_code, device_id, secondary_serial, purchase_date, suppress_print=False):
    """
    :usage: Allows users to create an order to enroll a unit for AppleCare (APP/AC+). It is recommended that this API
            always be preceded by a verifyOrder API call, to increase the chances of a successful order creation.
    :param invoice_number: Invoice number from POS
    :param first_name: Customer's first name as shown on invoice
    :param last_name: Customer's last name as shown on invoice
    :param company_name: Customer's company as shown on invoice (optional)
    :param email_address: Customer's email address as shown on invoice
    :param address_line1: First line of Customer's street address as shown on invoice (optional)
    :param address_line2: Second line of Customer's street address as shown on invoice (optional)
    :param city: City of Customer's street address as shown on invoice (optional)
    :param state: State of Customer's street address as shown on invoice (optional)
    :param zip_code: Zip Code of Customer's street address as shown on invoice (optional)
    :param device_id: Serial number of the AppleCare eligible device sold
    :param secondary_serial: Serial number the secondary AppleCare eligible device sold (Thunderbolt Display, optional)
    :param purchase_date: Purchase date of the AppleCare eligible device
    :param suppress_print: Suppress any print output from function (Default: False)
    :return: JSON formatted strings of the complete API request, response, and any error codes
    """
    # NOTE(review): this function is a near-duplicate of verify_order (only
    # the endpoint and call_type differ) — consider extracting a shared
    # payload builder.
    # Establish request variables
    session, acc_ship_to, base_url = acc_credentials()
    post_url = '{0}/create-order/'.format(str(base_url))
    call_type = 'create_order'
    # Create request array
    request_array = dict(shipTo=acc_ship_to, timeZone="420", langCode="en")
    # Customer Request array
    # NOTE(review): 'address_line1'/'address_line2'/'city'/'company_name' are
    # snake_case while the rest of the payload is camelCase — presumably what
    # this API expects; confirm against the ACC order-service spec.
    customer = dict(
        customerEmailId=email_address, address_line1=address_line1, address_line2=address_line2, city=city,
        stateCode=state, countryCode="US", zipCode=zip_code
    )
    # Use 'company_name' if 'first_name' and 'last_name' combined is longer than 34 characters
    full_name = '{0} {1}'.format(str(first_name), str(last_name))
    if len(full_name) > 34:
        customer['company_name'] = full_name
        customer['customerFirstName'] = ""
        customer['customerLastName'] = ""
    else:
        customer['company_name'] = company_name
        customer['customerFirstName'] = first_name
        customer['customerLastName'] = last_name
    # deviceRequest
    device = dict(
        deviceId=device_id.upper(), secondarySerialNumber=secondary_serial,
        hardwareDateOfPurchase=purchase_date, verifyMPN="", nsPart=""
    )
    # Prepare data in array
    post_data = dict(
        requestContext=request_array, customerRequest=customer, deviceRequest=device,
        appleCareSalesDate=purchase_date, pocLanguage="ENG", pocDeliveryPreference="E",
        purchaseOrderNumber=invoice_number, marketID="", overridePocFlag="", emailFlag="1"
    )
    # Format post_data as JSON
    full_request = json.dumps(post_data)
    # Send data to API
    response = session.post(post_url, data=full_request)
    # Call return handler function to parse request response
    full_response, error_code, error_message = response_handler(response.text, suppress_print)
    return post_data, full_response, error_code, error_message, call_type
def cancel_order(device_id, cancellation_date, cancel_reason_code, suppress_print=False):
    """
    :usage: Cancels an Auto Enrollment (AE) order for AppleCare (APP/AC+).
    :param device_id: Serial number of the AppleCare eligible device sold
    :param cancellation_date: Date the device and AppleCare were refunded as shown on the return invoice
    :param cancel_reason_code: Cancellation code explaining why the device was returned
    :param suppress_print: Suppress any print output from function (Default: False)
    :return: JSON formatted strings of the complete API request, response, and any error codes
    """
    # Session / endpoint setup
    session, acc_ship_to, base_url = acc_credentials()
    post_url = '{0}/cancel-order/'.format(str(base_url))
    call_type = 'cancel_order'
    # Assemble the cancellation payload (purchaseOrderNumber is always empty
    # here; the device serial identifies the enrollment).
    post_data = {
        "requestContext": {"shipTo": acc_ship_to, "timeZone": "420", "langCode": "en"},
        "deviceId": device_id.upper(),
        "cancellationDate": cancellation_date,
        "purchaseOrderNumber": "",
        "cancelReasonCode": cancel_reason_code,
    }
    # Submit and hand the raw body to the shared response parser
    response = session.post(post_url, data=json.dumps(post_data))
    full_response, error_code, error_message = response_handler(response.text, suppress_print)
    return post_data, full_response, error_code, error_message, call_type
def three_sixty_lookup(invoice_number, device_id, email_address, suppress_print=False):
    """
    :usage: Designed to search a details of the device based on various parameters.
    :param invoice_number: Refund invoice number from POS
    :param device_id: Serial number of the AppleCare eligible device sold
    :param email_address: Registered email associated with AppleCare Enrollment
    :param suppress_print: Suppress any print output from function (Default: False)
    :return: JSON formatted strings of the complete API request, response, and any error codes
    """
    # Establish request variables
    session, acc_ship_to, base_url = acc_credentials()
    post_url = '{0}/get-order/'.format(str(base_url))
    call_type = 'three_sixty_lookup'
    # Create request array
    request_array = dict(shipTo=acc_ship_to, timeZone="420", langCode="en")
    post_data = dict(requestContext=request_array)
    # Only one search key is populated, chosen by priority: device serial,
    # then invoice number, then email; the others are sent empty. (Fix: the
    # three fields are now always present — previously, when no key was
    # supplied, they were omitted from the payload entirely.)
    post_data['deviceId'] = device_id.upper() if device_id else ""
    post_data['purchaseOrderNumber'] = invoice_number if invoice_number and not device_id else ""
    post_data['customerEmailId'] = (
        email_address if email_address and not device_id and not invoice_number else ""
    )
    # Prepare the post_data for the API
    full_request = json.dumps(post_data)
    # Send data to API
    response = session.post(post_url, data=full_request)
    # Call return handler function to parse request response
    full_response, error_code, error_message = response_handler(response.text, suppress_print)
    return post_data, full_response, error_code, error_message, call_type
| 44.285408 | 120 | 0.657024 | true | true | |
1c3a8105ce41b9b0a4cec6739e683ef2e18381b7 | 1,082 | py | Python | userdocker/userdocker.py | jsteffen/userdocker | eb3b6a2421ca392ec4485744244d913e51687040 | [
"MIT"
] | 15 | 2017-03-01T17:08:20.000Z | 2021-03-18T15:32:02.000Z | userdocker/userdocker.py | jsteffen/userdocker | eb3b6a2421ca392ec4485744244d913e51687040 | [
"MIT"
] | 15 | 2017-03-07T16:21:38.000Z | 2021-04-06T14:28:04.000Z | userdocker/userdocker.py | jsteffen/userdocker | eb3b6a2421ca392ec4485744244d913e51687040 | [
"MIT"
] | 7 | 2017-12-06T12:00:31.000Z | 2021-12-21T03:53:19.000Z | # -*- coding: utf-8 -*-
import logging
import os
import sys
from .helpers.logger import logger
from .helpers.logger import logger_setup
from .helpers.cmd import init_cmd
from .helpers.exceptions import UserDockerException
from .helpers.execute import exit_exec_cmd
from .parser import parse_args
from .subcommands import specific_command_executors
# Import-time check: userdocker is meant to be invoked through sudo
# (sudo sets SUDO_UID in the environment); warn — but keep going — when
# it is not.
if not os.getenv('SUDO_UID'):
    logging.basicConfig()
    logger.warning("%s should be executed via sudo", sys.argv[0])
def prepare_and_exec_cmd(args):
    """Dispatch ``args.subcommand``.

    Subcommands with a dedicated executor registered in
    ``specific_command_executors`` are handled there; anything else is
    turned into a command line via ``init_cmd`` and exec'd directly.
    """
    subcommand = args.subcommand
    if subcommand not in specific_command_executors:
        exit_exec_cmd(init_cmd(args), dry_run=args.dry_run)
    else:
        specific_command_executors[subcommand](args)
def parse_and_exec_cmd():
    """Parse the command line, configure logging, then dispatch.

    Raises:
        UserDockerException: if DOCKER_HOST is set (remote daemons are
            not supported yet).
    """
    if os.getenv('DOCKER_HOST'):
        raise UserDockerException(
            'ERROR: DOCKER_HOST env var not supported yet'
        )
    parsed_args = parse_args()
    logger_setup(parsed_args)
    prepare_and_exec_cmd(parsed_args)
def main():
    """Console entry point.

    Domain errors (``UserDockerException``) are reported on stderr and
    become exit status 1 instead of a traceback.
    """
    try:
        parse_and_exec_cmd()
    except UserDockerException as exc:
        print(exc, file=sys.stderr)
        sys.exit(1)
| 23.521739 | 65 | 0.713494 |
import logging
import os
import sys
from .helpers.logger import logger
from .helpers.logger import logger_setup
from .helpers.cmd import init_cmd
from .helpers.exceptions import UserDockerException
from .helpers.execute import exit_exec_cmd
from .parser import parse_args
from .subcommands import specific_command_executors
if not os.getenv('SUDO_UID'):
logging.basicConfig()
logger.warning("%s should be executed via sudo", sys.argv[0])
def prepare_and_exec_cmd(args):
scmd = args.subcommand
if scmd in specific_command_executors:
specific_command_executors[scmd](args)
else:
exit_exec_cmd(init_cmd(args), dry_run=args.dry_run)
def parse_and_exec_cmd():
if os.getenv('DOCKER_HOST'):
raise UserDockerException(
'ERROR: DOCKER_HOST env var not supported yet'
)
args = parse_args()
logger_setup(args)
prepare_and_exec_cmd(args)
def main():
try:
parse_and_exec_cmd()
except UserDockerException as e:
print(e, file=sys.stderr)
sys.exit(1)
| true | true |
1c3a812258ec9a355328f15ed21b91f54a184eaa | 654 | py | Python | 1045 - Tipos de Triangulos.py | le16bits/URI---Python | 9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db | [
"Apache-2.0"
] | null | null | null | 1045 - Tipos de Triangulos.py | le16bits/URI---Python | 9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db | [
"Apache-2.0"
] | null | null | null | 1045 - Tipos de Triangulos.py | le16bits/URI---Python | 9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db | [
"Apache-2.0"
def classify_triangle(a, b, c):
    """Return the output lines for side lengths a, b, c (URI judge 1045).

    The sides are sorted into descending order (the original did this with
    two redundant passes of pairwise swaps; one sort is enough), the sorted
    sides are echoed as the first line, then the triangle inequality and the
    angle/side classifications are applied exactly as before.
    """
    a, b, c = sorted((a, b, c), reverse=True)
    # NOTE(review): the original script printed the sorted sides before the
    # classification; preserved here, but confirm the judge's expected
    # output actually includes this line.
    lines = ["%.1f %.1f %.1f" % (a, b, c)]
    if a >= b + c:
        lines.append("NAO FORMA TRIANGULO")
    elif a ** 2 > b ** 2 + c ** 2:
        lines.append("TRIANGULO OBTUSANGULO")
    # The remaining checks are independent ifs, as in the original.
    if a ** 2 == b ** 2 + c ** 2:
        lines.append("TRIANGULO RETANGULO")
    if a ** 2 < b ** 2 + c ** 2:
        lines.append("TRIANGULO ACUTANGULO")
    if a == b == c:
        lines.append("TRIANGULO EQUILATERO")
    if (a == b and a != c) or (b == c and b != a):
        lines.append("TRIANGULO ISOSCELES")
    return lines


if __name__ == "__main__":
    # Read three floats from stdin (extra tokens, if any, are ignored just
    # like the original vl[0..2] indexing did).
    tokens = input().split()
    for line in classify_triangle(float(tokens[0]), float(tokens[1]), float(tokens[2])):
        print(line)
| 19.235294 | 46 | 0.513761 | vl=input().split()
A=float(vl[0])
B=float(vl[1])
C=float(vl[2])
if B>A:
aux=B;B=A;A=aux
if C>A:
aux=C;C=A;A=aux
if C>B:
aux=C;C=B;B=aux
if C>A:
aux=C;C=A;A=aux
if B>A:
aux=B;B=A;A=aux
if C>B:
aux=C;C=B;B=aux
print("%.1f %.1f %.1f" %(A,B,C))
if A>=(B+C):
print("NAO FORMA TRIANGULO")
elif A**2 > ((B**2) + (C**2)):
print("TRIANGULO OBTUSANGULO")
if ((A**2)==(B**2+C**2)):
print("TRIANGULO RETANGULO")
if A**2 < (B**2 + C**2):
print("TRIANGULO ACUTANGULO")
if A==B and A==C:
print("TRIANGULO EQUILATERO")
if (((A==B)and(A!=C)) or ((B==C) and (B!=A))):
print("TRIANGULO ISOSCELES")
| true | true |
1c3a81c0fe73e0b3a9110d9fcf0740d103e34d93 | 381 | py | Python | altair_examples/layered_bar_chart.py | progressivis/altair_examples | 9a453ecebdf0bd4776696d4233bee2a8f2eb3606 | [
"BSD-3-Clause"
] | 1 | 2021-10-30T03:55:24.000Z | 2021-10-30T03:55:24.000Z | altair_examples/layered_bar_chart.py | progressivis/altair_examples | 9a453ecebdf0bd4776696d4233bee2a8f2eb3606 | [
"BSD-3-Clause"
] | null | null | null | altair_examples/layered_bar_chart.py | progressivis/altair_examples | 9a453ecebdf0bd4776696d4233bee2a8f2eb3606 | [
"BSD-3-Clause"
] | null | null | null | """
Layered Bar Chart
-----------------
This example shows a segmented bar chart that is layered rather than stacked.
"""
# category: bar charts
import altair as alt
from vega_datasets import data
# Iowa net electricity generation by source, in long (tidy) format.
source = data.iowa_electricity()
# Layered (not stacked) bars: stack=None draws every source's bar from the
# axis origin, so the semi-transparent bars overlap instead of accumulating.
(
    alt.Chart(source)
    .mark_bar(opacity=0.7)
    .encode(
        x="year:O", y=alt.Y("net_generation:Q", stack=None), color="source",
    )
)
| 20.052632 | 77 | 0.653543 |
import altair as alt
from vega_datasets import data
source = data.iowa_electricity()
(
alt.Chart(source)
.mark_bar(opacity=0.7)
.encode(
x="year:O", y=alt.Y("net_generation:Q", stack=None), color="source",
)
)
| true | true |
1c3a8208ad35a4ea36b511d1167fb073a1043820 | 5,503 | py | Python | django_redis/cache.py | techequipt/django-redis | c22ce847d6a8648a304a3cdab49b7bfbac65be95 | [
"BSD-3-Clause"
] | null | null | null | django_redis/cache.py | techequipt/django-redis | c22ce847d6a8648a304a3cdab49b7bfbac65be95 | [
"BSD-3-Clause"
] | null | null | null | django_redis/cache.py | techequipt/django-redis | c22ce847d6a8648a304a3cdab49b7bfbac65be95 | [
"BSD-3-Clause"
] | null | null | null | import functools
import logging
from typing import Any, Callable, Dict, Optional
from django import VERSION as DJANGO_VERSION
from django.conf import settings
from django.core.cache.backends.base import BaseCache
from django.utils.module_loading import import_string
from .exceptions import ConnectionInterrupted, PickleError
DJANGO_REDIS_SCAN_ITERSIZE = getattr(settings, "DJANGO_REDIS_SCAN_ITERSIZE", 10)
CONNECTION_INTERRUPTED = object()
def omit_exception(
    method: Optional[Callable] = None, return_value: Optional[Any] = None
):
    """
    Decorator for cache methods: intercepts redis connection / pickle
    errors and, when the cache instance is configured to ignore them,
    returns ``return_value`` instead of raising (optionally logging the
    ignored exception first).
    """
    if method is None:
        # Invoked as ``@omit_exception(return_value=...)``: return the
        # parameterized decorator.
        return functools.partial(omit_exception, return_value=return_value)

    @functools.wraps(method)
    def _decorator(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except (ConnectionInterrupted, PickleError) as e:
            if not self._ignore_exceptions:
                # Surface the underlying driver error to the caller.
                raise e.__cause__
            if self._log_ignored_exceptions:
                self.logger.error(str(e))
            return return_value

    return _decorator
class RedisCache(BaseCache):
    """Django cache backend backed by Redis.

    A thin wrapper that delegates every cache operation to a pluggable
    client class (``OPTIONS["CLIENT_CLASS"]``). Public methods are wrapped
    with ``omit_exception`` so connection/pickle errors can be ignored
    (returning a fallback value) when the cache is configured to do so.
    """
    def __init__(self, server: str, params: Dict[str, Any]) -> None:
        super().__init__(params)
        self._server = server
        self._params = params
        options = params.get("OPTIONS", {})
        # Dotted path of the client implementation; resolved just below.
        self._client_cls = options.get(
            "CLIENT_CLASS", "django_redis.client.DefaultClient"
        )
        self._client_cls = import_string(self._client_cls)
        # Actual client is created lazily on first access (see `client`).
        self._client = None
        # Per-cache OPTIONS entry wins over the global Django setting.
        self._ignore_exceptions = options.get(
            "IGNORE_EXCEPTIONS",
            getattr(settings, "DJANGO_REDIS_IGNORE_EXCEPTIONS", False),
        )
        self._log_ignored_exceptions = getattr(
            settings, "DJANGO_REDIS_LOG_IGNORED_EXCEPTIONS", False
        )
        # Logger is only created when ignored exceptions should be logged.
        self.logger = (
            logging.getLogger(getattr(settings, "DJANGO_REDIS_LOGGER", __name__))
            if self._log_ignored_exceptions
            else None
        )
    @property
    def client(self):
        """
        Lazy client connection property.
        """
        if self._client is None:
            self._client = self._client_cls(self._server, self._params, self)
        return self._client
    # The methods below are straight delegations to the client, wrapped by
    # omit_exception (see its docstring for the ignore/log semantics).
    @omit_exception
    def set(self, *args, **kwargs):
        return self.client.set(*args, **kwargs)
    @omit_exception
    def incr_version(self, *args, **kwargs):
        return self.client.incr_version(*args, **kwargs)
    @omit_exception
    def add(self, *args, **kwargs):
        return self.client.add(*args, **kwargs)
    def get(self, key, default=None, version=None, client=None):
        # _get returns the CONNECTION_INTERRUPTED sentinel when the error
        # was ignored; translate that into the caller-supplied default so
        # callers cannot observe the sentinel object.
        value = self._get(key, default, version, client)
        if value is CONNECTION_INTERRUPTED:
            value = default
        return value
    @omit_exception(return_value=CONNECTION_INTERRUPTED)
    def _get(self, key, default, version, client):
        return self.client.get(key, default=default, version=version, client=client)
    @omit_exception
    def delete(self, *args, **kwargs):
        """returns a boolean instead of int since django version 3.1"""
        result = self.client.delete(*args, **kwargs)
        return bool(result) if DJANGO_VERSION >= (3, 1, 0) else result
    @omit_exception
    def delete_pattern(self, *args, **kwargs):
        # Default SCAN batch size comes from the module-level setting.
        kwargs["itersize"] = kwargs.get("itersize", DJANGO_REDIS_SCAN_ITERSIZE)
        return self.client.delete_pattern(*args, **kwargs)
    @omit_exception
    def delete_many(self, *args, **kwargs):
        return self.client.delete_many(*args, **kwargs)
    @omit_exception
    def clear(self):
        return self.client.clear()
    @omit_exception(return_value={})
    def get_many(self, *args, **kwargs):
        return self.client.get_many(*args, **kwargs)
    @omit_exception
    def set_many(self, *args, **kwargs):
        return self.client.set_many(*args, **kwargs)
    @omit_exception
    def incr(self, *args, **kwargs):
        return self.client.incr(*args, **kwargs)
    @omit_exception
    def decr(self, *args, **kwargs):
        return self.client.decr(*args, **kwargs)
    @omit_exception
    def has_key(self, *args, **kwargs):
        return self.client.has_key(*args, **kwargs)
    @omit_exception
    def keys(self, *args, **kwargs):
        return self.client.keys(*args, **kwargs)
    @omit_exception
    def iter_keys(self, *args, **kwargs):
        return self.client.iter_keys(*args, **kwargs)
    @omit_exception
    def ttl(self, *args, **kwargs):
        return self.client.ttl(*args, **kwargs)
    @omit_exception
    def pttl(self, *args, **kwargs):
        return self.client.pttl(*args, **kwargs)
    @omit_exception
    def persist(self, *args, **kwargs):
        return self.client.persist(*args, **kwargs)
    @omit_exception
    def expire(self, *args, **kwargs):
        return self.client.expire(*args, **kwargs)
    @omit_exception
    def expire_at(self, *args, **kwargs):
        return self.client.expire_at(*args, **kwargs)
    @omit_exception
    def pexpire(self, *args, **kwargs):
        return self.client.pexpire(*args, **kwargs)
    @omit_exception
    def lock(self, *args, **kwargs):
        return self.client.lock(*args, **kwargs)
    @omit_exception
    def close(self, **kwargs):
        self.client.close(**kwargs)
    @omit_exception
    def touch(self, *args, **kwargs):
        return self.client.touch(*args, **kwargs)
| 30.236264 | 84 | 0.644376 | import functools
import logging
from typing import Any, Callable, Dict, Optional
from django import VERSION as DJANGO_VERSION
from django.conf import settings
from django.core.cache.backends.base import BaseCache
from django.utils.module_loading import import_string
from .exceptions import ConnectionInterrupted, PickleError
DJANGO_REDIS_SCAN_ITERSIZE = getattr(settings, "DJANGO_REDIS_SCAN_ITERSIZE", 10)
CONNECTION_INTERRUPTED = object()
def omit_exception(
method: Optional[Callable] = None, return_value: Optional[Any] = None
):
if method is None:
return functools.partial(omit_exception, return_value=return_value)
@functools.wraps(method)
def _decorator(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except (ConnectionInterrupted, PickleError) as e:
if self._ignore_exceptions:
if self._log_ignored_exceptions:
self.logger.error(str(e))
return return_value
raise e.__cause__
return _decorator
class RedisCache(BaseCache):
def __init__(self, server: str, params: Dict[str, Any]) -> None:
super().__init__(params)
self._server = server
self._params = params
options = params.get("OPTIONS", {})
self._client_cls = options.get(
"CLIENT_CLASS", "django_redis.client.DefaultClient"
)
self._client_cls = import_string(self._client_cls)
self._client = None
self._ignore_exceptions = options.get(
"IGNORE_EXCEPTIONS",
getattr(settings, "DJANGO_REDIS_IGNORE_EXCEPTIONS", False),
)
self._log_ignored_exceptions = getattr(
settings, "DJANGO_REDIS_LOG_IGNORED_EXCEPTIONS", False
)
self.logger = (
logging.getLogger(getattr(settings, "DJANGO_REDIS_LOGGER", __name__))
if self._log_ignored_exceptions
else None
)
@property
def client(self):
if self._client is None:
self._client = self._client_cls(self._server, self._params, self)
return self._client
@omit_exception
def set(self, *args, **kwargs):
return self.client.set(*args, **kwargs)
@omit_exception
def incr_version(self, *args, **kwargs):
return self.client.incr_version(*args, **kwargs)
@omit_exception
def add(self, *args, **kwargs):
return self.client.add(*args, **kwargs)
def get(self, key, default=None, version=None, client=None):
value = self._get(key, default, version, client)
if value is CONNECTION_INTERRUPTED:
value = default
return value
@omit_exception(return_value=CONNECTION_INTERRUPTED)
def _get(self, key, default, version, client):
return self.client.get(key, default=default, version=version, client=client)
@omit_exception
def delete(self, *args, **kwargs):
result = self.client.delete(*args, **kwargs)
return bool(result) if DJANGO_VERSION >= (3, 1, 0) else result
@omit_exception
def delete_pattern(self, *args, **kwargs):
kwargs["itersize"] = kwargs.get("itersize", DJANGO_REDIS_SCAN_ITERSIZE)
return self.client.delete_pattern(*args, **kwargs)
@omit_exception
def delete_many(self, *args, **kwargs):
return self.client.delete_many(*args, **kwargs)
@omit_exception
def clear(self):
return self.client.clear()
@omit_exception(return_value={})
def get_many(self, *args, **kwargs):
return self.client.get_many(*args, **kwargs)
@omit_exception
def set_many(self, *args, **kwargs):
return self.client.set_many(*args, **kwargs)
@omit_exception
def incr(self, *args, **kwargs):
return self.client.incr(*args, **kwargs)
@omit_exception
def decr(self, *args, **kwargs):
return self.client.decr(*args, **kwargs)
@omit_exception
def has_key(self, *args, **kwargs):
return self.client.has_key(*args, **kwargs)
@omit_exception
def keys(self, *args, **kwargs):
return self.client.keys(*args, **kwargs)
@omit_exception
def iter_keys(self, *args, **kwargs):
return self.client.iter_keys(*args, **kwargs)
@omit_exception
def ttl(self, *args, **kwargs):
return self.client.ttl(*args, **kwargs)
@omit_exception
def pttl(self, *args, **kwargs):
return self.client.pttl(*args, **kwargs)
@omit_exception
def persist(self, *args, **kwargs):
return self.client.persist(*args, **kwargs)
@omit_exception
def expire(self, *args, **kwargs):
return self.client.expire(*args, **kwargs)
@omit_exception
def expire_at(self, *args, **kwargs):
return self.client.expire_at(*args, **kwargs)
@omit_exception
def pexpire(self, *args, **kwargs):
return self.client.pexpire(*args, **kwargs)
@omit_exception
def lock(self, *args, **kwargs):
return self.client.lock(*args, **kwargs)
@omit_exception
def close(self, **kwargs):
self.client.close(**kwargs)
@omit_exception
def touch(self, *args, **kwargs):
return self.client.touch(*args, **kwargs)
| true | true |
1c3a84bbaa43a9a2f21d753c51547195f7daa84d | 577 | py | Python | Leetcode/week_1/p0001_two_sum.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | 1 | 2021-07-07T00:55:23.000Z | 2021-07-07T00:55:23.000Z | Leetcode/week_1/p0001_two_sum.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | null | null | null | Leetcode/week_1/p0001_two_sum.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | null | null | null | from collections import defaultdict
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
complement_dict = defaultdict(int)
for i, num in enumerate(nums):
if target - num in complement_dict:
return [complement_dict[target - num], i]
complement_dict[num] = i
"""
Runtime O(N)
Space complexity O(N)
Runtime: 48 ms, faster than 79.33% of Python3 online submissions for Two Sum.
Memory Usage: 14.3 MB, less than 53.95% of Python3 online submissions for Two Sum.
"""
| 25.086957 | 82 | 0.663778 | from collections import defaultdict
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
complement_dict = defaultdict(int)
for i, num in enumerate(nums):
if target - num in complement_dict:
return [complement_dict[target - num], i]
complement_dict[num] = i
| true | true |
1c3a84f933c71f07c6fcdbe25450625810daa097 | 3,173 | py | Python | utils.py | nielsota/GANs | 7c4043022ba0fdd2d1f163abf70b7bd3f06be908 | [
"MIT"
] | null | null | null | utils.py | nielsota/GANs | 7c4043022ba0fdd2d1f163abf70b7bd3f06be908 | [
"MIT"
] | null | null | null | utils.py | nielsota/GANs | 7c4043022ba0fdd2d1f163abf70b7bd3f06be908 | [
"MIT"
] | null | null | null | from torchvision.utils import make_grid
import matplotlib.pyplot as plt
import argparse
import os
import torch
from Data import *
################################################################################
############################## UTILITY FUNCTIONS ###############################
################################################################################
# Show images
def show_tensor_images(image_tensor, num_images=25, size=(1, 28, 28)):
    """Render up to ``num_images`` images from a flat batch on a 5-wide grid.

    The tensor is detached and moved to CPU (it may live on CUDA and
    require grad), reshaped to ``(-1, *size)``, arranged with
    ``make_grid`` and drawn via ``plt.imshow``. The figure is drawn but
    not shown; the caller decides when/how to display it.
    """
    images = image_tensor.detach().cpu().view(-1, *size)
    grid = make_grid(images[:num_images], nrow=5)
    plt.imshow(grid.permute(1, 2, 0).squeeze())
# Show time series plots
def make_timeseries_plots(time_series_tensor, num_plots: int = 10):
    """Plot the first *num_plots* series of a batch, one subplot per series."""
    palette = ["blue", "red", "green", "purple"]
    fig, axes = plt.subplots(num_plots)
    fig.set_size_inches(18.5, num_plots * 2)
    # Flatten every sample to a 1-D series before plotting.
    series = time_series_tensor.detach().view(len(time_series_tensor), -1)
    for idx in range(num_plots):
        axis = axes[idx]
        axis.plot(series[idx], color=palette[idx % len(palette)])
        axis.grid()
    return fig
def str2bool(v):
    """argparse-friendly conversion of a flag string to a bool.

    Bools pass through untouched; recognised truthy/falsy spellings are
    matched case-insensitively.

    Raises:
        argparse.ArgumentTypeError: if *v* is not a recognised spelling.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in {'yes', 'true', 't', 'y', '1'}:
        return True
    if lowered in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def makedirectory(dir_name):
    """Create *dir_name* under the current working directory if needed.

    Prints a confirmation only when the directory was actually created
    (matching the original behaviour). Uses EAFP instead of the previous
    exists-then-mkdir check, which was racy between the check and the call.
    """
    models_path = os.path.join(os.getcwd(), dir_name)
    try:
        os.mkdir(models_path)
    except FileExistsError:
        pass  # already present: stay quiet, as before
    else:
        print("Directory '% s' created" % dir_name)
def combine_vectors(z, y):
    """Concatenate two tensors along dim 1, coercing both to float first."""
    z_float = z.float()
    y_float = y.float()
    return torch.cat((z_float, y_float), dim=1)
def combine_noise_and_labels(data, labels):
    """Fuse a noise batch with its labels along a new channel dimension.

    Combines *data* of shape [B, L] with *labels* of shape [B, C] into a
    tensor of shape [B, C + 1, L]: channel 0 holds the data itself and each
    of the remaining C channels repeats one label value across the length.

    The sequence length is now taken from ``data`` instead of being
    hard-coded to 100, so any [B, L] input works; behaviour is unchanged
    for the original L == 100 case.
    """
    seq_len = data.shape[-1]
    # [B, C] -> [B, C, 1] -> [B, C, L]: repeat each label over the length.
    repeated_labels = labels[:, :, None].repeat(1, 1, seq_len)
    # data[:, None, :] has shape [B, 1, L]; concatenation is along dim 1.
    return combine_vectors(data[:, None, :], repeated_labels)
################################################################################
################################################################################
if __name__ == '__main__':
    # Smoke test: pull one batch from the ARMA data loader (load_arima_data
    # comes from the star import of Data) and verify the shapes produced by
    # combine_noise_and_labels.
    print("Building test dataset...")
    test_dataloader = load_arima_data(batch_size=128, dgp = 'arma_11_variable')
    X, y = next(iter(test_dataloader))
    combined = combine_noise_and_labels(X, y)
    print("Output shape: {}".format(X.shape))
    print("Labels shape: {}".format(y.shape))
    print("Combined shape: {}".format(combined.shape))
    # Channel 0 of the combined tensor is the raw data itself.
    print(combined[:, 0, :].shape)
    print(y.shape[1])
    make_timeseries_plots(combined[:, 0, :])
| 32.71134 | 116 | 0.57359 | from torchvision.utils import make_grid
import matplotlib.pyplot as plt
import argparse
import os
import torch
from Data import *
| true | true |
1c3a850cb701d562f7d1a3c856e13af69a80f58d | 1,999 | py | Python | src/data_gen/ohem.py | kehuaWangfff/FashionAI_KeyPoint_Detection_Challenge_Keras | 02422f315403fae4dcd87abf90b08ae9183d75f0 | [
"MIT"
] | 169 | 2018-05-24T08:22:03.000Z | 2022-02-02T15:25:17.000Z | src/data_gen/ohem.py | Koeru/FashionAI_KeyPoint_Detection_Challenge_Keras | 0b3bd8cdee32e05619300e5466578644974279df | [
"MIT"
] | 12 | 2018-05-29T15:40:50.000Z | 2021-11-17T07:35:21.000Z | src/data_gen/ohem.py | Koeru/FashionAI_KeyPoint_Detection_Challenge_Keras | 0b3bd8cdee32e05619300e5466578644974279df | [
"MIT"
] | 54 | 2018-05-25T13:57:42.000Z | 2022-02-08T03:00:07.000Z |
import sys
sys.path.insert(0, "../unet/")
from keras.models import *
from keras.layers import *
from utils import np_euclidean_l2
from dataset import getKpNum
def generate_topk_mask_ohem(input_data, gthmap, keras_model, graph, topK, image_category, dynamicFlag=False):
    '''
    Online hard example mining: run one forward pass, rank the output
    channels by their L2 loss against the ground truth, and keep only the
    hardest topK channels in the returned mask / ground-truth heatmap.

    :param input_data: (image, mask) pair for a single sample
    :param gthmap: ground-truth heatmap, shape (h, w, channels)
    :param keras_model: keras model used for the forward pass
    :param graph: tf graph to WA the keras/tf threading issue
    :param topK: number of highest-loss keypoint channels to keep
    :param image_category: clothing category; only used when dynamicFlag is set
    :param dynamicFlag: if True, override topK with half the category's keypoint count
    :return: (ohem_mask, ohem_gthmap) with all but the topK channels zeroed
    '''
    # do inference, and calculate loss of each channel
    mimg, mmask = input_data
    # add the batch dimension expected by predict()
    ximg = mimg[np.newaxis,:,:,:]
    xmask = mmask[np.newaxis,:,:,:]
    if len(keras_model.input_layers) == 3:
        # three-input models take the mask twice: use original mask as ohem_mask
        inputs = [ximg, xmask, xmask]
    else:
        inputs = [ximg, xmask]
    with graph.as_default():
        keras_output = keras_model.predict(inputs)
    # heatmap of last stage
    outhmap = keras_output[-1]
    channel_num = gthmap.shape[-1]
    # per-channel euclidean (L2) loss against the ground truth
    mloss = list()
    for i in range(channel_num):
        _dtmap = outhmap[0, :, :, i]
        _gtmap = gthmap[:, :, i]
        loss = np_euclidean_l2(_dtmap, _gtmap)
        mloss.append(loss)
    # refill input_mask, set topk as 1.0 and fill 0.0 for rest
    # fixme: topk may different b/w category
    if dynamicFlag:
        topK = getKpNum(image_category)//2
    ohem_mask = adjsut_mask(mloss, mmask, topK)
    # zero the ground truth everywhere except the selected channels
    ohem_gthmap = ohem_mask * gthmap
    return ohem_mask, ohem_gthmap
def adjsut_mask(loss, input_mask, topk):
    """Build a mask keeping only the *topk* highest-loss channels.

    Channels whose per-channel loss ranks in the top *topk* get 1.0 over the
    whole spatial plane; every other channel is zeroed.

    Note: the name keeps its original spelling ("adjsut") because callers
    reference it by that name.
    """
    assert (len(loss) == input_mask.shape[-1]), \
        "shape should be same" + str(len(loss)) + " vs " + str(input_mask.shape)
    # np.float was removed in NumPy 1.24; the builtin float is equivalent
    # (both meant float64 here).
    outmask = np.zeros(input_mask.shape, dtype=float)
    # indices of the topk largest losses (sorted ascending, take the tail)
    topk_index = sorted(range(len(loss)), key=lambda i: loss[i])[-topk:]
    for i in topk_index:
        outmask[:, :, i] = 1.0
    return outmask
| 27.383562 | 109 | 0.63932 |
import sys
sys.path.insert(0, "../unet/")
from keras.models import *
from keras.layers import *
from utils import np_euclidean_l2
from dataset import getKpNum
def generate_topk_mask_ohem(input_data, gthmap, keras_model, graph, topK, image_category, dynamicFlag=False):
mimg, mmask = input_data
ximg = mimg[np.newaxis,:,:,:]
xmask = mmask[np.newaxis,:,:,:]
if len(keras_model.input_layers) == 3:
inputs = [ximg, xmask, xmask]
else:
inputs = [ximg, xmask]
with graph.as_default():
keras_output = keras_model.predict(inputs)
outhmap = keras_output[-1]
channel_num = gthmap.shape[-1]
mloss = list()
for i in range(channel_num):
_dtmap = outhmap[0, :, :, i]
_gtmap = gthmap[:, :, i]
loss = np_euclidean_l2(_dtmap, _gtmap)
mloss.append(loss)
if dynamicFlag:
topK = getKpNum(image_category)//2
ohem_mask = adjsut_mask(mloss, mmask, topK)
ohem_gthmap = ohem_mask * gthmap
return ohem_mask, ohem_gthmap
def adjsut_mask(loss, input_mask, topk):
assert (len(loss) == input_mask.shape[-1]), \
"shape should be same" + str(len(loss)) + " vs " + str(input_mask.shape)
outmask = np.zeros(input_mask.shape, dtype=np.float)
topk_index = sorted(range(len(loss)), key=lambda i:loss[i])[-topk:]
for i in range(len(loss)):
if i in topk_index:
outmask[:,:,i] = 1.0
return outmask
| true | true |
1c3a864ae8eabfe2c5b0f38f8dad8915a9ad63fe | 918 | py | Python | fbdplc/wires.py | Jmeyer1292/block_diagram_z3 | b7180d2dedc33ccb86aa3c58c898dd7adb9653fe | [
"Apache-2.0"
] | 4 | 2021-09-18T13:32:57.000Z | 2022-03-15T22:13:56.000Z | fbdplc/wires.py | Jmeyer1292/block_diagram_z3 | b7180d2dedc33ccb86aa3c58c898dd7adb9653fe | [
"Apache-2.0"
] | null | null | null | fbdplc/wires.py | Jmeyer1292/block_diagram_z3 | b7180d2dedc33ccb86aa3c58c898dd7adb9653fe | [
"Apache-2.0"
] | 2 | 2021-12-06T20:19:04.000Z | 2022-03-15T22:13:58.000Z | '''
Edges in a block diagram computational graph. The edges themselves don't have direction,
but the ports that they attach to may.
'''
class WireConnection:
    """Marker base class for one terminal of a wire."""
    pass
class NamedConnection(WireConnection):
    """Terminal that attaches to a specific named port on a target element."""

    def __init__(self, target_uid: int, target_port: str):
        self.target_uid = target_uid
        self.target_port = target_port

    def __str__(self):
        return 'NamedConnection(id={}, port={})'.format(
            self.target_uid, self.target_port)
class IdentConnection(WireConnection):
    """Terminal identified only by the target element's uid."""

    def __init__(self, target_uid: int):
        self.target_uid = target_uid

    def __str__(self):
        return 'IdentConnection(id={})'.format(self.target_uid)
class Wire:
    '''
    Wires in TIA's S7 XML format can have more than two terminals, but they
    are always decomposed into a series of two-terminal wires, so an
    instance only ever holds a pair of connection endpoints.
    '''
    def __init__(self, a: WireConnection, b: WireConnection):
        self.a, self.b = a, b
| 24.810811 | 95 | 0.686275 |
class WireConnection:
pass
class NamedConnection(WireConnection):
def __init__(self, target_uid: int, target_port: str):
self.target_uid = target_uid
self.target_port = target_port
def __str__(self):
return f'NamedConnection(id={self.target_uid}, port={self.target_port})'
class IdentConnection(WireConnection):
def __init__(self, target_uid: int):
self.target_uid = target_uid
def __str__(self):
return f'IdentConnection(id={self.target_uid})'
class Wire:
def __init__(self, a: WireConnection, b: WireConnection):
self.a = a
self.b = b
| true | true |
1c3a86c8aa044f75d9b0a9d1eb1b70c3f380d689 | 8,071 | py | Python | addons/extras.py | NightYoshi370/Kurisu | a4a40498791a6a54f8bec6bc89444dd5a9b5651a | [
"Apache-2.0"
] | null | null | null | addons/extras.py | NightYoshi370/Kurisu | a4a40498791a6a54f8bec6bc89444dd5a9b5651a | [
"Apache-2.0"
] | null | null | null | addons/extras.py | NightYoshi370/Kurisu | a4a40498791a6a54f8bec6bc89444dd5a9b5651a | [
"Apache-2.0"
] | null | null | null | import datetime
import discord
import os
import random
import re
import string
from discord.ext import commands
from sys import argv
class Extras:
    """
    Extra things.
    """
    def __init__(self, bot):
        self.bot = bot
        print('Addon "{}" loaded'.format(self.__class__.__name__))

    # One-shot confirmation token for .prune30; regenerated on every attempt.
    prune_key = "nokey"

    @commands.command()
    async def kurisu(self):
        """About Kurisu"""
        embed = discord.Embed(title="Kurisu", color=discord.Color.green())
        embed.set_author(name="916253 and ihaveahax")
        embed.set_thumbnail(url="http://i.imgur.com/hjVY4Et.jpg")
        embed.url = "https://github.com/916253/Kurisu"
        embed.description = "Kurisu, the Nintendo Homebrew Discord bot!"
        await self.bot.say("", embed=embed)

    @commands.command()
    async def membercount(self):
        """Prints the member count of the server."""
        await self.bot.say("{} has {:,} members!".format(self.bot.server.name, self.bot.server.member_count))

    @commands.has_permissions(ban_members=True)
    @commands.command(hidden=True)
    async def embedtext(self, *, text):
        """Embed content."""
        await self.bot.say(embed=discord.Embed(description=text))

    @commands.command(hidden=True)
    async def timedelta(self, length):
        """Show now, now + parsed duration (e.g. "1d2h30m"), and the difference."""
        # thanks Luc#5653
        units = {
            "d": 86400,
            "h": 3600,
            "m": 60,
            "s": 1
        }
        seconds = 0
        match = re.findall("([0-9]+[smhd])", length)  # Thanks to 3dshax server's former bot
        if match is None:
            return None
        for item in match:
            seconds += int(item[:-1]) * units[item[-1]]
        curr = datetime.datetime.now()
        diff = datetime.timedelta(seconds=seconds)
        # BUGFIX: this previously referenced an undefined name ``td``
        # (NameError on every invocation); the dead days/hours/minutes
        # computation was removed since the message only uses curr and diff.
        msg = "```\ncurr: {}\nnew: {}\ndiff: {}\n```".format(
            curr,
            curr + diff,
            diff
        )
        await self.bot.say(msg)

    @commands.has_permissions(manage_nicknames=True)
    @commands.command()
    async def estprune(self, days=30):
        """Estimate count of members that would be pruned based on the amount of days. Staff only."""
        if days > 30:
            await self.bot.say("Maximum 30 days")
            return
        if days < 1:
            await self.bot.say("Minimum 1 day")
            return
        msg = await self.bot.say("I'm figuring this out!".format(self.bot.server.name))
        count = await self.bot.estimate_pruned_members(server=self.bot.server, days=days)
        await self.bot.edit_message(msg, "{:,} members inactive for {} day(s) would be kicked from {}!".format(count, days, self.bot.server.name))

    @commands.has_permissions(manage_nicknames=True)
    @commands.command(pass_context=True)
    async def prune30(self, ctx, key=""):
        """Prune members that are inactive for 30 days. Staff only."""
        if self.bot.pruning > 0:
            await self.bot.say("Pruning is already in progress.")
            return
        if key != self.prune_key:
            if key != "":
                await self.bot.say("That's not the correct key.")
            # Hand out a fresh one-shot key the caller must echo to confirm.
            self.prune_key = ''.join(random.sample(string.ascii_letters, 8))
            await self.bot.say("Are you sure you want to prune members inactive for 30 days?\nTo see how many members get kicked, use `.estprune`.\nTo confirm the prune, use the command `.prune30 {}`.".format(self.prune_key))
            return
        self.prune_key = ''.join(random.sample(string.ascii_letters, 8))
        await self.bot.say("Starting pruning!")
        count = await self.bot.prune_members(self.bot.server, days=30)
        self.bot.pruning = count
        await self.bot.send_message(self.bot.mods_channel, "{:,} are currently being kicked from {}!".format(count, self.bot.server.name))
        msg = "👢 **Prune**: {} pruned {:,} members".format(ctx.message.author.mention, count)
        await self.bot.send_message(self.bot.modlogs_channel, msg)

    @commands.has_permissions(manage_nicknames=True)
    @commands.command()
    async def disableleavelogs(self):
        """DEBUG COMMAND"""
        self.bot.pruning = True
        await self.bot.say("disable")

    @commands.has_permissions(manage_nicknames=True)
    @commands.command()
    async def enableleavelogs(self):
        """DEBUG COMMAND"""
        self.bot.pruning = False
        await self.bot.say("enable")

    @commands.command(name="32c3")
    async def _32c3(self):
        """Console Hacking 2015"""
        await self.bot.say("https://www.youtube.com/watch?v=bZczf57HSag")

    @commands.command(name="33c3")
    async def _33c3(self):
        """Nintendo Hacking 2016"""
        await self.bot.say("https://www.youtube.com/watch?v=8C5cn_Qj0G8")

    @commands.command()
    async def de(self):
        # NOTE(review): ``invalid`` is not defined anywhere visible, so this
        # command raises NameError when invoked — possibly an intentional
        # error-handler trigger; confirm before removing.
        invalid()

    @commands.has_permissions(administrator=True)
    @commands.command(pass_context=True, hidden=True)
    async def dumpchannel(self, ctx, channel_name, limit=100):
        """Dump 100 messages from a channel to a file."""
        channel = ctx.message.channel_mentions[0]
        await self.bot.say("Dumping {} messages from {}".format(limit, channel.mention))
        os.makedirs("#{}-{}".format(channel.name, channel.id), exist_ok=True)
        async for message in self.bot.logs_from(channel, limit=limit):
            with open("#{}-{}/{}.txt".format(channel.name, channel.id, message.id), "w") as f:
                f.write(message.content)
        await self.bot.say("Done!")

    @commands.command(pass_context=True, hidden=True)
    async def togglechannel(self, ctx, channelname):
        """Enable or disable access to specific channels."""
        author = ctx.message.author
        await self.bot.delete_message(ctx.message)
        if channelname == "elsewhere":
            if self.bot.elsewhere_role in author.roles:
                await self.bot.remove_roles(author, self.bot.elsewhere_role)
                await self.bot.send_message(author, "Access to #elsewhere removed.")
            else:
                await self.bot.add_roles(author, self.bot.elsewhere_role)
                await self.bot.send_message(author, "Access to #elsewhere granted.")
        # BUGFIX: this was a separate ``if``, which made "elsewhere" fall
        # through to the final else and also reply "not a valid channel".
        elif channelname == "eventchat":
            if self.bot.eventchat_role in author.roles:
                await self.bot.remove_roles(author, self.bot.eventchat_role)
                # BUGFIX: the removal branch previously said "granted".
                await self.bot.send_message(author, "Access to #eventchat removed.")
            else:
                await self.bot.add_roles(author, self.bot.eventchat_role)
                await self.bot.send_message(author, "Access to #eventchat granted.")
        else:
            await self.bot.send_message(author, "{} is not a valid toggleable channel.".format(channelname))

    @commands.command(pass_context=True)
    async def rainbow(self, ctx):
        """Pride month!"""
        member = ctx.message.author
        if member.nick and member.nick[-1] == "🌈":
            await self.bot.say("Your nickname already ends in a rainbow!")
        elif member.name[-1] == "🌈":
            await self.bot.say("Your name already ends in a rainbow!")
        else:
            await self.bot.say("Your nickname is now \"{} \\🌈\"!".format(member.display_name))
            await self.bot.change_nickname(member, member.display_name + " 🌈")

    @commands.command(pass_context=True)
    async def norainbow(self, ctx):
        """Tired of it."""
        member = ctx.message.author
        if member.nick and member.nick[-1] == "🌈":
            await self.bot.say("Your nickname is now \"{}\"!".format(member.display_name[0:-1].strip()))
            await self.bot.change_nickname(member, member.display_name[0:-1])
        elif member.name[-1] == "🌈":
            await self.bot.say("Your username is the one with the rainbow!")
        else:
            await self.bot.say("You don't have a rainbow!")
def setup(bot):
    """discord.py extension entry point: attach the Extras cog to *bot*."""
    bot.add_cog(Extras(bot))
| 42.478947 | 225 | 0.61752 | import datetime
import discord
import os
import random
import re
import string
from discord.ext import commands
from sys import argv
class Extras:
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
prune_key = "nokey"
@commands.command()
async def kurisu(self):
embed = discord.Embed(title="Kurisu", color=discord.Color.green())
embed.set_author(name="916253 and ihaveahax")
embed.set_thumbnail(url="http://i.imgur.com/hjVY4Et.jpg")
embed.url = "https://github.com/916253/Kurisu"
embed.description = "Kurisu, the Nintendo Homebrew Discord bot!"
await self.bot.say("", embed=embed)
@commands.command()
async def membercount(self):
await self.bot.say("{} has {:,} members!".format(self.bot.server.name, self.bot.server.member_count))
@commands.has_permissions(ban_members=True)
@commands.command(hidden=True)
async def embedtext(self, *, text):
await self.bot.say(embed=discord.Embed(description=text))
@commands.command(hidden=True)
async def timedelta(self, length):
units = {
"d": 86400,
"h": 3600,
"m": 60,
"s": 1
}
seconds = 0
match = re.findall("([0-9]+[smhd])", length)
if match is None:
return None
for item in match:
seconds += int(item[:-1]) * units[item[-1]]
curr = datetime.datetime.now()
diff = datetime.timedelta(seconds=seconds)
# http://stackoverflow.com/questions/2119472/convert-a-timedelta-to-days-hours-and-minutes
days, hours, minutes = td.days, td.seconds//3600, (td.seconds//60)%60
msg = "```\ncurr: {}\nnew: {}\ndiff: {}\n```".format(
curr,
curr + diff,
diff
)
await self.bot.say(msg)
@commands.has_permissions(manage_nicknames=True)
@commands.command()
async def estprune(self, days=30):
if days > 30:
await self.bot.say("Maximum 30 days")
return
if days < 1:
await self.bot.say("Minimum 1 day")
return
msg = await self.bot.say("I'm figuring this out!".format(self.bot.server.name))
count = await self.bot.estimate_pruned_members(server=self.bot.server, days=days)
await self.bot.edit_message(msg, "{:,} members inactive for {} day(s) would be kicked from {}!".format(count, days, self.bot.server.name))
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True)
async def prune30(self, ctx, key=""):
if self.bot.pruning > 0:
await self.bot.say("Pruning is already in progress.")
return
if key != self.prune_key:
if key != "":
await self.bot.say("That's not the correct key.")
self.prune_key = ''.join(random.sample(string.ascii_letters, 8))
await self.bot.say("Are you sure you want to prune members inactive for 30 days?\nTo see how many members get kicked, use `.estprune`.\nTo confirm the prune, use the command `.prune30 {}`.".format(self.prune_key))
return
self.prune_key = ''.join(random.sample(string.ascii_letters, 8))
await self.bot.say("Starting pruning!")
count = await self.bot.prune_members(self.bot.server, days=30)
self.bot.pruning = count
await self.bot.send_message(self.bot.mods_channel, "{:,} are currently being kicked from {}!".format(count, self.bot.server.name))
msg = "👢 **Prune**: {} pruned {:,} members".format(ctx.message.author.mention, count)
await self.bot.send_message(self.bot.modlogs_channel, msg)
@commands.has_permissions(manage_nicknames=True)
@commands.command()
async def disableleavelogs(self):
self.bot.pruning = True
await self.bot.say("disable")
@commands.has_permissions(manage_nicknames=True)
@commands.command()
async def enableleavelogs(self):
self.bot.pruning = False
await self.bot.say("enable")
@commands.command(name="32c3")
async def _32c3(self):
await self.bot.say("https://www.youtube.com/watch?v=bZczf57HSag")
@commands.command(name="33c3")
async def _33c3(self):
await self.bot.say("https://www.youtube.com/watch?v=8C5cn_Qj0G8")
@commands.command()
async def de(self):
invalid()
@commands.has_permissions(administrator=True)
@commands.command(pass_context=True, hidden=True)
async def dumpchannel(self, ctx, channel_name, limit=100):
channel = ctx.message.channel_mentions[0]
await self.bot.say("Dumping {} messages from {}".format(limit, channel.mention))
os.makedirs("#{}-{}".format(channel.name, channel.id), exist_ok=True)
async for message in self.bot.logs_from(channel, limit=limit):
with open("#{}-{}/{}.txt".format(channel.name, channel.id, message.id), "w") as f:
f.write(message.content)
await self.bot.say("Done!")
@commands.command(pass_context=True, hidden=True)
async def togglechannel(self, ctx, channelname):
author = ctx.message.author
await self.bot.delete_message(ctx.message)
if channelname == "elsewhere":
if self.bot.elsewhere_role in author.roles:
await self.bot.remove_roles(author, self.bot.elsewhere_role)
await self.bot.send_message(author, "Access to #elsewhere removed.")
else:
await self.bot.add_roles(author, self.bot.elsewhere_role)
await self.bot.send_message(author, "Access to #elsewhere granted.")
if channelname == "eventchat":
if self.bot.eventchat_role in author.roles:
await self.bot.remove_roles(author, self.bot.eventchat_role)
await self.bot.send_message(author, "Access to #eventchat granted.")
else:
await self.bot.add_roles(author, self.bot.eventchat_role)
await self.bot.send_message(author, "Access to #eventchat granted.")
else:
await self.bot.send_message(author, "{} is not a valid toggleable channel.".format(channelname))
@commands.command(pass_context=True)
async def rainbow(self, ctx):
member = ctx.message.author
if member.nick and member.nick[-1] == "🌈":
await self.bot.say("Your nickname already ends in a rainbow!")
elif member.name[-1] == "🌈":
await self.bot.say("Your name already ends in a rainbow!")
else:
await self.bot.say("Your nickname is now \"{} \\🌈\"!".format(member.display_name))
await self.bot.change_nickname(member, member.display_name + " 🌈")
@commands.command(pass_context=True)
async def norainbow(self, ctx):
member = ctx.message.author
if member.nick and member.nick[-1] == "🌈":
await self.bot.say("Your nickname is now \"{}\"!".format(member.display_name[0:-1].strip()))
await self.bot.change_nickname(member, member.display_name[0:-1])
elif member.name[-1] == "🌈":
await self.bot.say("Your username is the one with the rainbow!")
else:
await self.bot.say("You don't have a rainbow!")
def setup(bot):
bot.add_cog(Extras(bot))
| true | true |
1c3a87252df33ddf8d8ca65308e4c22681cf4307 | 286 | py | Python | src/catchbot/updater.py | grihabor/catch-hook-telegram-bot | 1f3c6a5d56d5ebba3d4620b532acde2ed734a75e | [
"MIT"
] | null | null | null | src/catchbot/updater.py | grihabor/catch-hook-telegram-bot | 1f3c6a5d56d5ebba3d4620b532acde2ed734a75e | [
"MIT"
] | 4 | 2018-02-21T11:25:49.000Z | 2018-06-23T15:51:51.000Z | src/catchbot/updater.py | grihabor/catch-hook-telegram-bot | 1f3c6a5d56d5ebba3d4620b532acde2ed734a75e | [
"MIT"
] | null | null | null | import telegram.ext as tg
from .commands import commands
def create_updater(token):
    """Build a telegram Updater with every command from .commands registered."""
    updater = tg.Updater(token=token)
    dispatcher = updater.dispatcher
    for cmd in commands:
        # Each command function is registered under its own name.
        dispatcher.add_handler(tg.CommandHandler(cmd.__name__, cmd))
    return updater
| 20.428571 | 62 | 0.727273 | import telegram.ext as tg
from .commands import commands
def create_updater(token):
updater = tg.Updater(token=token)
for command in commands:
handler = tg.CommandHandler(command.__name__, command)
updater.dispatcher.add_handler(handler)
return updater
| true | true |
1c3a8743e8a520f7f7fa08e01d17d42fc8701986 | 1,594 | py | Python | examples/performance_example.py | ntezak/plotille | 7baa4fa176ced99f7a9ab9688e99b525ef3edf4d | [
"MIT"
] | 157 | 2017-09-28T12:16:52.000Z | 2022-03-31T08:13:23.000Z | examples/performance_example.py | ntezak/plotille | 7baa4fa176ced99f7a9ab9688e99b525ef3edf4d | [
"MIT"
] | 43 | 2017-11-01T19:21:21.000Z | 2022-03-27T08:36:56.000Z | examples/performance_example.py | ntezak/plotille | 7baa4fa176ced99f7a9ab9688e99b525ef3edf4d | [
"MIT"
] | 12 | 2018-01-14T08:05:07.000Z | 2021-07-31T05:15:38.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# The MIT License
# Copyright (c) 2017 - 2021 Tammo Ippen, tammo.ippen@posteo.de
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from random import random
from time import time
import plotille
def main():
y = []
for _ in range(1000):
y.append(random())
x = list(range(1000))
t0 = time()
for _ in range(100):
plotille.plot(x, y, height=30, width=60)
print('Took {:.3f} sec'.format(time() - t0))
if __name__ == '__main__':
main()
| 34.652174 | 82 | 0.733375 |
from __future__ import absolute_import, division, print_function, unicode_literals
from random import random
from time import time
import plotille
def main():
y = []
for _ in range(1000):
y.append(random())
x = list(range(1000))
t0 = time()
for _ in range(100):
plotille.plot(x, y, height=30, width=60)
print('Took {:.3f} sec'.format(time() - t0))
if __name__ == '__main__':
main()
| true | true |
1c3a887dabb03f7c39d9c6c34de15cc8e62da156 | 1,254 | py | Python | sub_price/check_sub_price.py | nengchibawanfan/PriceServer | b8ea95fa60ca5b940417f56c45a757c661d83e27 | [
"Apache-2.0"
] | null | null | null | sub_price/check_sub_price.py | nengchibawanfan/PriceServer | b8ea95fa60ca5b940417f56c45a757c661d83e27 | [
"Apache-2.0"
] | null | null | null | sub_price/check_sub_price.py | nengchibawanfan/PriceServer | b8ea95fa60ca5b940417f56c45a757c661d83e27 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Author: zhangchao
# Date: 2019-07-18
# Desc: 监控获取价格是否正常,不正常,就重启
import datetime
import sys
sys.path.append("..")
from priceserver.common.db_connection import ConnectRedis
import os
import time
# Watchdog loop: each price feed appears to write a timestamp into Redis
# under "Receive_the_data_*"; when that timestamp is older than 5 minutes
# the matching pm2 process is restarted. (Presumably epoch-seconds
# heartbeats — TODO confirm against the feed writers.)
r = ConnectRedis()
while True:
    now = time.time()
    # Last-update timestamps written by the individual feed processes.
    bytetrade_data = float(r.get("Receive_the_data_bytetrade1"))
    huobi_data = float(r.get("Receive_the_data_huobi1"))
    coinbase_data = float(r.get("Receive_the_data_coinbase1"))
    # Age of each timestamp, in seconds.
    bytetrade = now - bytetrade_data
    huobi = now - huobi_data
    coinbase = now - coinbase_data
    if bytetrade > 60 * 5:
        cmd_str = 'pm2 restart ' + "sub_bytetrade_price_test"
        result = os.system(cmd_str)
        print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        print("重启bytetrade")
    if huobi > 60 * 5:
        cmd_str = 'pm2 restart ' + "sub_huobi_price_test"
        result = os.system(cmd_str)
        print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        print("重启huobipro")
    if coinbase > 60 * 5:
        cmd_str = 'pm2 restart ' + "get_coinbase_price_test"
        result = os.system(cmd_str)
        print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        print("重启coinbase")
    # Re-check every 3 minutes.
    time.sleep(60 * 3)
| 24.115385 | 68 | 0.633174 |
import datetime
import sys
sys.path.append("..")
from priceserver.common.db_connection import ConnectRedis
import os
import time
r = ConnectRedis()
while True:
now = time.time()
bytetrade_data = float(r.get("Receive_the_data_bytetrade1"))
huobi_data = float(r.get("Receive_the_data_huobi1"))
coinbase_data = float(r.get("Receive_the_data_coinbase1"))
bytetrade = now - bytetrade_data
huobi = now - huobi_data
coinbase = now - coinbase_data
if bytetrade > 60 * 5:
cmd_str = 'pm2 restart ' + "sub_bytetrade_price_test"
result = os.system(cmd_str)
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print("重启bytetrade")
if huobi > 60 * 5:
cmd_str = 'pm2 restart ' + "sub_huobi_price_test"
result = os.system(cmd_str)
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print("重启huobipro")
if coinbase > 60 * 5:
cmd_str = 'pm2 restart ' + "get_coinbase_price_test"
result = os.system(cmd_str)
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print("重启coinbase")
time.sleep(60 * 3)
| true | true |
1c3a88c75dc22fd6c086daa4e3caa2aea840cf10 | 143 | py | Python | painter/config_example.py | pwentrys/SubstanceHelpers | 8fb56158ee149792219e9cdb9479aaaed09a46bc | [
"MIT"
] | 2 | 2018-09-12T23:35:33.000Z | 2019-10-09T06:56:17.000Z | painter/config_example.py | pwentrys/SubstanceHelpers | 8fb56158ee149792219e9cdb9479aaaed09a46bc | [
"MIT"
] | null | null | null | painter/config_example.py | pwentrys/SubstanceHelpers | 8fb56158ee149792219e9cdb9479aaaed09a46bc | [
"MIT"
] | null | null | null | NAME = "Painter"
# Install path
PATH = 'C:\Program Files\Allegorithmic\Substance Painter 2'
# Shelf locs
SHELVES = [
'C:\PainterShelf'
]
| 14.3 | 59 | 0.685315 | NAME = "Painter"
PATH = 'C:\Program Files\Allegorithmic\Substance Painter 2'
SHELVES = [
'C:\PainterShelf'
]
| true | true |
1c3a8a24e9fc061f7eddf2ad3093701e875437f6 | 541 | py | Python | tests/functional/test_user.py | dimkonko/pyramid_api_example | b3bfb8dbe623544af9b147081d3e25c75a191c24 | [
"MIT"
] | null | null | null | tests/functional/test_user.py | dimkonko/pyramid_api_example | b3bfb8dbe623544af9b147081d3e25c75a191c24 | [
"MIT"
] | null | null | null | tests/functional/test_user.py | dimkonko/pyramid_api_example | b3bfb8dbe623544af9b147081d3e25c75a191c24 | [
"MIT"
] | null | null | null | from http import HTTPStatus
from tests import utils
from tests.functional import BaseFunctionalTest
from pyramid_api_example import routes
from pyramid_api_example.constants import json_keys
class UserFuncTest(BaseFunctionalTest):
    """Functional tests for the single-user endpoint."""

    def test_get_user(self):
        # Fetching a known user sid must return 200 and echo the sid back.
        user_sid = 1
        response = self.app.get(
            routes.USER.format(user_sid=user_sid), status=HTTPStatus.OK)
        payload = utils.parse_json(response.body)
        self.assertEqual(user_sid, payload[json_keys.USER_SID])
| 28.473684 | 78 | 0.770795 | from http import HTTPStatus
from tests import utils
from tests.functional import BaseFunctionalTest
from pyramid_api_example import routes
from pyramid_api_example.constants import json_keys
class UserFuncTest(BaseFunctionalTest):
def test_get_user(self):
expected_user_sid = 1
url = routes.USER.format(user_sid=expected_user_sid)
res = self.app.get(url, status=HTTPStatus.OK)
response_json = utils.parse_json(res.body)
self.assertEqual(expected_user_sid, response_json[json_keys.USER_SID])
| true | true |
1c3a8afeb10498842e4277762aa6b8a972be7995 | 68,797 | py | Python | tests/test_build_latex.py | Paebbels/sphinx | 6a0215198fea5ec1dbda398aee8c64999baa0c0d | [
"BSD-2-Clause"
] | null | null | null | tests/test_build_latex.py | Paebbels/sphinx | 6a0215198fea5ec1dbda398aee8c64999baa0c0d | [
"BSD-2-Clause"
] | null | null | null | tests/test_build_latex.py | Paebbels/sphinx | 6a0215198fea5ec1dbda398aee8c64999baa0c0d | [
"BSD-2-Clause"
] | null | null | null | """
test_build_latex
~~~~~~~~~~~~~~~~
Test the build process with LaTeX builder with the test root.
:copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import subprocess
from itertools import product
from shutil import copyfile
from subprocess import PIPE, CalledProcessError
import pytest
from sphinx.builders.latex import default_latex_documents
from sphinx.config import Config
from sphinx.errors import SphinxError
from sphinx.testing.util import strip_escseq
from sphinx.util.osutil import cd, ensuredir
from sphinx.writers.latex import LaTeXTranslator
from .test_build_html import ENV_WARNINGS
# Engines and document classes the LaTeX build is exercised with.
LATEX_ENGINES = ['pdflatex', 'lualatex', 'xelatex']
DOCCLASSES = ['howto', 'manual']
# Style files that must be resolvable by kpsewhich for real compiles to run.
STYLEFILES = ['article.cls', 'fancyhdr.sty', 'titlesec.sty', 'amsmath.sty',
              'framed.sty', 'color.sty', 'fancyvrb.sty',
              'fncychap.sty', 'geometry.sty', 'kvoptions.sty', 'hyperref.sty']

# Regex for the warnings the test root is expected to emit during a build.
LATEX_WARNINGS = ENV_WARNINGS + """\
%(root)s/index.rst:\\d+: WARNING: unknown option: &option
%(root)s/index.rst:\\d+: WARNING: citation not found: missing
%(root)s/index.rst:\\d+: WARNING: a suitable image for latex builder not found: foo.\\*
%(root)s/index.rst:\\d+: WARNING: Could not lex literal_block as "c". Highlighting skipped.
"""
# only run latex if all needed packages are there
def kpsetest(*filenames):
    """Return True iff kpsewhich can resolve every given TeX file."""
    cmd = ['kpsewhich', *filenames]
    try:
        subprocess.run(cmd, stdout=PIPE, stderr=PIPE, check=True)
    except (OSError, CalledProcessError):
        # kpsewhich is missing, or at least one file could not be found.
        return False
    return True
# compile latex document with app.config.latex_engine
def compile_latex_document(app, filename='python.tex'):
    """Compile *filename* inside app.outdir with the configured latex_engine.

    Skips the current test when the engine binary is not installed, and fails
    it (after printing the engine's output) when the engine exits non-zero.
    A copy of the .tex file is kept per engine to aid post-mortem debugging.
    """
    try:
        with cd(app.outdir):
            ensuredir(app.config.latex_engine)
            # keep a copy of latex file for this engine in case test fails
            copyfile(filename, app.config.latex_engine + '/' + filename)
            args = [app.config.latex_engine,
                    '--halt-on-error',
                    '--interaction=nonstopmode',
                    '-output-directory=%s' % app.config.latex_engine,
                    filename]
            subprocess.run(args, stdout=PIPE, stderr=PIPE, check=True)
    except OSError as exc:  # most likely the latex executable was not found
        raise pytest.skip.Exception from exc
    except CalledProcessError as exc:
        print(exc.stdout)
        print(exc.stderr)
        # raise explicitly instead of 'assert False': assertions are stripped
        # when Python runs with -O, which would silently hide the failure
        raise AssertionError('%s exited with return code %s'
                             % (app.config.latex_engine, exc.returncode)) from exc
def skip_if_requested(testfunc):
    """Mark *testfunc* as skipped when the SKIP_LATEX_BUILD env var is set."""
    if 'SKIP_LATEX_BUILD' in os.environ:
        msg = 'Skip LaTeX builds because SKIP_LATEX_BUILD is set'
        # unconditional skip: no need for skipif(True, ...)
        return pytest.mark.skip(reason=msg)(testfunc)
    else:
        return testfunc
def skip_if_stylefiles_notfound(testfunc):
    """Mark *testfunc* as skipped when the required LaTeX styles are missing."""
    # 'not kpsetest(...)' instead of the unidiomatic '... is False' comparison
    if not kpsetest(*STYLEFILES):
        msg = 'not running latex, the required styles do not seem to be installed'
        return pytest.mark.skip(reason=msg)(testfunc)
    else:
        return testfunc
@skip_if_requested
@skip_if_stylefiles_notfound
@pytest.mark.parametrize(
    "engine,docclass",
    product(LATEX_ENGINES, DOCCLASSES),
)
@pytest.mark.sphinx('latex')
def test_build_latex_doc(app, status, warning, engine, docclass):
    """Build the test root and really compile it with each engine/docclass."""
    app.config.latex_engine = engine
    app.config.latex_documents = [app.config.latex_documents[0][:4] + (docclass,)]
    # re-init the builder so the engine/docclass overrides take effect
    app.builder.init()
    LaTeXTranslator.ignore_missing_images = True
    app.builder.build_all()
    # file from latex_additional_files
    assert (app.outdir / 'svgimg.svg').isfile()
    compile_latex_document(app, 'sphinxtests.tex')
@pytest.mark.sphinx('latex')
def test_writer(app, status, warning):
    """Check figure markup (in-table, wrapfigure variants) in the writer output."""
    app.builder.build_all()
    result = (app.outdir / 'sphinxtests.tex').read_text()
    assert ('\\begin{sphinxfigure-in-table}\n\\centering\n\\capstart\n'
            '\\noindent\\sphinxincludegraphics{{img}.png}\n'
            '\\sphinxfigcaption{figure in table}\\label{\\detokenize{markup:id8}}'
            '\\end{sphinxfigure-in-table}\\relax' in result)
    assert ('\\begin{wrapfigure}{r}{0pt}\n\\centering\n'
            '\\noindent\\sphinxincludegraphics{{rimg}.png}\n'
            '\\caption{figure with align option}\\label{\\detokenize{markup:id9}}'
            '\\end{wrapfigure}' in result)
    assert ('\\begin{wrapfigure}{r}{0.500\\linewidth}\n\\centering\n'
            '\\noindent\\sphinxincludegraphics{{rimg}.png}\n'
            '\\caption{figure with align \\& figwidth option}'
            '\\label{\\detokenize{markup:id10}}'
            '\\end{wrapfigure}' in result)
    assert ('\\begin{wrapfigure}{r}{3cm}\n\\centering\n'
            '\\noindent\\sphinxincludegraphics[width=3cm]{{rimg}.png}\n'
            '\\caption{figure with align \\& width option}'
            '\\label{\\detokenize{markup:id11}}'
            '\\end{wrapfigure}' in result)
    assert 'Footnotes' not in result
@pytest.mark.sphinx('latex', testroot='warnings', freshenv=True)
def test_latex_warnings(app, status, warning):
    """Check that the LaTeX build emits exactly the expected warnings."""
    app.builder.build_all()
    # normalize path separators and strip terminal escapes before matching
    warnings = strip_escseq(re.sub(re.escape(os.sep) + '{1,2}', '/', warning.getvalue()))
    warnings_exp = LATEX_WARNINGS % {
        'root': re.escape(app.srcdir.replace(os.sep, '/'))}
    assert re.match(warnings_exp + '$', warnings), \
        'Warnings don\'t match:\n' + \
        '--- Expected (regex):\n' + warnings_exp + \
        '--- Got:\n' + warnings
@pytest.mark.sphinx('latex', testroot='basic')
def test_latex_basic(app, status, warning):
    """Check title and empty release handling in the basic test root."""
    app.builder.build_all()
    result = (app.outdir / 'test.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert r'\title{The basic Sphinx documentation for testing}' in result
    assert r'\release{}' in result
    assert r'\renewcommand{\releasename}{}' in result
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={
                        'latex_documents': [('index', 'test.tex', 'title', 'author', 'manual')]
                    })
def test_latex_basic_manual(app, status, warning):
    """Check the document class used for 'manual' latex_documents."""
    app.builder.build_all()
    result = (app.outdir / 'test.tex').read_text(encoding='utf8')
    print(result)
    assert r'\def\sphinxdocclass{report}' in result
    assert r'\documentclass[letterpaper,10pt,english]{sphinxmanual}' in result
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={
                        'latex_documents': [('index', 'test.tex', 'title', 'author', 'howto')]
                    })
def test_latex_basic_howto(app, status, warning):
    """Check the document class used for 'howto' latex_documents."""
    app.builder.build_all()
    result = (app.outdir / 'test.tex').read_text(encoding='utf8')
    print(result)
    assert r'\def\sphinxdocclass{article}' in result
    assert r'\documentclass[letterpaper,10pt,english]{sphinxhowto}' in result
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={
                        'language': 'ja',
                        'latex_documents': [('index', 'test.tex', 'title', 'author', 'manual')]
                    })
def test_latex_basic_manual_ja(app, status, warning):
    """Check the Japanese document class used for 'manual' documents."""
    app.builder.build_all()
    result = (app.outdir / 'test.tex').read_text(encoding='utf8')
    print(result)
    assert r'\def\sphinxdocclass{ujbook}' in result
    assert r'\documentclass[letterpaper,10pt,dvipdfmx]{sphinxmanual}' in result
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={
                        'language': 'ja',
                        'latex_documents': [('index', 'test.tex', 'title', 'author', 'howto')]
                    })
def test_latex_basic_howto_ja(app, status, warning):
    """Check the Japanese document class used for 'howto' documents."""
    app.builder.build_all()
    result = (app.outdir / 'test.tex').read_text(encoding='utf8')
    print(result)
    assert r'\def\sphinxdocclass{ujreport}' in result
    assert r'\documentclass[letterpaper,10pt,dvipdfmx]{sphinxhowto}' in result
@pytest.mark.sphinx('latex', testroot='latex-theme')
def test_latex_theme(app, status, warning):
    """Check the document class chosen by a custom LaTeX theme."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text(encoding='utf8')
    print(result)
    assert r'\def\sphinxdocclass{book}' in result
    assert r'\documentclass[a4paper,12pt,english]{sphinxbook}' in result
@pytest.mark.sphinx('latex', testroot='latex-theme',
                    confoverrides={'latex_elements': {'papersize': 'b5paper',
                                                      'pointsize': '9pt'}})
def test_latex_theme_papersize(app, status, warning):
    """Check that latex_elements override the theme's paper/point size."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text(encoding='utf8')
    print(result)
    assert r'\def\sphinxdocclass{book}' in result
    assert r'\documentclass[b5paper,9pt,english]{sphinxbook}' in result
@pytest.mark.sphinx('latex', testroot='latex-theme',
                    confoverrides={'latex_theme_options': {'papersize': 'b5paper',
                                                           'pointsize': '9pt'}})
def test_latex_theme_options(app, status, warning):
    """Check that latex_theme_options override the theme's paper/point size."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text(encoding='utf8')
    print(result)
    assert r'\def\sphinxdocclass{book}' in result
    assert r'\documentclass[b5paper,9pt,english]{sphinxbook}' in result
@pytest.mark.sphinx('latex', testroot='basic', confoverrides={'language': 'zh'})
def test_latex_additional_settings_for_language_code(app, status, warning):
    """Check that Chinese builds pull in the xeCJK package."""
    app.builder.build_all()
    result = (app.outdir / 'test.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert r'\usepackage{xeCJK}' in result
@pytest.mark.sphinx('latex', testroot='basic', confoverrides={'language': 'el'})
def test_latex_additional_settings_for_greek(app, status, warning):
    """Check that Greek builds use polyglossia with a Greek tt font."""
    app.builder.build_all()
    result = (app.outdir / 'test.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\usepackage{polyglossia}\n\\setmainlanguage{greek}' in result
    assert '\\newfontfamily\\greekfonttt{FreeMono}' in result
@pytest.mark.sphinx('latex', testroot='latex-title')
def test_latex_title_after_admonitions(app, status, warning):
    """Check that a document title is still found after leading admonitions."""
    app.builder.build_all()
    result = (app.outdir / 'test.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\title{test\\sphinxhyphen{}latex\\sphinxhyphen{}title}' in result
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={'release': '1.0_0'})
def test_latex_release(app, status, warning):
    """Check escaping of the release string and the release name."""
    app.builder.build_all()
    result = (app.outdir / 'test.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert r'\release{1.0\_0}' in result
    assert r'\renewcommand{\releasename}{Release}' in result
@pytest.mark.sphinx('latex', testroot='numfig',
                    confoverrides={'numfig': True})
def test_numref(app, status, warning):
    """Check numbered references (figure/table/listing/section) with defaults."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('\\hyperref[\\detokenize{index:fig1}]'
            '{Fig.\\@ \\ref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:fig22}]'
            '{Figure\\ref{\\detokenize{baz:fig22}}}') in result
    assert ('\\hyperref[\\detokenize{index:table-1}]'
            '{Table \\ref{\\detokenize{index:table-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:table22}]'
            '{Table:\\ref{\\detokenize{baz:table22}}}') in result
    assert ('\\hyperref[\\detokenize{index:code-1}]'
            '{Listing \\ref{\\detokenize{index:code-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:code22}]'
            '{Code\\sphinxhyphen{}\\ref{\\detokenize{baz:code22}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]'
            '{Section \\ref{\\detokenize{foo:foo}}}') in result
    assert ('\\hyperref[\\detokenize{bar:bar-a}]'
            '{Section \\ref{\\detokenize{bar:bar-a}}}') in result
    assert ('\\hyperref[\\detokenize{index:fig1}]{Fig.\\ref{\\detokenize{index:fig1}} '
            '\\nameref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]{Sect.\\ref{\\detokenize{foo:foo}} '
            '\\nameref{\\detokenize{foo:foo}}}') in result
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\addto\captionsenglish{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsenglish{\renewcommand{\tablename}{Table }}' in result
    assert r'\addto\captionsenglish{\renewcommand{\literalblockname}{Listing}}' in result
@pytest.mark.sphinx(
    'latex', testroot='numfig',
    confoverrides={'numfig': True,
                   'numfig_format': {'figure': 'Figure:%s',
                                     'table': 'Tab_%s',
                                     'code-block': 'Code-%s',
                                     'section': 'SECTION-%s'}})
def test_numref_with_prefix1(app, status, warning):
    """Check numbered references with custom numfig_format prefixes."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\ref{\\detokenize{index:fig1}}' in result
    assert '\\ref{\\detokenize{baz:fig22}}' in result
    assert '\\ref{\\detokenize{index:table-1}}' in result
    assert '\\ref{\\detokenize{baz:table22}}' in result
    assert '\\ref{\\detokenize{index:code-1}}' in result
    assert '\\ref{\\detokenize{baz:code22}}' in result
    assert ('\\hyperref[\\detokenize{index:fig1}]'
            '{Figure:\\ref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:fig22}]'
            '{Figure\\ref{\\detokenize{baz:fig22}}}') in result
    assert ('\\hyperref[\\detokenize{index:table-1}]'
            '{Tab\\_\\ref{\\detokenize{index:table-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:table22}]'
            '{Table:\\ref{\\detokenize{baz:table22}}}') in result
    assert ('\\hyperref[\\detokenize{index:code-1}]'
            '{Code\\sphinxhyphen{}\\ref{\\detokenize{index:code-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:code22}]'
            '{Code\\sphinxhyphen{}\\ref{\\detokenize{baz:code22}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]'
            '{SECTION\\sphinxhyphen{}\\ref{\\detokenize{foo:foo}}}') in result
    assert ('\\hyperref[\\detokenize{bar:bar-a}]'
            '{SECTION\\sphinxhyphen{}\\ref{\\detokenize{bar:bar-a}}}') in result
    assert ('\\hyperref[\\detokenize{index:fig1}]{Fig.\\ref{\\detokenize{index:fig1}} '
            '\\nameref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]{Sect.\\ref{\\detokenize{foo:foo}} '
            '\\nameref{\\detokenize{foo:foo}}}') in result
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\addto\captionsenglish{\renewcommand{\figurename}{Figure:}}' in result
    assert r'\addto\captionsenglish{\renewcommand{\tablename}{Tab\_}}' in result
    assert r'\addto\captionsenglish{\renewcommand{\literalblockname}{Code-}}' in result
@pytest.mark.sphinx(
    'latex', testroot='numfig',
    confoverrides={'numfig': True,
                   'numfig_format': {'figure': 'Figure:%s.',
                                     'table': 'Tab_%s:',
                                     'code-block': 'Code-%s | ',
                                     'section': 'SECTION_%s_'}})
def test_numref_with_prefix2(app, status, warning):
    """Check numbered references with numfig_format suffixes after the number."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('\\hyperref[\\detokenize{index:fig1}]'
            '{Figure:\\ref{\\detokenize{index:fig1}}.\\@}') in result
    assert ('\\hyperref[\\detokenize{baz:fig22}]'
            '{Figure\\ref{\\detokenize{baz:fig22}}}') in result
    assert ('\\hyperref[\\detokenize{index:table-1}]'
            '{Tab\\_\\ref{\\detokenize{index:table-1}}:}') in result
    assert ('\\hyperref[\\detokenize{baz:table22}]'
            '{Table:\\ref{\\detokenize{baz:table22}}}') in result
    assert ('\\hyperref[\\detokenize{index:code-1}]{Code\\sphinxhyphen{}\\ref{\\detokenize{index:code-1}} '
            '| }') in result
    assert ('\\hyperref[\\detokenize{baz:code22}]'
            '{Code\\sphinxhyphen{}\\ref{\\detokenize{baz:code22}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]'
            '{SECTION\\_\\ref{\\detokenize{foo:foo}}\\_}') in result
    assert ('\\hyperref[\\detokenize{bar:bar-a}]'
            '{SECTION\\_\\ref{\\detokenize{bar:bar-a}}\\_}') in result
    assert ('\\hyperref[\\detokenize{index:fig1}]{Fig.\\ref{\\detokenize{index:fig1}} '
            '\\nameref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]{Sect.\\ref{\\detokenize{foo:foo}} '
            '\\nameref{\\detokenize{foo:foo}}}') in result
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\addto\captionsenglish{\renewcommand{\figurename}{Figure:}}' in result
    assert r'\def\fnum@figure{\figurename\thefigure{}.}' in result
    assert r'\addto\captionsenglish{\renewcommand{\tablename}{Tab\_}}' in result
    assert r'\def\fnum@table{\tablename\thetable{}:}' in result
    assert r'\addto\captionsenglish{\renewcommand{\literalblockname}{Code-}}' in result
@pytest.mark.sphinx(
    'latex', testroot='numfig',
    confoverrides={'numfig': True, 'language': 'ja'})
def test_numref_with_language_ja(app, status, warning):
    """Check numbered references with Japanese caption names.

    The output files contain Japanese (UTF-8) text, so read them with an
    explicit encoding instead of the locale default — otherwise this test
    fails on non-UTF-8 locales (e.g. a C-locale CI runner).
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text(encoding='utf8')
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('\\hyperref[\\detokenize{index:fig1}]'
            '{\u56f3 \\ref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:fig22}]'
            '{Figure\\ref{\\detokenize{baz:fig22}}}') in result
    assert ('\\hyperref[\\detokenize{index:table-1}]'
            '{\u8868 \\ref{\\detokenize{index:table-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:table22}]'
            '{Table:\\ref{\\detokenize{baz:table22}}}') in result
    assert ('\\hyperref[\\detokenize{index:code-1}]'
            '{\u30ea\u30b9\u30c8 \\ref{\\detokenize{index:code-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:code22}]'
            '{Code\\sphinxhyphen{}\\ref{\\detokenize{baz:code22}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]'
            '{\\ref{\\detokenize{foo:foo}} \u7ae0}') in result
    assert ('\\hyperref[\\detokenize{bar:bar-a}]'
            '{\\ref{\\detokenize{bar:bar-a}} \u7ae0}') in result
    assert ('\\hyperref[\\detokenize{index:fig1}]{Fig.\\ref{\\detokenize{index:fig1}} '
            '\\nameref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]{Sect.\\ref{\\detokenize{foo:foo}} '
            '\\nameref{\\detokenize{foo:foo}}}') in result
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text(encoding='utf8')
    print(result)
    assert '\\@iden{\\renewcommand{\\figurename}{図 }}' in result
    assert '\\@iden{\\renewcommand{\\tablename}{表 }}' in result
    assert '\\@iden{\\renewcommand{\\literalblockname}{リスト}}' in result
@pytest.mark.sphinx('latex', testroot='latex-numfig')
def test_latex_obey_numfig_is_false(app, status, warning):
    """Check that sphinx.sty is loaded without numfig options by default."""
    app.builder.build_all()
    result = (app.outdir / 'SphinxManual.tex').read_text()
    assert '\\usepackage{sphinx}' in result
    result = (app.outdir / 'SphinxHowTo.tex').read_text()
    assert '\\usepackage{sphinx}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-numfig',
    confoverrides={'numfig': True, 'numfig_secnum_depth': 0})
def test_latex_obey_numfig_secnum_depth_is_zero(app, status, warning):
    """Check the sphinx.sty options when numfig_secnum_depth is 0."""
    app.builder.build_all()
    result = (app.outdir / 'SphinxManual.tex').read_text()
    assert '\\usepackage[,nonumfigreset,mathnumfig]{sphinx}' in result
    result = (app.outdir / 'SphinxHowTo.tex').read_text()
    assert '\\usepackage[,nonumfigreset,mathnumfig]{sphinx}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-numfig',
    confoverrides={'numfig': True, 'numfig_secnum_depth': 2})
def test_latex_obey_numfig_secnum_depth_is_two(app, status, warning):
    """Check the sphinx.sty options when numfig_secnum_depth is 2.

    The howto class has one less sectioning level, hence numfigreset=3.
    """
    app.builder.build_all()
    result = (app.outdir / 'SphinxManual.tex').read_text()
    assert '\\usepackage[,numfigreset=2,mathnumfig]{sphinx}' in result
    result = (app.outdir / 'SphinxHowTo.tex').read_text()
    assert '\\usepackage[,numfigreset=3,mathnumfig]{sphinx}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-numfig',
    confoverrides={'numfig': True, 'math_numfig': False})
def test_latex_obey_numfig_but_math_numfig_false(app, status, warning):
    """Check that mathnumfig is omitted when math_numfig is disabled."""
    app.builder.build_all()
    result = (app.outdir / 'SphinxManual.tex').read_text()
    assert '\\usepackage[,numfigreset=1]{sphinx}' in result
    result = (app.outdir / 'SphinxHowTo.tex').read_text()
    assert '\\usepackage[,numfigreset=2]{sphinx}' in result
@pytest.mark.sphinx('latex', testroot='basic')
def test_latex_add_latex_package(app, status, warning):
    """Check app.add_latex_package() with and without package options."""
    app.add_latex_package('foo')
    app.add_latex_package('bar', 'baz')
    app.builder.build_all()
    result = (app.outdir / 'test.tex').read_text()
    assert '\\usepackage{foo}' in result
    assert '\\usepackage[baz]{bar}' in result
@pytest.mark.sphinx('latex', testroot='latex-babel')
def test_babel_with_no_language_settings(app, status, warning):
    """Check babel defaults (english) when no language is configured."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,english]{sphinxmanual}' in result
    assert '\\usepackage{babel}' in result
    assert '\\usepackage{tgtermes}' in result
    assert '\\usepackage[Bjarne]{fncychap}' in result
    assert ('\\addto\\captionsenglish{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff{"}' in result
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{page}' in result
    assert r'\addto\captionsenglish{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsenglish{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'de'})
def test_babel_with_language_de(app, status, warning):
    """Check babel setup for German (ngerman)."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,ngerman]{sphinxmanual}' in result
    assert '\\usepackage{babel}' in result
    assert '\\usepackage{tgtermes}' in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsngerman{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff{"}' in result
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{Seite}' in result
    assert r'\addto\captionsngerman{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsngerman{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'ru'})
def test_babel_with_language_ru(app, status, warning):
    """Check babel setup for Russian.

    sphinxmessages.sty contains Cyrillic (UTF-8) text, so both output files
    are read with an explicit encoding — the locale default would break this
    test on non-UTF-8 locales.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text(encoding='utf8')
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,russian]{sphinxmanual}' in result
    assert '\\usepackage{babel}' in result
    assert '\\usepackage{tgtermes}' not in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsrussian{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff{"}' in result
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text(encoding='utf8')
    print(result)
    assert r'\def\pageautorefname{страница}' in result
    assert r'\addto\captionsrussian{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsrussian{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'tr'})
def test_babel_with_language_tr(app, status, warning):
    """Check babel setup for Turkish (note the '=' shorthand)."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,turkish]{sphinxmanual}' in result
    assert '\\usepackage{babel}' in result
    assert '\\usepackage{tgtermes}' in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsturkish{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff{=}' in result
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{sayfa}' in result
    assert r'\addto\captionsturkish{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsturkish{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'ja'})
def test_babel_with_language_ja(app, status, warning):
    """Check that Japanese builds do not load babel at all.

    sphinxmessages.sty contains Japanese (UTF-8) text, so both output files
    are read with an explicit encoding — the locale default would break this
    test on non-UTF-8 locales.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text(encoding='utf8')
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,dvipdfmx]{sphinxmanual}' in result
    assert '\\usepackage{babel}' not in result
    assert '\\usepackage{tgtermes}' in result
    assert '\\usepackage[Sonny]{fncychap}' not in result
    assert '\\renewcommand{\\contentsname}{Table of content}\n' in result
    assert '\\shorthandoff' not in result
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text(encoding='utf8')
    print(result)
    assert r'\def\pageautorefname{ページ}' in result
    assert '\\@iden{\\renewcommand{\\figurename}{Fig.\\@{} }}' in result
    assert '\\@iden{\\renewcommand{\\tablename}{Table.\\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'unknown'})
def test_babel_with_unknown_language(app, status, warning):
    """Check fallback to english plus a warning for an unknown language."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,english]{sphinxmanual}' in result
    assert '\\usepackage{babel}' in result
    assert '\\usepackage{tgtermes}' in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsenglish{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff' in result
    assert "WARNING: no Babel option known for language 'unknown'" in warning.getvalue()
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{page}' in result
    assert r'\addto\captionsenglish{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsenglish{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'de', 'latex_engine': 'lualatex'})
def test_polyglossia_with_language_de(app, status, warning):
    """Check polyglossia setup for German under lualatex (new spelling)."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,german]{sphinxmanual}' in result
    assert '\\usepackage{polyglossia}' in result
    assert '\\setmainlanguage[spelling=new]{german}' in result
    assert '\\usepackage{tgtermes}' not in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsgerman{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff' not in result
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{Seite}' in result
    assert r'\addto\captionsgerman{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsgerman{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'de-1901', 'latex_engine': 'lualatex'})
def test_polyglossia_with_language_de_1901(app, status, warning):
    """Check polyglossia setup for pre-1901 German (old spelling)."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,german]{sphinxmanual}' in result
    assert '\\usepackage{polyglossia}' in result
    assert '\\setmainlanguage[spelling=old]{german}' in result
    assert '\\usepackage{tgtermes}' not in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsgerman{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff' not in result
    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{page}' in result
    assert r'\addto\captionsgerman{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsgerman{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx('latex')
def test_footnote(app, status, warning):
    """Check footnote markup: numbered/auto/named footnotes, citations, and
    footnotes inside table captions, headers and cells."""
    app.builder.build_all()
    result = (app.outdir / 'sphinxtests.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('\\sphinxstepexplicit %\n\\begin{footnote}[1]\\phantomsection'
            '\\label{\\thesphinxscope.1}%\n\\sphinxAtStartFootnote\nnumbered\n%\n'
            '\\end{footnote}') in result
    assert ('\\begin{footnote}[2]\\sphinxAtStartFootnote\nauto numbered\n%\n'
            '\\end{footnote}') in result
    assert '\\begin{footnote}[3]\\sphinxAtStartFootnote\nnamed\n%\n\\end{footnote}' in result
    assert '\\sphinxcite{footnote:bar}' in result
    assert ('\\bibitem[bar]{footnote:bar}\n\\sphinxAtStartPar\ncite\n') in result
    assert '\\sphinxcaption{Table caption \\sphinxfootnotemark[4]' in result
    assert ('\\hline%\n\\begin{footnotetext}[4]'
            '\\phantomsection\\label{\\thesphinxscope.4}%\n'
            '\\sphinxAtStartFootnote\n'
            'footnote in table caption\n%\n\\end{footnotetext}\\ignorespaces %\n'
            '\\begin{footnotetext}[5]'
            '\\phantomsection\\label{\\thesphinxscope.5}%\n'
            '\\sphinxAtStartFootnote\n'
            'footnote in table header\n%\n\\end{footnotetext}\\ignorespaces '
            '\n\\sphinxAtStartPar\n'
            'VIDIOC\\_CROPCAP\n&\n\\sphinxAtStartPar\n') in result
    assert ('Information about VIDIOC\\_CROPCAP %\n'
            '\\begin{footnote}[6]\\sphinxAtStartFootnote\n'
            'footnote in table not in header\n%\n\\end{footnote}\n\\\\\n\\hline\n'
            '\\end{tabulary}\n'
            '\\par\n\\sphinxattableend\\end{savenotes}\n') in result
@pytest.mark.sphinx('latex', testroot='footnotes')
def test_reference_in_caption_and_codeblock_in_footnote(app, status, warning):
    """Check references inside captions/rubrics and code blocks in footnotes."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('\\caption{This is the figure caption with a reference to '
            '\\sphinxcite{index:authoryear}.}' in result)
    assert '\\chapter{The section with a reference to {[}AuthorYear{]}}' in result
    assert ('\\sphinxcaption{The table title with a reference'
            ' to {[}AuthorYear{]}}' in result)
    assert '\\subsubsection*{The rubric title with a reference to {[}AuthorYear{]}}' in result
    assert ('\\chapter{The section with a reference to \\sphinxfootnotemark[5]}\n'
            '\\label{\\detokenize{index:the-section-with-a-reference-to}}'
            '%\n\\begin{footnotetext}[5]'
            '\\phantomsection\\label{\\thesphinxscope.5}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote in section\n%\n\\end{footnotetext}') in result
    assert ('\\caption{This is the figure caption with a footnote to '
            '\\sphinxfootnotemark[7].}\\label{\\detokenize{index:id29}}\\end{figure}\n'
            '%\n\\begin{footnotetext}[7]'
            '\\phantomsection\\label{\\thesphinxscope.7}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote in caption\n%\n\\end{footnotetext}') in result
    assert ('\\sphinxcaption{footnote \\sphinxfootnotemark[8] in '
            'caption of normal table}\\label{\\detokenize{index:id30}}') in result
    assert ('\\caption{footnote \\sphinxfootnotemark[9] '
            'in caption \\sphinxfootnotemark[10] of longtable\\strut}') in result
    assert ('\\endlastfoot\n%\n\\begin{footnotetext}[9]'
            '\\phantomsection\\label{\\thesphinxscope.9}%\n'
            '\\sphinxAtStartFootnote\n'
            'Foot note in longtable\n%\n\\end{footnotetext}\\ignorespaces %\n'
            '\\begin{footnotetext}[10]'
            '\\phantomsection\\label{\\thesphinxscope.10}%\n'
            '\\sphinxAtStartFootnote\n'
            'Second footnote in caption of longtable\n') in result
    assert ('This is a reference to the code\\sphinxhyphen{}block in the footnote:\n'
            '{\\hyperref[\\detokenize{index:codeblockinfootnote}]'
            '{\\sphinxcrossref{\\DUrole{std,std-ref}{I am in a footnote}}}}') in result
    assert ('&\n\\sphinxAtStartPar\nThis is one more footnote with some code in it %\n'
            '\\begin{footnote}[11]\\sphinxAtStartFootnote\n'
            'Third footnote in longtable\n') in result
    assert ('\\end{sphinxVerbatim}\n%\n\\end{footnote}.\n') in result
    assert '\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]' in result
@pytest.mark.sphinx(
    'latex', testroot='footnotes',
    confoverrides={'latex_show_urls': 'inline'})
def test_latex_show_urls_is_inline(app, status, warning):
    """Check URL rendering when latex_show_urls='inline' (URL in parentheses)."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('Same footnote number \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'footnote in bar\n%\n\\end{footnote} in bar.rst') in result
    assert ('Auto footnote number %\n\\begin{footnote}[1]\\sphinxAtStartFootnote\n'
            'footnote in baz\n%\n\\end{footnote} in baz.rst') in result
    assert ('\\phantomsection\\label{\\detokenize{index:id32}}'
            '{\\hyperref[\\detokenize{index:the-section'
            '-with-a-reference-to-authoryear}]'
            '{\\sphinxcrossref{The section with a reference to '
            '\\sphinxcite{index:authoryear}}}}') in result
    assert ('\\phantomsection\\label{\\detokenize{index:id33}}'
            '{\\hyperref[\\detokenize{index:the-section-with-a-reference-to}]'
            '{\\sphinxcrossref{The section with a reference to }}}' in result)
    assert ('First footnote: %\n\\begin{footnote}[2]\\sphinxAtStartFootnote\n'
            'First\n%\n\\end{footnote}') in result
    assert ('Second footnote: \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'Second\n%\n\\end{footnote}') in result
    assert '\\sphinxhref{http://sphinx-doc.org/}{Sphinx} (http://sphinx\\sphinxhyphen{}doc.org/)' in result
    assert ('Third footnote: %\n\\begin{footnote}[3]\\sphinxAtStartFootnote\n'
            'Third \\sphinxfootnotemark[4]\n%\n\\end{footnote}%\n'
            '\\begin{footnotetext}[4]'
            '\\phantomsection\\label{\\thesphinxscope.4}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote inside footnote\n%\n\\end{footnotetext}\\ignorespaces') in result
    assert ('\\sphinxhref{http://sphinx-doc.org/~test/}{URL including tilde} '
            '(http://sphinx\\sphinxhyphen{}doc.org/\\textasciitilde{}test/)') in result
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}{URL in term} '
            '(http://sphinx\\sphinxhyphen{}doc.org/)}'
            '\n\\sphinxAtStartPar\nDescription' in result)
    assert ('\\sphinxlineitem{Footnote in term \\sphinxfootnotemark[6]}'
            '%\n\\begin{footnotetext}[6]'
            '\\phantomsection\\label{\\thesphinxscope.6}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote in term\n%\n\\end{footnotetext}\\ignorespaces '
            '\n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}{Term in deflist} '
            '(http://sphinx\\sphinxhyphen{}doc.org/)}'
            '\n\\sphinxAtStartPar\nDescription') in result
    assert '\\sphinxurl{https://github.com/sphinx-doc/sphinx}\n' in result
    assert ('\\sphinxhref{mailto:sphinx-dev@googlegroups.com}'
            '{sphinx\\sphinxhyphen{}dev@googlegroups.com}') in result
    assert '\\begin{savenotes}\\begin{fulllineitems}' not in result
@pytest.mark.sphinx(
    'latex', testroot='footnotes',
    confoverrides={'latex_show_urls': 'footnote'})
def test_latex_show_urls_is_footnote(app, status, warning):
    """With ``latex_show_urls='footnote'`` external URLs are duplicated as footnotes.

    The expected footnote numbers interleave user-authored footnotes with the
    URL footnotes generated by the builder, so the numbering here pins the
    overall footnote ordering as well.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    # explicitly numbered footnotes keep their author-given number
    assert ('Same footnote number \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'footnote in bar\n%\n\\end{footnote} in bar.rst') in result
    assert ('Auto footnote number %\n\\begin{footnote}[2]\\sphinxAtStartFootnote\n'
            'footnote in baz\n%\n\\end{footnote} in baz.rst') in result
    # cross references are unaffected by the URL-footnote mode
    assert ('\\phantomsection\\label{\\detokenize{index:id32}}'
            '{\\hyperref[\\detokenize{index:the-section-with-a-reference-to-authoryear}]'
            '{\\sphinxcrossref{The section with a reference '
            'to \\sphinxcite{index:authoryear}}}}') in result
    assert ('\\phantomsection\\label{\\detokenize{index:id33}}'
            '{\\hyperref[\\detokenize{index:the-section-with-a-reference-to}]'
            '{\\sphinxcrossref{The section with a reference to }}}') in result
    assert ('First footnote: %\n\\begin{footnote}[3]\\sphinxAtStartFootnote\n'
            'First\n%\n\\end{footnote}') in result
    assert ('Second footnote: \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'Second\n%\n\\end{footnote}') in result
    # the URL itself moves into a generated footnote body (\sphinxnolinkurl)
    assert ('\\sphinxhref{http://sphinx-doc.org/}{Sphinx}'
            '%\n\\begin{footnote}[4]\\sphinxAtStartFootnote\n'
            '\\sphinxnolinkurl{http://sphinx-doc.org/}\n%\n\\end{footnote}') in result
    assert ('Third footnote: %\n\\begin{footnote}[6]\\sphinxAtStartFootnote\n'
            'Third \\sphinxfootnotemark[7]\n%\n\\end{footnote}%\n'
            '\\begin{footnotetext}[7]'
            '\\phantomsection\\label{\\thesphinxscope.7}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote inside footnote\n%\n'
            '\\end{footnotetext}\\ignorespaces') in result
    assert ('\\sphinxhref{http://sphinx-doc.org/~test/}{URL including tilde}'
            '%\n\\begin{footnote}[5]\\sphinxAtStartFootnote\n'
            '\\sphinxnolinkurl{http://sphinx-doc.org/~test/}\n%\n\\end{footnote}') in result
    # inside definition-list terms the footnote becomes a footnotetext after the term
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}'
            '{URL in term}\\sphinxfootnotemark[9]}'
            '%\n\\begin{footnotetext}[9]'
            '\\phantomsection\\label{\\thesphinxscope.9}%\n'
            '\\sphinxAtStartFootnote\n'
            '\\sphinxnolinkurl{http://sphinx-doc.org/}\n%\n'
            '\\end{footnotetext}\\ignorespaces \n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxlineitem{Footnote in term \\sphinxfootnotemark[11]}'
            '%\n\\begin{footnotetext}[11]'
            '\\phantomsection\\label{\\thesphinxscope.11}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote in term\n%\n\\end{footnotetext}\\ignorespaces '
            '\n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}{Term in deflist}'
            '\\sphinxfootnotemark[10]}'
            '%\n\\begin{footnotetext}[10]'
            '\\phantomsection\\label{\\thesphinxscope.10}%\n'
            '\\sphinxAtStartFootnote\n'
            '\\sphinxnolinkurl{http://sphinx-doc.org/}\n%\n'
            '\\end{footnotetext}\\ignorespaces \n\\sphinxAtStartPar\nDescription') in result
    # bare URLs and mail addresses get no extra footnote
    assert ('\\sphinxurl{https://github.com/sphinx-doc/sphinx}\n' in result)
    assert ('\\sphinxhref{mailto:sphinx-dev@googlegroups.com}'
            '{sphinx\\sphinxhyphen{}dev@googlegroups.com}\n') in result
    # footnotes within deflists require the savenotes environment in this mode
    assert '\\begin{savenotes}\\begin{fulllineitems}' in result
@pytest.mark.sphinx(
    'latex', testroot='footnotes',
    confoverrides={'latex_show_urls': 'no'})
def test_latex_show_urls_is_no(app, status, warning):
    """With ``latex_show_urls='no'`` hyperlinks are kept but URLs are not shown."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    # author footnotes are numbered without any URL footnotes interleaved
    assert ('Same footnote number \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'footnote in bar\n%\n\\end{footnote} in bar.rst') in result
    assert ('Auto footnote number %\n\\begin{footnote}[1]\\sphinxAtStartFootnote\n'
            'footnote in baz\n%\n\\end{footnote} in baz.rst') in result
    assert ('\\phantomsection\\label{\\detokenize{index:id32}}'
            '{\\hyperref[\\detokenize{index:the-section-with-a-reference-to-authoryear}]'
            '{\\sphinxcrossref{The section with a reference '
            'to \\sphinxcite{index:authoryear}}}}') in result
    assert ('\\phantomsection\\label{\\detokenize{index:id33}}'
            '{\\hyperref[\\detokenize{index:the-section-with-a-reference-to}]'
            '{\\sphinxcrossref{The section with a reference to }}}' in result)
    assert ('First footnote: %\n\\begin{footnote}[2]\\sphinxAtStartFootnote\n'
            'First\n%\n\\end{footnote}') in result
    assert ('Second footnote: \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'Second\n%\n\\end{footnote}') in result
    # links keep only their text; no parenthesized/footnoted URL follows
    assert '\\sphinxhref{http://sphinx-doc.org/}{Sphinx}' in result
    assert ('Third footnote: %\n\\begin{footnote}[3]\\sphinxAtStartFootnote\n'
            'Third \\sphinxfootnotemark[4]\n%\n\\end{footnote}%\n'
            '\\begin{footnotetext}[4]'
            '\\phantomsection\\label{\\thesphinxscope.4}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote inside footnote\n%\n\\end{footnotetext}\\ignorespaces') in result
    assert '\\sphinxhref{http://sphinx-doc.org/~test/}{URL including tilde}' in result
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}{URL in term}}'
            '\n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxlineitem{Footnote in term \\sphinxfootnotemark[6]}'
            '%\n\\begin{footnotetext}[6]'
            '\\phantomsection\\label{\\thesphinxscope.6}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote in term\n%\n\\end{footnotetext}\\ignorespaces '
            '\n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}{Term in deflist}}'
            '\n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxurl{https://github.com/sphinx-doc/sphinx}\n' in result)
    assert ('\\sphinxhref{mailto:sphinx-dev@googlegroups.com}'
            '{sphinx\\sphinxhyphen{}dev@googlegroups.com}\n') in result
    # no URL footnotes, hence no savenotes wrapper around the deflist
    assert '\\begin{savenotes}\\begin{fulllineitems}' not in result
@pytest.mark.sphinx(
    'latex', testroot='footnotes',
    confoverrides={'latex_show_urls': 'footnote',
                   'rst_prolog': '.. |URL| replace:: `text <http://www.example.com/>`__'})
def test_latex_show_urls_footnote_and_substitutions(app, status, warning):
    """Output is identical to the plain footnote mode even with substitutions.

    Hyperlinks defined inside substitutions must not generate extra footnotes
    (regression test for issue #4784); reuse the footnote-mode assertions.
    """
    test_latex_show_urls_is_footnote(app, status, warning)
@pytest.mark.sphinx('latex', testroot='image-in-section')
def test_image_in_section(app, status, warning):
    """Images embedded in section titles are carried into \\chapter commands."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    # title starting with an image
    assert ('\\chapter[Test section]{\\lowercase{\\sphinxincludegraphics'
            '[width=15bp,height=15bp]}{{pic}.png} Test section}') in tex
    # title with an image in the middle
    assert ('\\chapter[Other {[}blah{]} section]{Other {[}blah{]} '
            '\\lowercase{\\sphinxincludegraphics[width=15bp,height=15bp]}'
            '{{pic}.png} section}') in tex
    # plain title without an image
    assert '\\chapter{Another section}' in tex
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={'latex_logo': 'notfound.jpg'})
def test_latex_logo_if_not_found(app, status, warning):
    """A missing ``latex_logo`` file must abort the build with a SphinxError.

    The previous try/except formulation was subtly broken: when the build did
    NOT raise, the ``assert False`` fired inside the ``try`` block, was caught
    by ``except Exception``, and the test then failed on the unrelated
    ``isinstance(AssertionError, SphinxError)`` check with a confusing
    message.  ``pytest.raises`` expresses the intent directly and reports a
    clear "DID NOT RAISE" failure instead.
    """
    with pytest.raises(SphinxError):
        app.builder.build_all()
@pytest.mark.sphinx('latex', testroot='toctree-maxdepth')
def test_toctree_maxdepth_manual(app, status, warning):
    """toctree :maxdepth: sets tocdepth but leaves secnumdepth untouched."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\setcounter{tocdepth}{1}' in tex
    assert '\\setcounter{secnumdepth}' not in tex
    assert '\\chapter{Foo}' in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_documents': [
        ('index', 'python.tex', 'Sphinx Tests Documentation',
         'Georg Brandl', 'howto'),
    ]})
def test_toctree_maxdepth_howto(app, status, warning):
    """In the howto docclass the same :maxdepth: maps to tocdepth 2."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\setcounter{tocdepth}{2}' in tex
    assert '\\setcounter{secnumdepth}' not in tex
    # howto demotes top-level headings from chapters to sections
    assert '\\section{Foo}' in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'root_doc': 'foo'})
def test_toctree_not_found(app, status, warning):
    """A root document without a toctree emits no tocdepth/secnumdepth at all."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\setcounter{tocdepth}' not in tex
    assert '\\setcounter{secnumdepth}' not in tex
    assert '\\chapter{Foo A}' in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'root_doc': 'bar'})
def test_toctree_without_maxdepth(app, status, warning):
    """A toctree lacking :maxdepth: leaves both depth counters at defaults."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\setcounter{tocdepth}' not in tex
    assert '\\setcounter{secnumdepth}' not in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'root_doc': 'qux'})
def test_toctree_with_deeper_maxdepth(app, status, warning):
    """A deep :maxdepth: raises both tocdepth and secnumdepth."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    for counter in ('\\setcounter{tocdepth}{3}',
                    '\\setcounter{secnumdepth}{3}'):
        assert counter in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': None})
def test_latex_toplevel_sectioning_is_None(app, status, warning):
    """Default (None) top-level sectioning keeps chapters for manual docs."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\chapter{Foo}' in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': 'part'})
def test_latex_toplevel_sectioning_is_part(app, status, warning):
    """'part' promotes top-level docs to parts; children become chapters."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    for heading in ('\\part{Foo}',
                    '\\chapter{Foo A}',
                    '\\chapter{Foo B}'):
        assert heading in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': 'part',
                   'latex_documents': [
                       ('index', 'python.tex', 'Sphinx Tests Documentation',
                        'Georg Brandl', 'howto')
                   ]})
def test_latex_toplevel_sectioning_is_part_with_howto(app, status, warning):
    """'part' with the howto docclass keeps parts but demotes children to sections."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    for heading in ('\\part{Foo}',
                    '\\section{Foo A}',
                    '\\section{Foo B}'):
        assert heading in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': 'chapter'})
def test_latex_toplevel_sectioning_is_chapter(app, status, warning):
    """'chapter' in a manual document renders top-level docs as chapters."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\chapter{Foo}' in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': 'chapter',
                   'latex_documents': [
                       ('index', 'python.tex', 'Sphinx Tests Documentation',
                        'Georg Brandl', 'howto')
                   ]})
def test_latex_toplevel_sectioning_is_chapter_with_howto(app, status, warning):
    """howto has no chapters, so 'chapter' degrades to \\section."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\section{Foo}' in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': 'section'})
def test_latex_toplevel_sectioning_is_section(app, status, warning):
    """'section' renders top-level docs as sections even in a manual."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\section{Foo}' in tex
@skip_if_stylefiles_notfound
@pytest.mark.sphinx('latex', testroot='maxlistdepth')
def test_maxlistdepth_at_ten(app, status, warning):
    """Ten levels of nested lists still produce LaTeX that compiles."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    # the real assertion: the document must compile without error
    compile_latex_document(app, 'python.tex')
@pytest.mark.sphinx('latex', testroot='latex-table')
@pytest.mark.test_params(shared_result='latex-table')
def test_latex_table_tabulars(app, status, warning):
    """Every tabular-flavoured table matches its recorded expected snippet."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    # split the document into {section title: section body} pairs
    tables = {}
    for chunk in re.split(r'\\(?:section|chapter){', tex)[1:]:
        title, body = chunk.split('}', 1)
        tables[title] = body.strip()

    def expected(name):
        # reference snippets live under the test root's expects/ directory
        return (app.srcdir / 'expects' / (name + '.tex')).read_text().strip()

    cases = [
        ('simple table', 'simple_table'),
        ('table having :widths: option', 'table_having_widths'),
        ('table having :align: option (tabulary)', 'tabulary_having_widths'),
        ('table having :align: option (tabular)', 'tabular_having_widths'),
        ('table with tabularcolumn', 'tabularcolumn'),
        ('table with cell in first column having three paragraphs',
         'table_having_threeparagraphs_cell_in_first_col'),
        ('table having caption', 'table_having_caption'),
        ('table having verbatim', 'table_having_verbatim'),
        ('table having problematic cell', 'table_having_problematic_cell'),
        ('table having both :widths: and problematic cell',
         'table_having_widths_and_problematic_cell'),
        ('table having both stub columns and problematic cell',
         'table_having_stub_columns_and_problematic_cell'),
    ]
    for section, name in cases:
        assert tables[section] == expected(name)
@pytest.mark.sphinx('latex', testroot='latex-table')
@pytest.mark.test_params(shared_result='latex-table')
def test_latex_table_longtable(app, status, warning):
    """Every longtable variant matches its recorded expected snippet."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    # split the document into {section title: section body} pairs
    tables = {}
    for chunk in re.split(r'\\(?:section|chapter){', tex)[1:]:
        title, body = chunk.split('}', 1)
        tables[title] = body.strip()

    def expected(name):
        # reference snippets live under the test root's expects/ directory
        return (app.srcdir / 'expects' / (name + '.tex')).read_text().strip()

    cases = [
        ('longtable', 'longtable'),
        ('longtable having :widths: option', 'longtable_having_widths'),
        ('longtable having :align: option', 'longtable_having_align'),
        ('longtable with tabularcolumn', 'longtable_with_tabularcolumn'),
        ('longtable having caption', 'longtable_having_caption'),
        ('longtable having verbatim', 'longtable_having_verbatim'),
        ('longtable having problematic cell',
         'longtable_having_problematic_cell'),
        ('longtable having both :widths: and problematic cell',
         'longtable_having_widths_and_problematic_cell'),
        ('longtable having both stub columns and problematic cell',
         'longtable_having_stub_columns_and_problematic_cell'),
    ]
    for section, name in cases:
        assert tables[section] == expected(name)
@pytest.mark.sphinx('latex', testroot='latex-table')
@pytest.mark.test_params(shared_result='latex-table')
def test_latex_table_complex_tables(app, status, warning):
    """Grid tables and spanning cells match their recorded expected snippets."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    # note: split on \renewcommand as well — these tables follow one
    tables = {}
    for chunk in re.split(r'\\(?:section|renewcommand){', tex)[1:]:
        title, body = chunk.split('}', 1)
        tables[title] = body.strip()

    def expected(name):
        return (app.srcdir / 'expects' / (name + '.tex')).read_text().strip()

    for section, name in [('grid table', 'gridtable'),
                          ('complex spanning cell', 'complex_spanning_cell')]:
        assert tables[section] == expected(name)
@pytest.mark.sphinx('latex', testroot='latex-table',
                    confoverrides={'templates_path': ['_mytemplates/latex']})
def test_latex_table_custom_template_caseA(app, status, warning):
    """A template found under templates_path/latex overrides table rendering."""
    app.builder.build_all()
    output = (app.outdir / 'python.tex').read_text()
    assert 'SALUT LES COPAINS' in output
@pytest.mark.sphinx('latex', testroot='latex-table',
                    confoverrides={'templates_path': ['_mytemplates']})
def test_latex_table_custom_template_caseB(app, status, warning):
    """Templates directly in templates_path (no latex/ subdir) are ignored."""
    app.builder.build_all()
    output = (app.outdir / 'python.tex').read_text()
    assert 'SALUT LES COPAINS' not in output
@pytest.mark.sphinx('latex', testroot='latex-table')
@pytest.mark.test_params(shared_result='latex-table')
def test_latex_table_custom_template_caseC(app, status, warning):
    """Without templates_path no custom table template is picked up."""
    app.builder.build_all()
    output = (app.outdir / 'python.tex').read_text()
    assert 'SALUT LES COPAINS' not in output
@pytest.mark.sphinx('latex', testroot='directives-raw')
def test_latex_raw_directive(app, status, warning):
    """Only ``raw:: latex`` content appears in the output; HTML raw is dropped."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    # standard case: html-targeted raw content is excluded, latex kept
    assert 'standalone raw directive (HTML)' not in result
    assert ('\\label{\\detokenize{index:id1}}\n'
            'standalone raw directive (LaTeX)' in result)
    # with substitution: the html-only part ('def') drops out of the HTML line
    assert 'HTML: abc ghi' in result
    assert 'LaTeX: abc def ghi' in result
@pytest.mark.sphinx('latex', testroot='images')
def test_latex_images(app, status, warning):
    """Local images are copied; unreachable remote images warn and are skipped."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    # images are copied into the output directory
    assert '\\sphinxincludegraphics{{python-logo}.png}' in result
    assert (app.outdir / 'python-logo.png').exists()
    # images that could not be fetched are omitted with a warning
    assert '\\sphinxincludegraphics{{NOT_EXIST}.PNG}' not in result
    assert ('WARNING: Could not fetch remote image: '
            'https://www.google.com/NOT_EXIST.PNG [404]' in warning.getvalue())
    # an image having a target becomes a hyperlinked graphic
    assert ('\\sphinxhref{https://www.sphinx-doc.org/}'
            '{\\sphinxincludegraphics{{rimg}.png}}\n\n' in result)
    # a centered image having a target keeps the centering fills inside the link
    assert ('\\sphinxhref{https://www.python.org/}{{\\hspace*{\\fill}'
            '\\sphinxincludegraphics{{rimg}.png}\\hspace*{\\fill}}}\n\n' in result)
@pytest.mark.sphinx('latex', testroot='latex-index')
def test_latex_index(app, status, warning):
    """Index entries render as \\index commands wrapped in \\spxentry."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    # inline index entries stay adjacent to the indexed words
    assert ('A \\index{famous@\\spxentry{famous}}famous '
            '\\index{equation@\\spxentry{equation}}equation:\n' in result)
    # block-level index directives emit entries followed by \ignorespaces
    assert ('\n\\index{Einstein@\\spxentry{Einstein}}'
            '\\index{relativity@\\spxentry{relativity}}'
            '\\ignorespaces \n\\sphinxAtStartPar\nand') in result
    # special characters such as '{' are escaped via \sphinxleftcurlybrace
    assert ('\n\\index{main \\sphinxleftcurlybrace{}@\\spxentry{'
            'main \\sphinxleftcurlybrace{}}}\\ignorespaces ' in result)
@pytest.mark.sphinx('latex', testroot='latex-equations')
def test_latex_equations(app, status, warning):
    """Rendered math matches the pre-recorded expected LaTeX verbatim."""
    app.builder.build_all()
    actual = (app.outdir / 'python.tex').read_text()
    reference = (app.srcdir / 'expects' / 'latex-equations.tex').read_text().strip()
    assert reference in actual
@pytest.mark.sphinx('latex', testroot='image-in-parsed-literal')
def test_latex_image_in_parsed_literal(app, status, warning):
    """Images inside parsed-literal blocks keep sizing and baseline markup."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    assert ('{\\sphinxunactivateextrasandspace \\raisebox{-0.5\\height}'
            '{\\sphinxincludegraphics[height=2.00000cm]{{pic}.png}}'
            '}AFTER') in result
@pytest.mark.sphinx('latex', testroot='nested-enumerated-list')
def test_latex_nested_enumerated_list(app, status, warning):
    """Custom start values and numbering styles survive at every nesting level."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    # outer arabic list starting at 5 (counter is set to start - 1)
    assert ('\\sphinxsetlistlabels{\\arabic}{enumi}{enumii}{}{.}%\n'
            '\\setcounter{enumi}{4}\n' in result)
    # alphabetic sub-list
    assert ('\\sphinxsetlistlabels{\\alph}{enumii}{enumiii}{}{.}%\n'
            '\\setcounter{enumii}{3}\n' in result)
    # arabic sub-sub-list with ')' suffix
    assert ('\\sphinxsetlistlabels{\\arabic}{enumiii}{enumiv}{}{)}%\n'
            '\\setcounter{enumiii}{9}\n' in result)
    # parenthesized arabic list at depth four
    assert ('\\sphinxsetlistlabels{\\arabic}{enumiv}{enumv}{(}{)}%\n'
            '\\setcounter{enumiv}{23}\n' in result)
    # roman-numbered sub-list
    assert ('\\sphinxsetlistlabels{\\roman}{enumii}{enumiii}{}{.}%\n'
            '\\setcounter{enumii}{2}\n' in result)
@pytest.mark.sphinx('latex', testroot='footnotes')
def test_latex_thebibliography(app, status, warning):
    """Citations produce a sphinxthebibliography environment and \\sphinxcite refs."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    # the widest label ('AuthorYe...') sizes the bibliography environment
    assert ('\\begin{sphinxthebibliography}{AuthorYe}\n'
            '\\bibitem[AuthorYear]{index:authoryear}\n\\sphinxAtStartPar\n'
            'Author, Title, Year\n'
            '\\end{sphinxthebibliography}\n' in result)
    assert '\\sphinxcite{index:authoryear}' in result
@pytest.mark.sphinx('latex', testroot='glossary')
def test_latex_glossary(app, status, warning):
    """Glossary terms emit \\sphinxlineitem with index entry and anchor label.

    Non-ASCII terms are exercised deliberately: the \\label uses the ASCII-fied
    form (e.g. 'ahnlich' for 'ähnlich') while the visible term keeps its
    original spelling.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    assert (r'\sphinxlineitem{ähnlich\index{ähnlich@\spxentry{ähnlich}|spxpagem}'
            r'\phantomsection'
            r'\label{\detokenize{index:term-ahnlich}}}' in result)
    assert (r'\sphinxlineitem{boson\index{boson@\spxentry{boson}|spxpagem}\phantomsection'
            r'\label{\detokenize{index:term-boson}}}' in result)
    # inline markup (emphasis) inside a term is preserved
    assert (r'\sphinxlineitem{\sphinxstyleemphasis{fermion}'
            r'\index{fermion@\spxentry{fermion}|spxpagem}'
            r'\phantomsection'
            r'\label{\detokenize{index:term-fermion}}}' in result)
    # several terms sharing one definition yield consecutive line items
    assert (r'\sphinxlineitem{tauon\index{tauon@\spxentry{tauon}|spxpagem}\phantomsection'
            r'\label{\detokenize{index:term-tauon}}}'
            r'\sphinxlineitem{myon\index{myon@\spxentry{myon}|spxpagem}\phantomsection'
            r'\label{\detokenize{index:term-myon}}}'
            r'\sphinxlineitem{electron\index{electron@\spxentry{electron}|spxpagem}\phantomsection'
            r'\label{\detokenize{index:term-electron}}}' in result)
    assert (r'\sphinxlineitem{über\index{über@\spxentry{über}|spxpagem}\phantomsection'
            r'\label{\detokenize{index:term-uber}}}' in result)
@pytest.mark.sphinx('latex', testroot='latex-labels')
def test_latex_labels(app, status, warning):
    """Multiple reST labels on one object all become \\label commands."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    # figures: stacked labels directly after the caption
    assert (r'\caption{labeled figure}'
            r'\label{\detokenize{index:id1}}'
            r'\label{\detokenize{index:figure2}}'
            r'\label{\detokenize{index:figure1}}'
            r'\end{figure}' in result)
    assert (r'\caption{labeled figure}'
            '\\label{\\detokenize{index:figure3}}\n'
            '\\begin{sphinxlegend}\n\\sphinxAtStartPar\n'
            'with a legend\n\\end{sphinxlegend}\n'
            r'\end{figure}' in result)
    # code-blocks: labels are injected via \sphinxLiteralBlockLabel
    assert (r'\def\sphinxLiteralBlockLabel{'
            r'\label{\detokenize{index:codeblock2}}'
            r'\label{\detokenize{index:codeblock1}}}' in result)
    assert (r'\def\sphinxLiteralBlockLabel{'
            r'\label{\detokenize{index:codeblock3}}}' in result)
    # tables: labels follow the \sphinxcaption
    assert (r'\sphinxcaption{table caption}'
            r'\label{\detokenize{index:id2}}'
            r'\label{\detokenize{index:table2}}'
            r'\label{\detokenize{index:table1}}' in result)
    assert (r'\sphinxcaption{table caption}'
            r'\label{\detokenize{index:table3}}' in result)
    # sections: implicit and explicit labels follow the heading
    assert ('\\chapter{subsection}\n'
            r'\label{\detokenize{index:subsection}}'
            r'\label{\detokenize{index:section2}}'
            r'\label{\detokenize{index:section1}}' in result)
    assert ('\\section{subsubsection}\n'
            r'\label{\detokenize{index:subsubsection}}'
            r'\label{\detokenize{index:section3}}' in result)
    assert ('\\subsection{otherdoc}\n'
            r'\label{\detokenize{otherdoc:otherdoc}}'
            r'\label{\detokenize{otherdoc::doc}}' in result)
    # embedded standalone hyperlink reference must not duplicate the label (refs: #5948)
    assert result.count(r'\label{\detokenize{index:section1}}') == 1
@pytest.mark.sphinx('latex', testroot='latex-figure-in-admonition')
def test_latex_figure_in_admonition(app, status, warning):
    """Figures inside admonitions use the [H] (here) float placement."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    # plain ``assert expr`` instead of the call-like ``assert(...)`` spelling
    assert r'\begin{figure}[H]' in tex
def test_default_latex_documents():
    """The default latex_documents entry LaTeX-escapes project metadata."""
    from sphinx.util import texescape
    texescape.init()
    cfg = Config({'root_doc': 'index',
                  'project': 'STASI™ Documentation',
                  'author': "Wolfgang Schäuble & G'Beckstein."})
    cfg.init_values()
    cfg.add('latex_engine', None, True, None)
    cfg.add('latex_theme', 'manual', True, None)
    documents = default_latex_documents(cfg)
    # '&' and the apostrophe must be escaped; filename derives from project name
    assert documents == [('index', 'stasi.tex', 'STASI™ Documentation',
                          r"Wolfgang Schäuble \& G\textquotesingle{}Beckstein.\@{}",
                          'manual')]
@skip_if_requested
@skip_if_stylefiles_notfound
@pytest.mark.sphinx('latex', testroot='latex-includegraphics')
def test_includegraphics_oversized(app, status, warning):
    """Oversized images must still yield a document that LaTeX can compile."""
    app.builder.build_all()
    print(status.getvalue())
    print(warning.getvalue())
    # the compile itself is the assertion: failure raises/asserts inside
    compile_latex_document(app)
@pytest.mark.sphinx('latex', testroot='index_on_title')
def test_index_on_title(app, status, warning):
    """Index markup in a top-level title is emitted right after the label."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    assert ('\\chapter{Test for index in top level title}\n'
            '\\label{\\detokenize{contents:test-for-index-in-top-level-title}}'
            '\\index{index@\\spxentry{index}}\n'
            in result)
@pytest.mark.sphinx('latex', testroot='latex-unicode',
                    confoverrides={'latex_engine': 'pdflatex'})
def test_texescape_for_non_unicode_supported_engine(app, status, warning):
    """pdflatex output rewrites unicode sub-/superscripts into math commands."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    for fragment in ('script small e: e',
                     'double struck italic small i: i',
                     r'superscript: \(\sp{\text{0}}\), \(\sp{\text{1}}\)',
                     r'subscript: \(\sb{\text{0}}\), \(\sb{\text{1}}\)'):
        assert fragment in tex
@pytest.mark.sphinx('latex', testroot='latex-unicode',
                    confoverrides={'latex_engine': 'xelatex'})
def test_texescape_for_unicode_supported_engine(app, status, warning):
    """xelatex output keeps unicode sub-/superscript characters verbatim."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    for fragment in ('script small e: e',
                     'double struck italic small i: i',
                     'superscript: ⁰, ¹',
                     'subscript: ₀, ₁'):
        assert fragment in tex
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={'latex_elements': {'extrapackages': r'\usepackage{foo}'}})
def test_latex_elements_extrapackages(app, status, warning):
    """latex_elements['extrapackages'] text is injected into the preamble."""
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text()
    assert r'\usepackage{foo}' in tex
@pytest.mark.sphinx('latex', testroot='nested-tables')
def test_latex_nested_tables(app, status, warning):
    """Nested tables build cleanly without emitting any warnings."""
    app.builder.build_all()
    assert warning.getvalue() == ''
@pytest.mark.sphinx('latex', testroot='latex-container')
def test_latex_container(app, status, warning):
    """container directives map onto the sphinxuseclass environment."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    for fragment in (r'\begin{sphinxuseclass}{classname}',
                     r'\end{sphinxuseclass}'):
        assert fragment in tex
| 42.731056 | 107 | 0.654592 |
import os
import re
import subprocess
from itertools import product
from shutil import copyfile
from subprocess import PIPE, CalledProcessError
import pytest
from sphinx.builders.latex import default_latex_documents
from sphinx.config import Config
from sphinx.errors import SphinxError
from sphinx.testing.util import strip_escseq
from sphinx.util.osutil import cd, ensuredir
from sphinx.writers.latex import LaTeXTranslator
from .test_build_html import ENV_WARNINGS
# LaTeX engines and document classes exercised by test_build_latex_doc
LATEX_ENGINES = ['pdflatex', 'lualatex', 'xelatex']
DOCCLASSES = ['howto', 'manual']
# style files whose availability (checked via kpsewhich) gates the compile tests
STYLEFILES = ['article.cls', 'fancyhdr.sty', 'titlesec.sty', 'amsmath.sty',
              'framed.sty', 'color.sty', 'fancyvrb.sty',
              'fncychap.sty', 'geometry.sty', 'kvoptions.sty', 'hyperref.sty']
# regex template (with %(root)s placeholder) matching the expected build warnings
LATEX_WARNINGS = ENV_WARNINGS + """\
%(root)s/index.rst:\\d+: WARNING: unknown option: &option
%(root)s/index.rst:\\d+: WARNING: citation not found: missing
%(root)s/index.rst:\\d+: WARNING: a suitable image for latex builder not found: foo.\\*
%(root)s/index.rst:\\d+: WARNING: Could not lex literal_block as "c". Highlighting skipped.
"""
def kpsetest(*filenames):
    """Return True when ``kpsewhich`` can locate every given TeX file."""
    cmdline = ['kpsewhich', *filenames]
    try:
        subprocess.run(cmdline, stdout=PIPE, stderr=PIPE, check=True)
    except (OSError, CalledProcessError):
        # kpsewhich is missing, or at least one file was not found
        return False
    return True
def compile_latex_document(app, filename='python.tex'):
    """Compile *filename* with the configured LaTeX engine inside app.outdir.

    Skips the test when the engine binary is unavailable; fails it when the
    engine exits with a non-zero status.
    """
    engine = app.config.latex_engine
    try:
        with cd(app.outdir):
            # compile into a per-engine subdirectory so engines do not clash
            ensuredir(engine)
            copyfile(filename, engine + '/' + filename)
            cmdline = [engine,
                       '--halt-on-error',
                       '--interaction=nonstopmode',
                       '-output-directory=%s' % engine,
                       filename]
            subprocess.run(cmdline, stdout=PIPE, stderr=PIPE, check=True)
    except OSError as exc:
        # engine binary not installed — skip rather than fail
        raise pytest.skip.Exception from exc
    except CalledProcessError as exc:
        print(exc.stdout)
        print(exc.stderr)
        assert False, '%s exited with return code %s' % (engine, exc.returncode)
def skip_if_requested(testfunc):
    """Decorate *testfunc* with a skip marker when SKIP_LATEX_BUILD is set."""
    if 'SKIP_LATEX_BUILD' not in os.environ:
        return testfunc
    reason = 'Skip LaTeX builds because SKIP_LATEX_BUILD is set'
    return pytest.mark.skipif(True, reason=reason)(testfunc)
def skip_if_stylefiles_notfound(testfunc):
    """Decorate *testfunc* with a skip marker unless all style files exist."""
    if kpsetest(*STYLEFILES):
        return testfunc
    reason = 'not running latex, the required styles do not seem to be installed'
    return pytest.mark.skipif(True, reason=reason)(testfunc)
@skip_if_requested
@skip_if_stylefiles_notfound
@pytest.mark.parametrize(
    "engine,docclass",
    product(LATEX_ENGINES, DOCCLASSES),
)
@pytest.mark.sphinx('latex')
def test_build_latex_doc(app, status, warning, engine, docclass):
    """Smoke test: build and compile with every engine/docclass combination."""
    app.config.latex_engine = engine
    app.config.latex_documents = [app.config.latex_documents[0][:4] + (docclass,)]
    # re-run builder init so the overridden engine/docclass take effect
    app.builder.init()
    LaTeXTranslator.ignore_missing_images = True
    app.builder.build_all()
    # the build is expected to place svgimg.svg in the output directory
    # (presumably converted/copied from the test project — confirm in testroot)
    assert (app.outdir / 'svgimg.svg').isfile()
    compile_latex_document(app, 'sphinxtests.tex')
@pytest.mark.sphinx('latex')
def test_writer(app, status, warning):
    """Figure variants (in-table, aligned, sized) render their environments."""
    app.builder.build_all()
    result = (app.outdir / 'sphinxtests.tex').read_text()
    # figure inside a table uses the dedicated sphinxfigure-in-table environment
    assert ('\\begin{sphinxfigure-in-table}\n\\centering\n\\capstart\n'
            '\\noindent\\sphinxincludegraphics{{img}.png}\n'
            '\\sphinxfigcaption{figure in table}\\label{\\detokenize{markup:id8}}'
            '\\end{sphinxfigure-in-table}\\relax' in result)
    # :align: right becomes a zero-width wrapfigure
    assert ('\\begin{wrapfigure}{r}{0pt}\n\\centering\n'
            '\\noindent\\sphinxincludegraphics{{rimg}.png}\n'
            '\\caption{figure with align option}\\label{\\detokenize{markup:id9}}'
            '\\end{wrapfigure}' in result)
    # :figwidth: is translated into a fraction of \linewidth
    assert ('\\begin{wrapfigure}{r}{0.500\\linewidth}\n\\centering\n'
            '\\noindent\\sphinxincludegraphics{{rimg}.png}\n'
            '\\caption{figure with align \\& figwidth option}'
            '\\label{\\detokenize{markup:id10}}'
            '\\end{wrapfigure}' in result)
    # :width: propagates to both the wrapfigure and includegraphics
    assert ('\\begin{wrapfigure}{r}{3cm}\n\\centering\n'
            '\\noindent\\sphinxincludegraphics[width=3cm]{{rimg}.png}\n'
            '\\caption{figure with align \\& width option}'
            '\\label{\\detokenize{markup:id11}}'
            '\\end{wrapfigure}' in result)
    # no stray 'Footnotes' rubric should be generated
    assert 'Footnotes' not in result
@pytest.mark.sphinx('latex', testroot='warnings', freshenv=True)
def test_latex_warnings(app, status, warning):
    """The build's warning stream matches the LATEX_WARNINGS regex exactly."""
    app.builder.build_all()
    # normalize Windows path separators and strip terminal escape sequences
    warnings = strip_escseq(re.sub(re.escape(os.sep) + '{1,2}', '/', warning.getvalue()))
    warnings_exp = LATEX_WARNINGS % {
        'root': re.escape(app.srcdir.replace(os.sep, '/'))}
    # anchored match: no extra warnings may appear before or after
    assert re.match(warnings_exp + '$', warnings), \
        'Warnings don\'t match:\n' + \
        '--- Expected (regex):\n' + warnings_exp + \
        '--- Got:\n' + warnings
@pytest.mark.sphinx('latex', testroot='basic')
def test_latex_basic(app, status, warning):
    """The basic project renders its title and an empty release."""
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    for fragment in (r'\title{The basic Sphinx documentation for testing}',
                     r'\release{}',
                     r'\renewcommand{\releasename}{}'):
        assert fragment in tex
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={
                        'latex_documents': [('index', 'test.tex', 'title', 'author', 'manual')]
                    })
def test_latex_basic_manual(app, status, warning):
    """The 'manual' document type maps onto the LaTeX report class."""
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text(encoding='utf8')
    print(tex)
    for needle in (r'\def\sphinxdocclass{report}',
                   r'\documentclass[letterpaper,10pt,english]{sphinxmanual}'):
        assert needle in tex
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={
                        'latex_documents': [('index', 'test.tex', 'title', 'author', 'howto')]
                    })
def test_latex_basic_howto(app, status, warning):
    """The 'howto' document type maps onto the LaTeX article class."""
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text(encoding='utf8')
    print(tex)
    for needle in (r'\def\sphinxdocclass{article}',
                   r'\documentclass[letterpaper,10pt,english]{sphinxhowto}'):
        assert needle in tex
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={
                        'language': 'ja',
                        'latex_documents': [('index', 'test.tex', 'title', 'author', 'manual')]
                    })
def test_latex_basic_manual_ja(app, status, warning):
    """Japanese 'manual' documents use the ujbook class and dvipdfmx."""
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text(encoding='utf8')
    print(tex)
    for needle in (r'\def\sphinxdocclass{ujbook}',
                   r'\documentclass[letterpaper,10pt,dvipdfmx]{sphinxmanual}'):
        assert needle in tex
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={
                        'language': 'ja',
                        'latex_documents': [('index', 'test.tex', 'title', 'author', 'howto')]
                    })
def test_latex_basic_howto_ja(app, status, warning):
    """Japanese 'howto' documents use the ujreport class and dvipdfmx."""
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text(encoding='utf8')
    print(tex)
    for needle in (r'\def\sphinxdocclass{ujreport}',
                   r'\documentclass[letterpaper,10pt,dvipdfmx]{sphinxhowto}'):
        assert needle in tex
@pytest.mark.sphinx('latex', testroot='latex-theme')
def test_latex_theme(app, status, warning):
    """A custom latex_theme selects its own document class and options."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text(encoding='utf8')
    print(tex)
    for needle in (r'\def\sphinxdocclass{book}',
                   r'\documentclass[a4paper,12pt,english]{sphinxbook}'):
        assert needle in tex
@pytest.mark.sphinx('latex', testroot='latex-theme',
                    confoverrides={'latex_elements': {'papersize': 'b5paper',
                                                      'pointsize': '9pt'}})
def test_latex_theme_papersize(app, status, warning):
    """latex_elements papersize/pointsize override the theme defaults."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text(encoding='utf8')
    print(tex)
    for needle in (r'\def\sphinxdocclass{book}',
                   r'\documentclass[b5paper,9pt,english]{sphinxbook}'):
        assert needle in tex
@pytest.mark.sphinx('latex', testroot='latex-theme',
                    confoverrides={'latex_theme_options': {'papersize': 'b5paper',
                                                           'pointsize': '9pt'}})
def test_latex_theme_options(app, status, warning):
    """latex_theme_options override the theme's papersize and pointsize."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text(encoding='utf8')
    print(tex)
    for needle in (r'\def\sphinxdocclass{book}',
                   r'\documentclass[b5paper,9pt,english]{sphinxbook}'):
        assert needle in tex
@pytest.mark.sphinx('latex', testroot='basic', confoverrides={'language': 'zh'})
def test_latex_additional_settings_for_language_code(app, status, warning):
    """Chinese documents must pull in the xeCJK package."""
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text()
    print(tex, status.getvalue(), warning.getvalue(), sep='\n')
    assert r'\usepackage{xeCJK}' in tex
@pytest.mark.sphinx('latex', testroot='basic', confoverrides={'language': 'el'})
def test_latex_additional_settings_for_greek(app, status, warning):
    """Greek documents load polyglossia and a Greek monospace font."""
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text()
    print(tex, status.getvalue(), warning.getvalue(), sep='\n')
    assert '\\usepackage{polyglossia}\n\\setmainlanguage{greek}' in tex
    assert '\\newfontfamily\\greekfonttt{FreeMono}' in tex
@pytest.mark.sphinx('latex', testroot='latex-title')
def test_latex_title_after_admonitions(app, status, warning):
    """The document title is still detected when admonitions precede it."""
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text()
    print(tex, status.getvalue(), warning.getvalue(), sep='\n')
    assert '\\title{test\\sphinxhyphen{}latex\\sphinxhyphen{}title}' in tex
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={'release': '1.0_0'})
def test_latex_release(app, status, warning):
    """Underscores in the release string must be escaped for LaTeX."""
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text()
    print(tex, status.getvalue(), warning.getvalue(), sep='\n')
    for needle in (r'\release{1.0\_0}',
                   r'\renewcommand{\releasename}{Release}'):
        assert needle in tex
@pytest.mark.sphinx('latex', testroot='numfig',
                    confoverrides={'numfig': True})
def test_numref(app, status, warning):
    """Check numfig cross-reference markup with the default formats.

    With ``numfig`` enabled, :rst:role:`numref` targets for figures,
    tables, code listings and sections must become ``\\hyperref`` links
    wrapping a ``\\ref``, and ``sphinxmessages.sty`` must carry the
    matching localized caption names.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('\\hyperref[\\detokenize{index:fig1}]'
            '{Fig.\\@ \\ref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:fig22}]'
            '{Figure\\ref{\\detokenize{baz:fig22}}}') in result
    assert ('\\hyperref[\\detokenize{index:table-1}]'
            '{Table \\ref{\\detokenize{index:table-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:table22}]'
            '{Table:\\ref{\\detokenize{baz:table22}}}') in result
    assert ('\\hyperref[\\detokenize{index:code-1}]'
            '{Listing \\ref{\\detokenize{index:code-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:code22}]'
            '{Code\\sphinxhyphen{}\\ref{\\detokenize{baz:code22}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]'
            '{Section \\ref{\\detokenize{foo:foo}}}') in result
    assert ('\\hyperref[\\detokenize{bar:bar-a}]'
            '{Section \\ref{\\detokenize{bar:bar-a}}}') in result
    assert ('\\hyperref[\\detokenize{index:fig1}]{Fig.\\ref{\\detokenize{index:fig1}} '
            '\\nameref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]{Sect.\\ref{\\detokenize{foo:foo}} '
            '\\nameref{\\detokenize{foo:foo}}}') in result

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\addto\captionsenglish{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsenglish{\renewcommand{\tablename}{Table }}' in result
    assert r'\addto\captionsenglish{\renewcommand{\literalblockname}{Listing}}' in result
@pytest.mark.sphinx(
    'latex', testroot='numfig',
    confoverrides={'numfig': True,
                   'numfig_format': {'figure': 'Figure:%s',
                                     'table': 'Tab_%s',
                                     'code-block': 'Code-%s',
                                     'section': 'SECTION-%s'}})
def test_numref_with_prefix1(app, status, warning):
    """Check numfig references when numfig_format has no trailing text.

    The custom prefixes must replace the default names in the
    ``\\hyperref`` markup, with special characters (underscore, hyphen)
    escaped for LaTeX, and the localized names in ``sphinxmessages.sty``
    updated accordingly.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\ref{\\detokenize{index:fig1}}' in result
    assert '\\ref{\\detokenize{baz:fig22}}' in result
    assert '\\ref{\\detokenize{index:table-1}}' in result
    assert '\\ref{\\detokenize{baz:table22}}' in result
    assert '\\ref{\\detokenize{index:code-1}}' in result
    assert '\\ref{\\detokenize{baz:code22}}' in result
    assert ('\\hyperref[\\detokenize{index:fig1}]'
            '{Figure:\\ref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:fig22}]'
            '{Figure\\ref{\\detokenize{baz:fig22}}}') in result
    assert ('\\hyperref[\\detokenize{index:table-1}]'
            '{Tab\\_\\ref{\\detokenize{index:table-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:table22}]'
            '{Table:\\ref{\\detokenize{baz:table22}}}') in result
    assert ('\\hyperref[\\detokenize{index:code-1}]'
            '{Code\\sphinxhyphen{}\\ref{\\detokenize{index:code-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:code22}]'
            '{Code\\sphinxhyphen{}\\ref{\\detokenize{baz:code22}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]'
            '{SECTION\\sphinxhyphen{}\\ref{\\detokenize{foo:foo}}}') in result
    assert ('\\hyperref[\\detokenize{bar:bar-a}]'
            '{SECTION\\sphinxhyphen{}\\ref{\\detokenize{bar:bar-a}}}') in result
    assert ('\\hyperref[\\detokenize{index:fig1}]{Fig.\\ref{\\detokenize{index:fig1}} '
            '\\nameref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]{Sect.\\ref{\\detokenize{foo:foo}} '
            '\\nameref{\\detokenize{foo:foo}}}') in result

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\addto\captionsenglish{\renewcommand{\figurename}{Figure:}}' in result
    assert r'\addto\captionsenglish{\renewcommand{\tablename}{Tab\_}}' in result
    assert r'\addto\captionsenglish{\renewcommand{\literalblockname}{Code-}}' in result
@pytest.mark.sphinx(
    'latex', testroot='numfig',
    confoverrides={'numfig': True,
                   'numfig_format': {'figure': 'Figure:%s.',
                                     'table': 'Tab_%s:',
                                     'code-block': 'Code-%s | ',
                                     'section': 'SECTION_%s_'}})
def test_numref_with_prefix2(app, status, warning):
    """Check numfig references when numfig_format has trailing text.

    Suffix text after the ``%s`` placeholder (period, colon, pipe,
    underscore) must appear after the ``\\ref`` in the hyperlink markup,
    and the ``\\fnum@figure``/``\\fnum@table`` definitions in
    ``sphinxmessages.sty`` must carry the suffix as well.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('\\hyperref[\\detokenize{index:fig1}]'
            '{Figure:\\ref{\\detokenize{index:fig1}}.\\@}') in result
    assert ('\\hyperref[\\detokenize{baz:fig22}]'
            '{Figure\\ref{\\detokenize{baz:fig22}}}') in result
    assert ('\\hyperref[\\detokenize{index:table-1}]'
            '{Tab\\_\\ref{\\detokenize{index:table-1}}:}') in result
    assert ('\\hyperref[\\detokenize{baz:table22}]'
            '{Table:\\ref{\\detokenize{baz:table22}}}') in result
    assert ('\\hyperref[\\detokenize{index:code-1}]{Code\\sphinxhyphen{}\\ref{\\detokenize{index:code-1}} '
            '| }') in result
    assert ('\\hyperref[\\detokenize{baz:code22}]'
            '{Code\\sphinxhyphen{}\\ref{\\detokenize{baz:code22}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]'
            '{SECTION\\_\\ref{\\detokenize{foo:foo}}\\_}') in result
    assert ('\\hyperref[\\detokenize{bar:bar-a}]'
            '{SECTION\\_\\ref{\\detokenize{bar:bar-a}}\\_}') in result
    assert ('\\hyperref[\\detokenize{index:fig1}]{Fig.\\ref{\\detokenize{index:fig1}} '
            '\\nameref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]{Sect.\\ref{\\detokenize{foo:foo}} '
            '\\nameref{\\detokenize{foo:foo}}}') in result

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\addto\captionsenglish{\renewcommand{\figurename}{Figure:}}' in result
    assert r'\def\fnum@figure{\figurename\thefigure{}.}' in result
    assert r'\addto\captionsenglish{\renewcommand{\tablename}{Tab\_}}' in result
    assert r'\def\fnum@table{\tablename\thetable{}:}' in result
    assert r'\addto\captionsenglish{\renewcommand{\literalblockname}{Code-}}' in result
@pytest.mark.sphinx(
    'latex', testroot='numfig',
    confoverrides={'numfig': True, 'language': 'ja'})
def test_numref_with_language_ja(app, status, warning):
    """Check numfig references under the Japanese locale.

    Default figure/table/listing names must be the Japanese translations
    (図, 表, リスト), section references put the number before 章, and
    ``sphinxmessages.sty`` wraps the renewcommands in ``\\@iden`` since
    babel captions are not used for Japanese.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('\\hyperref[\\detokenize{index:fig1}]'
            '{\u56f3 \\ref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:fig22}]'
            '{Figure\\ref{\\detokenize{baz:fig22}}}') in result
    assert ('\\hyperref[\\detokenize{index:table-1}]'
            '{\u8868 \\ref{\\detokenize{index:table-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:table22}]'
            '{Table:\\ref{\\detokenize{baz:table22}}}') in result
    assert ('\\hyperref[\\detokenize{index:code-1}]'
            '{\u30ea\u30b9\u30c8 \\ref{\\detokenize{index:code-1}}}') in result
    assert ('\\hyperref[\\detokenize{baz:code22}]'
            '{Code\\sphinxhyphen{}\\ref{\\detokenize{baz:code22}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]'
            '{\\ref{\\detokenize{foo:foo}} \u7ae0}') in result
    assert ('\\hyperref[\\detokenize{bar:bar-a}]'
            '{\\ref{\\detokenize{bar:bar-a}} \u7ae0}') in result
    assert ('\\hyperref[\\detokenize{index:fig1}]{Fig.\\ref{\\detokenize{index:fig1}} '
            '\\nameref{\\detokenize{index:fig1}}}') in result
    assert ('\\hyperref[\\detokenize{foo:foo}]{Sect.\\ref{\\detokenize{foo:foo}} '
            '\\nameref{\\detokenize{foo:foo}}}') in result

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert '\\@iden{\\renewcommand{\\figurename}{図 }}' in result
    assert '\\@iden{\\renewcommand{\\tablename}{表 }}' in result
    assert '\\@iden{\\renewcommand{\\literalblockname}{リスト}}' in result
@pytest.mark.sphinx('latex', testroot='latex-numfig')
def test_latex_obey_numfig_is_false(app, status, warning):
    """Without numfig, sphinx.sty is loaded with no numbering options."""
    app.builder.build_all()
    for texfile in ('SphinxManual.tex', 'SphinxHowTo.tex'):
        content = (app.outdir / texfile).read_text()
        assert '\\usepackage{sphinx}' in content
@pytest.mark.sphinx(
    'latex', testroot='latex-numfig',
    confoverrides={'numfig': True, 'numfig_secnum_depth': 0})
def test_latex_obey_numfig_secnum_depth_is_zero(app, status, warning):
    """numfig_secnum_depth=0 selects nonumfigreset for both doc classes."""
    app.builder.build_all()
    for texfile in ('SphinxManual.tex', 'SphinxHowTo.tex'):
        content = (app.outdir / texfile).read_text()
        assert '\\usepackage[,nonumfigreset,mathnumfig]{sphinx}' in content
@pytest.mark.sphinx(
    'latex', testroot='latex-numfig',
    confoverrides={'numfig': True, 'numfig_secnum_depth': 2})
def test_latex_obey_numfig_secnum_depth_is_two(app, status, warning):
    """numfig_secnum_depth=2 yields numfigreset=2 (manual) / 3 (howto)."""
    app.builder.build_all()
    # The howto class has one less sectioning level, hence the deeper reset.
    for texfile, option in (('SphinxManual.tex', 'numfigreset=2'),
                            ('SphinxHowTo.tex', 'numfigreset=3')):
        content = (app.outdir / texfile).read_text()
        assert '\\usepackage[,' + option + ',mathnumfig]{sphinx}' in content
@pytest.mark.sphinx(
    'latex', testroot='latex-numfig',
    confoverrides={'numfig': True, 'math_numfig': False})
def test_latex_obey_numfig_but_math_numfig_false(app, status, warning):
    """math_numfig=False drops the mathnumfig option from sphinx.sty."""
    app.builder.build_all()
    for texfile, option in (('SphinxManual.tex', 'numfigreset=1'),
                            ('SphinxHowTo.tex', 'numfigreset=2')):
        content = (app.outdir / texfile).read_text()
        assert '\\usepackage[,' + option + ']{sphinx}' in content
@pytest.mark.sphinx('latex', testroot='basic')
def test_latex_add_latex_package(app, status, warning):
    """Packages registered via add_latex_package() become \\usepackage lines."""
    app.add_latex_package('foo')
    app.add_latex_package('bar', 'baz')  # second argument = package options
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text()
    assert '\\usepackage{foo}' in tex
    assert '\\usepackage[baz]{bar}' in tex
@pytest.mark.sphinx('latex', testroot='latex-babel')
def test_babel_with_no_language_settings(app, status, warning):
    """Default (no language) builds use babel with English settings.

    Checks the document class option, the babel/font/fncychap packages,
    the ``\\captionsenglish`` customisation and the double-quote
    shorthand deactivation, plus the English names in
    ``sphinxmessages.sty``.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,english]{sphinxmanual}' in result
    assert '\\usepackage{babel}' in result
    assert '\\usepackage{tgtermes}' in result
    assert '\\usepackage[Bjarne]{fncychap}' in result
    assert ('\\addto\\captionsenglish{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff{"}' in result

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{page}' in result
    assert r'\addto\captionsenglish{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsenglish{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'de'})
def test_babel_with_language_de(app, status, warning):
    """German builds select the babel 'ngerman' option and Sonny chapters.

    Also checks the German ``\\pageautorefname`` and caption names in
    ``sphinxmessages.sty``.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,ngerman]{sphinxmanual}' in result
    assert '\\usepackage{babel}' in result
    assert '\\usepackage{tgtermes}' in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsngerman{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff{"}' in result

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{Seite}' in result
    assert r'\addto\captionsngerman{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsngerman{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'ru'})
def test_babel_with_language_ru(app, status, warning):
    """Russian builds use babel 'russian' and skip the tgtermes font.

    Also checks the Russian ``\\pageautorefname`` in
    ``sphinxmessages.sty``.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,russian]{sphinxmanual}' in result
    assert '\\usepackage{babel}' in result
    assert '\\usepackage{tgtermes}' not in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsrussian{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff{"}' in result

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{страница}' in result
    assert r'\addto\captionsrussian{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsrussian{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'tr'})
def test_babel_with_language_tr(app, status, warning):
    """Turkish builds use babel 'turkish' and turn off the '=' shorthand.

    Also checks the Turkish ``\\pageautorefname`` in
    ``sphinxmessages.sty``.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,turkish]{sphinxmanual}' in result
    assert '\\usepackage{babel}' in result
    assert '\\usepackage{tgtermes}' in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsturkish{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff{=}' in result

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{sayfa}' in result
    assert r'\addto\captionsturkish{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsturkish{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'ja'})
def test_babel_with_language_ja(app, status, warning):
    """Japanese builds bypass babel entirely.

    No babel package, no fncychap, no shorthand handling; caption
    customisations are emitted directly (``\\@iden`` wrappers in
    ``sphinxmessages.sty``) and the dvipdfmx driver option is used.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,dvipdfmx]{sphinxmanual}' in result
    assert '\\usepackage{babel}' not in result
    assert '\\usepackage{tgtermes}' in result
    assert '\\usepackage[Sonny]{fncychap}' not in result
    assert '\\renewcommand{\\contentsname}{Table of content}\n' in result
    assert '\\shorthandoff' not in result

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{ページ}' in result
    assert '\\@iden{\\renewcommand{\\figurename}{Fig.\\@{} }}' in result
    assert '\\@iden{\\renewcommand{\\tablename}{Table.\\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'unknown'})
def test_babel_with_unknown_language(app, status, warning):
    """An unknown language code falls back to English and warns.

    The build must still succeed with English babel settings, while a
    "no Babel option known" warning is recorded.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,english]{sphinxmanual}' in result
    assert '\\usepackage{babel}' in result
    assert '\\usepackage{tgtermes}' in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsenglish{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff' in result
    assert "WARNING: no Babel option known for language 'unknown'" in warning.getvalue()

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{page}' in result
    assert r'\addto\captionsenglish{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsenglish{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'de', 'latex_engine': 'lualatex'})
def test_polyglossia_with_language_de(app, status, warning):
    """With lualatex, German uses polyglossia (new spelling) not babel.

    ``\\setmainlanguage[spelling=new]{german}`` must be selected, font
    packages and shorthands that only apply to pdflatex/babel are
    omitted.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,german]{sphinxmanual}' in result
    assert '\\usepackage{polyglossia}' in result
    assert '\\setmainlanguage[spelling=new]{german}' in result
    assert '\\usepackage{tgtermes}' not in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsgerman{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff' not in result

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{Seite}' in result
    assert r'\addto\captionsgerman{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsgerman{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx(
    'latex', testroot='latex-babel',
    confoverrides={'language': 'de-1901', 'latex_engine': 'lualatex'})
def test_polyglossia_with_language_de_1901(app, status, warning):
    """The 'de-1901' variant selects polyglossia's old German spelling.

    Identical to :func:`test_polyglossia_with_language_de` except for
    ``spelling=old`` and the (untranslated) default
    ``\\pageautorefname``.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\documentclass[letterpaper,10pt,german]{sphinxmanual}' in result
    assert '\\usepackage{polyglossia}' in result
    assert '\\setmainlanguage[spelling=old]{german}' in result
    assert '\\usepackage{tgtermes}' not in result
    assert '\\usepackage[Sonny]{fncychap}' in result
    assert ('\\addto\\captionsgerman{\\renewcommand{\\contentsname}{Table of content}}\n'
            in result)
    assert '\\shorthandoff' not in result

    # sphinxmessages.sty
    result = (app.outdir / 'sphinxmessages.sty').read_text()
    print(result)
    assert r'\def\pageautorefname{page}' in result
    assert r'\addto\captionsgerman{\renewcommand{\figurename}{Fig.\@{} }}' in result
    assert r'\addto\captionsgerman{\renewcommand{\tablename}{Table.\@{} }}' in result
@pytest.mark.sphinx('latex')
def test_footnote(app, status, warning):
    """Check LaTeX markup for footnotes and citations.

    Covers explicitly numbered, auto-numbered and named footnotes,
    citations (``\\sphinxcite``/``\\bibitem``), and footnotes inside
    table captions/headers/cells, which must be deferred into
    ``footnotetext`` environments after the table row.
    """
    app.builder.build_all()
    result = (app.outdir / 'sphinxtests.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('\\sphinxstepexplicit %\n\\begin{footnote}[1]\\phantomsection'
            '\\label{\\thesphinxscope.1}%\n\\sphinxAtStartFootnote\nnumbered\n%\n'
            '\\end{footnote}') in result
    assert ('\\begin{footnote}[2]\\sphinxAtStartFootnote\nauto numbered\n%\n'
            '\\end{footnote}') in result
    assert '\\begin{footnote}[3]\\sphinxAtStartFootnote\nnamed\n%\n\\end{footnote}' in result
    assert '\\sphinxcite{footnote:bar}' in result
    assert ('\\bibitem[bar]{footnote:bar}\n\\sphinxAtStartPar\ncite\n') in result
    assert '\\sphinxcaption{Table caption \\sphinxfootnotemark[4]' in result
    # Footnotes from the caption and header land after \hline as
    # footnotetext, before the table body continues.
    assert ('\\hline%\n\\begin{footnotetext}[4]'
            '\\phantomsection\\label{\\thesphinxscope.4}%\n'
            '\\sphinxAtStartFootnote\n'
            'footnote in table caption\n%\n\\end{footnotetext}\\ignorespaces %\n'
            '\\begin{footnotetext}[5]'
            '\\phantomsection\\label{\\thesphinxscope.5}%\n'
            '\\sphinxAtStartFootnote\n'
            'footnote in table header\n%\n\\end{footnotetext}\\ignorespaces '
            '\n\\sphinxAtStartPar\n'
            'VIDIOC\\_CROPCAP\n&\n\\sphinxAtStartPar\n') in result
    assert ('Information about VIDIOC\\_CROPCAP %\n'
            '\\begin{footnote}[6]\\sphinxAtStartFootnote\n'
            'footnote in table not in header\n%\n\\end{footnote}\n\\\\\n\\hline\n'
            '\\end{tabulary}\n'
            '\\par\n\\sphinxattableend\\end{savenotes}\n') in result
@pytest.mark.sphinx('latex', testroot='footnotes')
def test_reference_in_caption_and_codeblock_in_footnote(app, status, warning):
    """Check references inside captions/titles and code blocks in footnotes.

    Citations and footnote marks must survive in figure/table captions,
    section titles and rubrics; footnotes raised there are emitted as
    ``footnotetext`` right after the construct, and a literal block
    inside a footnote keeps its ``sphinxVerbatim`` environment.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('\\caption{This is the figure caption with a reference to '
            '\\sphinxcite{index:authoryear}.}' in result)
    assert '\\chapter{The section with a reference to {[}AuthorYear{]}}' in result
    assert ('\\sphinxcaption{The table title with a reference'
            ' to {[}AuthorYear{]}}' in result)
    assert '\\subsubsection*{The rubric title with a reference to {[}AuthorYear{]}}' in result
    assert ('\\chapter{The section with a reference to \\sphinxfootnotemark[5]}\n'
            '\\label{\\detokenize{index:the-section-with-a-reference-to}}'
            '%\n\\begin{footnotetext}[5]'
            '\\phantomsection\\label{\\thesphinxscope.5}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote in section\n%\n\\end{footnotetext}') in result
    assert ('\\caption{This is the figure caption with a footnote to '
            '\\sphinxfootnotemark[7].}\\label{\\detokenize{index:id29}}\\end{figure}\n'
            '%\n\\begin{footnotetext}[7]'
            '\\phantomsection\\label{\\thesphinxscope.7}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote in caption\n%\n\\end{footnotetext}') in result
    assert ('\\sphinxcaption{footnote \\sphinxfootnotemark[8] in '
            'caption of normal table}\\label{\\detokenize{index:id30}}') in result
    assert ('\\caption{footnote \\sphinxfootnotemark[9] '
            'in caption \\sphinxfootnotemark[10] of longtable\\strut}') in result
    # longtable: deferred footnotetexts appear after \endlastfoot.
    assert ('\\endlastfoot\n%\n\\begin{footnotetext}[9]'
            '\\phantomsection\\label{\\thesphinxscope.9}%\n'
            '\\sphinxAtStartFootnote\n'
            'Foot note in longtable\n%\n\\end{footnotetext}\\ignorespaces %\n'
            '\\begin{footnotetext}[10]'
            '\\phantomsection\\label{\\thesphinxscope.10}%\n'
            '\\sphinxAtStartFootnote\n'
            'Second footnote in caption of longtable\n') in result
    assert ('This is a reference to the code\\sphinxhyphen{}block in the footnote:\n'
            '{\\hyperref[\\detokenize{index:codeblockinfootnote}]'
            '{\\sphinxcrossref{\\DUrole{std,std-ref}{I am in a footnote}}}}') in result
    assert ('&\n\\sphinxAtStartPar\nThis is one more footnote with some code in it %\n'
            '\\begin{footnote}[11]\\sphinxAtStartFootnote\n'
            'Third footnote in longtable\n') in result
    assert ('\\end{sphinxVerbatim}\n%\n\\end{footnote}.\n') in result
    assert '\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]' in result
@pytest.mark.sphinx(
    'latex', testroot='footnotes',
    confoverrides={'latex_show_urls': 'inline'})
def test_latex_show_urls_is_inline(app, status, warning):
    """With latex_show_urls='inline', URLs appear in parentheses.

    Hyperlinks keep their ``\\sphinxhref`` form followed by the escaped
    URL inline; footnote numbering is unaffected by URL display, and no
    ``savenotes`` wrapper is needed around definition lists.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('Same footnote number \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'footnote in bar\n%\n\\end{footnote} in bar.rst') in result
    assert ('Auto footnote number %\n\\begin{footnote}[1]\\sphinxAtStartFootnote\n'
            'footnote in baz\n%\n\\end{footnote} in baz.rst') in result
    assert ('\\phantomsection\\label{\\detokenize{index:id32}}'
            '{\\hyperref[\\detokenize{index:the-section'
            '-with-a-reference-to-authoryear}]'
            '{\\sphinxcrossref{The section with a reference to '
            '\\sphinxcite{index:authoryear}}}}') in result
    assert ('\\phantomsection\\label{\\detokenize{index:id33}}'
            '{\\hyperref[\\detokenize{index:the-section-with-a-reference-to}]'
            '{\\sphinxcrossref{The section with a reference to }}}' in result)
    assert ('First footnote: %\n\\begin{footnote}[2]\\sphinxAtStartFootnote\n'
            'First\n%\n\\end{footnote}') in result
    assert ('Second footnote: \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'Second\n%\n\\end{footnote}') in result
    assert '\\sphinxhref{http://sphinx-doc.org/}{Sphinx} (http://sphinx\\sphinxhyphen{}doc.org/)' in result
    assert ('Third footnote: %\n\\begin{footnote}[3]\\sphinxAtStartFootnote\n'
            'Third \\sphinxfootnotemark[4]\n%\n\\end{footnote}%\n'
            '\\begin{footnotetext}[4]'
            '\\phantomsection\\label{\\thesphinxscope.4}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote inside footnote\n%\n\\end{footnotetext}\\ignorespaces') in result
    assert ('\\sphinxhref{http://sphinx-doc.org/~test/}{URL including tilde} '
            '(http://sphinx\\sphinxhyphen{}doc.org/\\textasciitilde{}test/)') in result
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}{URL in term} '
            '(http://sphinx\\sphinxhyphen{}doc.org/)}'
            '\n\\sphinxAtStartPar\nDescription' in result)
    assert ('\\sphinxlineitem{Footnote in term \\sphinxfootnotemark[6]}'
            '%\n\\begin{footnotetext}[6]'
            '\\phantomsection\\label{\\thesphinxscope.6}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote in term\n%\n\\end{footnotetext}\\ignorespaces '
            '\n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}{Term in deflist} '
            '(http://sphinx\\sphinxhyphen{}doc.org/)}'
            '\n\\sphinxAtStartPar\nDescription') in result
    assert '\\sphinxurl{https://github.com/sphinx-doc/sphinx}\n' in result
    assert ('\\sphinxhref{mailto:sphinx-dev@googlegroups.com}'
            '{sphinx\\sphinxhyphen{}dev@googlegroups.com}') in result
    assert '\\begin{savenotes}\\begin{fulllineitems}' not in result
@pytest.mark.sphinx(
    'latex', testroot='footnotes',
    confoverrides={'latex_show_urls': 'footnote'})
def test_latex_show_urls_is_footnote(app, status, warning):
    """With latex_show_urls='footnote', each URL becomes its own footnote.

    The extra URL footnotes shift the auto-numbering compared to the
    'inline' case; URLs in definition-list terms are deferred into
    ``footnotetext`` after the term, and the whole deflist is wrapped in
    ``savenotes``.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('Same footnote number \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'footnote in bar\n%\n\\end{footnote} in bar.rst') in result
    assert ('Auto footnote number %\n\\begin{footnote}[2]\\sphinxAtStartFootnote\n'
            'footnote in baz\n%\n\\end{footnote} in baz.rst') in result
    assert ('\\phantomsection\\label{\\detokenize{index:id32}}'
            '{\\hyperref[\\detokenize{index:the-section-with-a-reference-to-authoryear}]'
            '{\\sphinxcrossref{The section with a reference '
            'to \\sphinxcite{index:authoryear}}}}') in result
    assert ('\\phantomsection\\label{\\detokenize{index:id33}}'
            '{\\hyperref[\\detokenize{index:the-section-with-a-reference-to}]'
            '{\\sphinxcrossref{The section with a reference to }}}') in result
    assert ('First footnote: %\n\\begin{footnote}[3]\\sphinxAtStartFootnote\n'
            'First\n%\n\\end{footnote}') in result
    assert ('Second footnote: \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'Second\n%\n\\end{footnote}') in result
    assert ('\\sphinxhref{http://sphinx-doc.org/}{Sphinx}'
            '%\n\\begin{footnote}[4]\\sphinxAtStartFootnote\n'
            '\\sphinxnolinkurl{http://sphinx-doc.org/}\n%\n\\end{footnote}') in result
    assert ('Third footnote: %\n\\begin{footnote}[6]\\sphinxAtStartFootnote\n'
            'Third \\sphinxfootnotemark[7]\n%\n\\end{footnote}%\n'
            '\\begin{footnotetext}[7]'
            '\\phantomsection\\label{\\thesphinxscope.7}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote inside footnote\n%\n'
            '\\end{footnotetext}\\ignorespaces') in result
    assert ('\\sphinxhref{http://sphinx-doc.org/~test/}{URL including tilde}'
            '%\n\\begin{footnote}[5]\\sphinxAtStartFootnote\n'
            '\\sphinxnolinkurl{http://sphinx-doc.org/~test/}\n%\n\\end{footnote}') in result
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}'
            '{URL in term}\\sphinxfootnotemark[9]}'
            '%\n\\begin{footnotetext}[9]'
            '\\phantomsection\\label{\\thesphinxscope.9}%\n'
            '\\sphinxAtStartFootnote\n'
            '\\sphinxnolinkurl{http://sphinx-doc.org/}\n%\n'
            '\\end{footnotetext}\\ignorespaces \n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxlineitem{Footnote in term \\sphinxfootnotemark[11]}'
            '%\n\\begin{footnotetext}[11]'
            '\\phantomsection\\label{\\thesphinxscope.11}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote in term\n%\n\\end{footnotetext}\\ignorespaces '
            '\n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}{Term in deflist}'
            '\\sphinxfootnotemark[10]}'
            '%\n\\begin{footnotetext}[10]'
            '\\phantomsection\\label{\\thesphinxscope.10}%\n'
            '\\sphinxAtStartFootnote\n'
            '\\sphinxnolinkurl{http://sphinx-doc.org/}\n%\n'
            '\\end{footnotetext}\\ignorespaces \n\\sphinxAtStartPar\nDescription') in result
    # Bare URLs and mailto links never get a URL footnote.
    assert ('\\sphinxurl{https://github.com/sphinx-doc/sphinx}\n' in result)
    assert ('\\sphinxhref{mailto:sphinx-dev@googlegroups.com}'
            '{sphinx\\sphinxhyphen{}dev@googlegroups.com}\n') in result
    assert '\\begin{savenotes}\\begin{fulllineitems}' in result
@pytest.mark.sphinx(
    'latex', testroot='footnotes',
    confoverrides={'latex_show_urls': 'no'})
def test_latex_show_urls_is_no(app, status, warning):
    """With latex_show_urls='no', URLs render as plain hyperlinks with no
    footnotes attached; ordinary footnotes keep sequential numbering."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    # Explicitly numbered footnotes keep their number within each document.
    assert ('Same footnote number \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'footnote in bar\n%\n\\end{footnote} in bar.rst') in result
    assert ('Auto footnote number %\n\\begin{footnote}[1]\\sphinxAtStartFootnote\n'
            'footnote in baz\n%\n\\end{footnote} in baz.rst') in result
    assert ('\\phantomsection\\label{\\detokenize{index:id32}}'
            '{\\hyperref[\\detokenize{index:the-section-with-a-reference-to-authoryear}]'
            '{\\sphinxcrossref{The section with a reference '
            'to \\sphinxcite{index:authoryear}}}}') in result
    assert ('\\phantomsection\\label{\\detokenize{index:id33}}'
            '{\\hyperref[\\detokenize{index:the-section-with-a-reference-to}]'
            '{\\sphinxcrossref{The section with a reference to }}}' in result)
    assert ('First footnote: %\n\\begin{footnote}[2]\\sphinxAtStartFootnote\n'
            'First\n%\n\\end{footnote}') in result
    assert ('Second footnote: \\sphinxstepexplicit %\n'
            '\\begin{footnote}[1]\\phantomsection\\label{\\thesphinxscope.1}%\n'
            '\\sphinxAtStartFootnote\n'
            'Second\n%\n\\end{footnote}') in result
    # URLs appear only as \sphinxhref links -- no footnote bodies.
    assert '\\sphinxhref{http://sphinx-doc.org/}{Sphinx}' in result
    assert ('Third footnote: %\n\\begin{footnote}[3]\\sphinxAtStartFootnote\n'
            'Third \\sphinxfootnotemark[4]\n%\n\\end{footnote}%\n'
            '\\begin{footnotetext}[4]'
            '\\phantomsection\\label{\\thesphinxscope.4}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote inside footnote\n%\n\\end{footnotetext}\\ignorespaces') in result
    assert '\\sphinxhref{http://sphinx-doc.org/~test/}{URL including tilde}' in result
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}{URL in term}}'
            '\n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxlineitem{Footnote in term \\sphinxfootnotemark[6]}'
            '%\n\\begin{footnotetext}[6]'
            '\\phantomsection\\label{\\thesphinxscope.6}%\n'
            '\\sphinxAtStartFootnote\n'
            'Footnote in term\n%\n\\end{footnotetext}\\ignorespaces '
            '\n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxlineitem{\\sphinxhref{http://sphinx-doc.org/}{Term in deflist}}'
            '\n\\sphinxAtStartPar\nDescription') in result
    assert ('\\sphinxurl{https://github.com/sphinx-doc/sphinx}\n' in result)
    assert ('\\sphinxhref{mailto:sphinx-dev@googlegroups.com}'
            '{sphinx\\sphinxhyphen{}dev@googlegroups.com}\n') in result
    # No footnotes for URLs means no savenotes wrapper around deflists.
    assert '\\begin{savenotes}\\begin{fulllineitems}' not in result
@pytest.mark.sphinx(
    'latex', testroot='footnotes',
    confoverrides={'latex_show_urls': 'footnote',
                   'rst_prolog': '.. |URL| replace:: `text <http://www.example.com/>`__'})
def test_latex_show_urls_footnote_and_substitutions(app, status, warning):
    # hyperlinks inside substitution definitions should not generate extra
    # footnotes, so the output must be identical to the plain case (refs: #4784)
    test_latex_show_urls_is_footnote(app, status, warning)
@pytest.mark.sphinx('latex', testroot='image-in-section')
def test_image_in_section(app, status, warning):
    """Images inside section titles are embedded into the sectioning command,
    with a plain-text short title for the table of contents."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert ('\\chapter[Test section]{\\lowercase{\\sphinxincludegraphics'
            '[width=15bp,height=15bp]}{{pic}.png} Test section}'
            in result)
    assert ('\\chapter[Other {[}blah{]} section]{Other {[}blah{]} '
            '\\lowercase{\\sphinxincludegraphics[width=15bp,height=15bp]}'
            '{{pic}.png} section}' in result)
    assert ('\\chapter{Another section}' in result)
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={'latex_logo': 'notfound.jpg'})
def test_latex_logo_if_not_found(app, status, warning):
    """A missing latex_logo file must abort the build with SphinxError.

    Rewritten with ``pytest.raises``: the old ``try/except Exception`` plus
    ``assert False`` pattern reported a bare ``assert False`` on failure and
    buried the real traceback.
    """
    with pytest.raises(SphinxError):
        app.builder.build_all()
@pytest.mark.sphinx('latex', testroot='toctree-maxdepth')
def test_toctree_maxdepth_manual(app, status, warning):
    """A toctree ``:maxdepth:`` in a 'manual' document sets tocdepth only."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\setcounter{tocdepth}{1}' in tex
    assert '\\setcounter{secnumdepth}' not in tex
    assert '\\chapter{Foo}' in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_documents': [
        ('index', 'python.tex', 'Sphinx Tests Documentation',
         'Georg Brandl', 'howto'),
    ]})
def test_toctree_maxdepth_howto(app, status, warning):
    """In a 'howto' document the top level unit is a section, so the same
    :maxdepth: maps to tocdepth 2."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\setcounter{tocdepth}{2}' in result
    assert '\\setcounter{secnumdepth}' not in result
    assert '\\section{Foo}' in result
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'root_doc': 'foo'})
def test_toctree_not_found(app, status, warning):
    """A root document without a toctree emits neither depth counter."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\setcounter{tocdepth}' not in result
    assert '\\setcounter{secnumdepth}' not in result
    assert '\\chapter{Foo A}' in result
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'root_doc': 'bar'})
def test_toctree_without_maxdepth(app, status, warning):
    """A toctree with no ``:maxdepth:`` leaves both LaTeX depth counters unset."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    print(tex)
    print(status.getvalue())
    print(warning.getvalue())
    for counter in ('tocdepth', 'secnumdepth'):
        assert '\\setcounter{%s}' % counter not in tex
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'root_doc': 'qux'})
def test_toctree_with_deeper_maxdepth(app, status, warning):
    """A deeper-than-default :maxdepth: sets both counters accordingly."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\setcounter{tocdepth}{3}' in result
    assert '\\setcounter{secnumdepth}{3}' in result
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': None})
def test_latex_toplevel_sectioning_is_None(app, status, warning):
    """latex_toplevel_sectioning=None falls back to the theme default
    (chapters for the 'manual' class)."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\chapter{Foo}' in result
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': 'part'})
def test_latex_toplevel_sectioning_is_part(app, status, warning):
    """'part' promotes top-level docs to parts; children become chapters."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\part{Foo}' in result
    assert '\\chapter{Foo A}' in result
    assert '\\chapter{Foo B}' in result
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': 'part',
                   'latex_documents': [
                       ('index', 'python.tex', 'Sphinx Tests Documentation',
                        'Georg Brandl', 'howto')
                   ]})
def test_latex_toplevel_sectioning_is_part_with_howto(app, status, warning):
    """'part' in a 'howto' document: children become sections (no chapters)."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\part{Foo}' in result
    assert '\\section{Foo A}' in result
    assert '\\section{Foo B}' in result
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': 'chapter'})
def test_latex_toplevel_sectioning_is_chapter(app, status, warning):
    """'chapter' keeps chapters at the top level in a 'manual' document."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\chapter{Foo}' in result
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': 'chapter',
                   'latex_documents': [
                       ('index', 'python.tex', 'Sphinx Tests Documentation',
                        'Georg Brandl', 'howto')
                   ]})
def test_latex_toplevel_sectioning_is_chapter_with_howto(app, status, warning):
    """'chapter' in a 'howto' document degrades to sections (howto has no
    chapter level)."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\section{Foo}' in result
@pytest.mark.sphinx(
    'latex', testroot='toctree-maxdepth',
    confoverrides={'latex_toplevel_sectioning': 'section'})
def test_latex_toplevel_sectioning_is_section(app, status, warning):
    """'section' puts sections at the top level of the document."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    assert '\\section{Foo}' in result
@skip_if_stylefiles_notfound
@pytest.mark.sphinx('latex', testroot='maxlistdepth')
def test_maxlistdepth_at_ten(app, status, warning):
    """Lists nested ten levels deep still produce LaTeX that compiles
    (requires a local TeX installation; skipped otherwise)."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    print(status.getvalue())
    print(warning.getvalue())
    # The real check is that pdflatex accepts the output.
    compile_latex_document(app, 'python.tex')
@pytest.mark.sphinx('latex', testroot='latex-table')
@pytest.mark.test_params(shared_result='latex-table')
def test_latex_table_tabulars(app, status, warning):
    """Each tabular/tabulary variant matches its pre-rendered expectation.

    The section bodies are parsed out of python.tex, then compared against
    the fixture files under ``expects/``.  The eleven hand-unrolled
    actual/expected pairs were collapsed into a single data-driven loop.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    # Map each section title onto the LaTeX emitted for that section.
    tables = {}
    for chap in re.split(r'\\(?:section|chapter){', result)[1:]:
        sectname, content = chap.split('}', 1)
        tables[sectname] = content.strip()

    def get_expected(name):
        # Expected LaTeX lives next to the testroot sources.
        return (app.srcdir / 'expects' / (name + '.tex')).read_text().strip()

    # (section title in the document, expectation fixture name)
    cases = [
        ('simple table', 'simple_table'),
        ('table having :widths: option', 'table_having_widths'),
        ('table having :align: option (tabulary)', 'tabulary_having_widths'),
        ('table having :align: option (tabular)', 'tabular_having_widths'),
        ('table with tabularcolumn', 'tabularcolumn'),
        ('table with cell in first column having three paragraphs',
         'table_having_threeparagraphs_cell_in_first_col'),
        ('table having caption', 'table_having_caption'),
        ('table having verbatim', 'table_having_verbatim'),
        ('table having problematic cell', 'table_having_problematic_cell'),
        ('table having both :widths: and problematic cell',
         'table_having_widths_and_problematic_cell'),
        ('table having both stub columns and problematic cell',
         'table_having_stub_columns_and_problematic_cell'),
    ]
    for sectname, expect_name in cases:
        # The message names the failing variant, since all run in one test.
        assert tables[sectname] == get_expected(expect_name), sectname
@pytest.mark.sphinx('latex', testroot='latex-table')
@pytest.mark.test_params(shared_result='latex-table')
def test_latex_table_longtable(app, status, warning):
    """Each longtable variant matches its pre-rendered expectation.

    Same data-driven structure as test_latex_table_tabulars; the nine
    copy-pasted actual/expected pairs were collapsed into one loop.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    # Map each section title onto the LaTeX emitted for that section.
    tables = {}
    for chap in re.split(r'\\(?:section|chapter){', result)[1:]:
        sectname, content = chap.split('}', 1)
        tables[sectname] = content.strip()

    def get_expected(name):
        return (app.srcdir / 'expects' / (name + '.tex')).read_text().strip()

    # (section title in the document, expectation fixture name)
    cases = [
        ('longtable', 'longtable'),
        ('longtable having :widths: option', 'longtable_having_widths'),
        ('longtable having :align: option', 'longtable_having_align'),
        ('longtable with tabularcolumn', 'longtable_with_tabularcolumn'),
        ('longtable having caption', 'longtable_having_caption'),
        ('longtable having verbatim', 'longtable_having_verbatim'),
        ('longtable having problematic cell', 'longtable_having_problematic_cell'),
        ('longtable having both :widths: and problematic cell',
         'longtable_having_widths_and_problematic_cell'),
        ('longtable having both stub columns and problematic cell',
         'longtable_having_stub_columns_and_problematic_cell'),
    ]
    for sectname, expect_name in cases:
        assert tables[sectname] == get_expected(expect_name), sectname
@pytest.mark.sphinx('latex', testroot='latex-table')
@pytest.mark.test_params(shared_result='latex-table')
def test_latex_table_complex_tables(app, status, warning):
    """Grid tables and spanning cells match their expectation fixtures.

    Kept structurally identical to the other table tests (data-driven
    loop) for consistency.  Note the split regex here also matches
    ``\\renewcommand`` to pick up the last section of the document.
    """
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    tables = {}
    for chap in re.split(r'\\(?:section|renewcommand){', result)[1:]:
        sectname, content = chap.split('}', 1)
        tables[sectname] = content.strip()

    def get_expected(name):
        return (app.srcdir / 'expects' / (name + '.tex')).read_text().strip()

    cases = [
        ('grid table', 'gridtable'),
        ('complex spanning cell', 'complex_spanning_cell'),
    ]
    for sectname, expect_name in cases:
        assert tables[sectname] == get_expected(expect_name), sectname
@pytest.mark.sphinx('latex', testroot='latex-table',
                    confoverrides={'templates_path': ['_mytemplates/latex']})
def test_latex_table_custom_template_caseA(app, status, warning):
    """A longtable template under ``templates_path``/latex is picked up."""
    app.builder.build_all()
    output = (app.outdir / 'python.tex').read_text()
    assert 'SALUT LES COPAINS' in output
@pytest.mark.sphinx('latex', testroot='latex-table',
                    confoverrides={'templates_path': ['_mytemplates']})
def test_latex_table_custom_template_caseB(app, status, warning):
    """A template directly under ``templates_path`` (no latex/ dir) is ignored."""
    app.builder.build_all()
    output = (app.outdir / 'python.tex').read_text()
    assert 'SALUT LES COPAINS' not in output
@pytest.mark.sphinx('latex', testroot='latex-table')
@pytest.mark.test_params(shared_result='latex-table')
def test_latex_table_custom_template_caseC(app, status, warning):
    """Without ``templates_path`` no custom longtable template is applied."""
    app.builder.build_all()
    output = (app.outdir / 'python.tex').read_text()
    assert 'SALUT LES COPAINS' not in output
@pytest.mark.sphinx('latex', testroot='directives-raw')
def test_latex_raw_directive(app, status, warning):
    """Only raw directives targeting the latex format survive in the output."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    # standard case: html-only raw content is dropped, latex content kept
    assert 'standalone raw directive (HTML)' not in result
    assert ('\\label{\\detokenize{index:id1}}\n'
            'standalone raw directive (LaTeX)' in result)
    # with substitution: |abc| expands differently per output format
    assert 'HTML: abc ghi' in result
    assert 'LaTeX: abc def ghi' in result
@pytest.mark.sphinx('latex', testroot='images')
def test_latex_images(app, status, warning):
    """Local images are copied into outdir; unreachable remote images only
    produce a warning; images with targets are wrapped in hyperlinks."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    # images are copied
    assert '\\sphinxincludegraphics{{python-logo}.png}' in result
    assert (app.outdir / 'python-logo.png').exists()
    # not found images: no includegraphics, just a fetch warning
    assert '\\sphinxincludegraphics{{NOT_EXIST}.PNG}' not in result
    assert ('WARNING: Could not fetch remote image: '
            'https://www.google.com/NOT_EXIST.PNG [404]' in warning.getvalue())
    # an image having target
    assert ('\\sphinxhref{https://www.sphinx-doc.org/}'
            '{\\sphinxincludegraphics{{rimg}.png}}\n\n' in result)
    # a centered image having target
    assert ('\\sphinxhref{https://www.python.org/}{{\\hspace*{\\fill}'
            '\\sphinxincludegraphics{{rimg}.png}\\hspace*{\\fill}}}\n\n' in result)
@pytest.mark.sphinx('latex', testroot='latex-index')
def test_latex_index(app, status, warning):
    """Index entries are emitted inline as index/spxentry macros, with
    special characters (curly braces) escaped."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    assert ('A \\index{famous@\\spxentry{famous}}famous '
            '\\index{equation@\\spxentry{equation}}equation:\n' in result)
    assert ('\n\\index{Einstein@\\spxentry{Einstein}}'
            '\\index{relativity@\\spxentry{relativity}}'
            '\\ignorespaces \n\\sphinxAtStartPar\nand') in result
    assert ('\n\\index{main \\sphinxleftcurlybrace{}@\\spxentry{'
            'main \\sphinxleftcurlybrace{}}}\\ignorespaces ' in result)
@pytest.mark.sphinx('latex', testroot='latex-equations')
def test_latex_equations(app, status, warning):
    """Rendered math must match the pre-built expectation file verbatim."""
    app.builder.build_all()
    actual = (app.outdir / 'python.tex').read_text()
    expected = (app.srcdir / 'expects' / 'latex-equations.tex').read_text().strip()
    assert expected in actual
@pytest.mark.sphinx('latex', testroot='image-in-parsed-literal')
def test_latex_image_in_parsed_literal(app, status, warning):
    """Images inside parsed-literal blocks are raiseboxed and do not break
    the surrounding verbatim-like environment."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    assert ('{\\sphinxunactivateextrasandspace \\raisebox{-0.5\\height}'
            '{\\sphinxincludegraphics[height=2.00000cm]{{pic}.png}}'
            '}AFTER') in result
@pytest.mark.sphinx('latex', testroot='nested-enumerated-list')
def test_latex_nested_enumerated_list(app, status, warning):
    """Nested enumerated lists keep their numbering style (arabic/alpha/roman),
    prefix/suffix characters, and explicit start values per nesting level."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    assert ('\\sphinxsetlistlabels{\\arabic}{enumi}{enumii}{}{.}%\n'
            '\\setcounter{enumi}{4}\n' in result)
    assert ('\\sphinxsetlistlabels{\\alph}{enumii}{enumiii}{}{.}%\n'
            '\\setcounter{enumii}{3}\n' in result)
    assert ('\\sphinxsetlistlabels{\\arabic}{enumiii}{enumiv}{}{)}%\n'
            '\\setcounter{enumiii}{9}\n' in result)
    assert ('\\sphinxsetlistlabels{\\arabic}{enumiv}{enumv}{(}{)}%\n'
            '\\setcounter{enumiv}{23}\n' in result)
    assert ('\\sphinxsetlistlabels{\\roman}{enumii}{enumiii}{}{.}%\n'
            '\\setcounter{enumii}{2}\n' in result)
@pytest.mark.sphinx('latex', testroot='footnotes')
def test_latex_thebibliography(app, status, warning):
    """Citations are collected into a sphinxthebibliography environment and
    referenced via sphinxcite."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    assert ('\\begin{sphinxthebibliography}{AuthorYe}\n'
            '\\bibitem[AuthorYear]{index:authoryear}\n\\sphinxAtStartPar\n'
            'Author, Title, Year\n'
            '\\end{sphinxthebibliography}\n' in result)
    assert '\\sphinxcite{index:authoryear}' in result
@pytest.mark.sphinx('latex', testroot='glossary')
def test_latex_glossary(app, status, warning):
    """Glossary terms get spxpagem-styled index entries plus detokenized
    labels; non-ASCII terms are slugified in the label (ähnlich -> ahnlich)."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    assert (r'\sphinxlineitem{ähnlich\index{ähnlich@\spxentry{ähnlich}|spxpagem}'
            r'\phantomsection'
            r'\label{\detokenize{index:term-ahnlich}}}' in result)
    assert (r'\sphinxlineitem{boson\index{boson@\spxentry{boson}|spxpagem}\phantomsection'
            r'\label{\detokenize{index:term-boson}}}' in result)
    assert (r'\sphinxlineitem{\sphinxstyleemphasis{fermion}'
            r'\index{fermion@\spxentry{fermion}|spxpagem}'
            r'\phantomsection'
            r'\label{\detokenize{index:term-fermion}}}' in result)
    # Multiple terms sharing one definition each get their own line item.
    assert (r'\sphinxlineitem{tauon\index{tauon@\spxentry{tauon}|spxpagem}\phantomsection'
            r'\label{\detokenize{index:term-tauon}}}'
            r'\sphinxlineitem{myon\index{myon@\spxentry{myon}|spxpagem}\phantomsection'
            r'\label{\detokenize{index:term-myon}}}'
            r'\sphinxlineitem{electron\index{electron@\spxentry{electron}|spxpagem}\phantomsection'
            r'\label{\detokenize{index:term-electron}}}' in result)
    assert (r'\sphinxlineitem{über\index{über@\spxentry{über}|spxpagem}\phantomsection'
            r'\label{\detokenize{index:term-uber}}}' in result)
@pytest.mark.sphinx('latex', testroot='latex-labels')
def test_latex_labels(app, status, warning):
    """Multiple reST targets on one node all become LaTeX labels, for
    figures, code blocks, tables and sections alike."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    # figures
    assert (r'\caption{labeled figure}'
            r'\label{\detokenize{index:id1}}'
            r'\label{\detokenize{index:figure2}}'
            r'\label{\detokenize{index:figure1}}'
            r'\end{figure}' in result)
    assert (r'\caption{labeled figure}'
            '\\label{\\detokenize{index:figure3}}\n'
            '\\begin{sphinxlegend}\n\\sphinxAtStartPar\n'
            'with a legend\n\\end{sphinxlegend}\n'
            r'\end{figure}' in result)
    # code-blocks
    assert (r'\def\sphinxLiteralBlockLabel{'
            r'\label{\detokenize{index:codeblock2}}'
            r'\label{\detokenize{index:codeblock1}}}' in result)
    assert (r'\def\sphinxLiteralBlockLabel{'
            r'\label{\detokenize{index:codeblock3}}}' in result)
    # tables
    assert (r'\sphinxcaption{table caption}'
            r'\label{\detokenize{index:id2}}'
            r'\label{\detokenize{index:table2}}'
            r'\label{\detokenize{index:table1}}' in result)
    assert (r'\sphinxcaption{table caption}'
            r'\label{\detokenize{index:table3}}' in result)
    # sections
    assert ('\\chapter{subsection}\n'
            r'\label{\detokenize{index:subsection}}'
            r'\label{\detokenize{index:section2}}'
            r'\label{\detokenize{index:section1}}' in result)
    assert ('\\section{subsubsection}\n'
            r'\label{\detokenize{index:subsubsection}}'
            r'\label{\detokenize{index:section3}}' in result)
    assert ('\\subsection{otherdoc}\n'
            r'\label{\detokenize{otherdoc:otherdoc}}'
            r'\label{\detokenize{otherdoc::doc}}' in result)
    # Embedded standalone hyperlink reference must not duplicate the label (refs: #5948)
    assert result.count(r'\label{\detokenize{index:section1}}') == 1
@pytest.mark.sphinx('latex', testroot='latex-figure-in-admonition')
def test_latex_figure_in_admonition(app, status, warning):
    """Figures inside admonitions must use the [H] (here) float placement."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    # ``assert`` is a statement, not a function: the old ``assert(...)``
    # call-style spelling is the classic assert-on-tuple trap waiting to
    # happen, so the parentheses are dropped.
    assert r'\begin{figure}[H]' in result
def test_default_latex_documents():
    """default_latex_documents() builds a latex_documents tuple with the
    project name slugified into the filename and author LaTeX-escaped."""
    from sphinx.util import texescape
    texescape.init()  # escape tables must be populated before escaping
    config = Config({'root_doc': 'index',
                     'project': 'STASI™ Documentation',
                     'author': "Wolfgang Schäuble & G'Beckstein."})
    config.init_values()
    config.add('latex_engine', None, True, None)
    config.add('latex_theme', 'manual', True, None)
    expected = [('index', 'stasi.tex', 'STASI™ Documentation',
                 r"Wolfgang Schäuble \& G\textquotesingle{}Beckstein.\@{}", 'manual')]
    assert default_latex_documents(config) == expected
@skip_if_requested
@skip_if_stylefiles_notfound
@pytest.mark.sphinx('latex', testroot='latex-includegraphics')
def test_includegraphics_oversized(app, status, warning):
    """Oversized images are scaled so the document still compiles
    (requires a local TeX installation; skipped otherwise)."""
    app.builder.build_all()
    print(status.getvalue())
    print(warning.getvalue())
    # The real check is that pdflatex accepts the output.
    compile_latex_document(app)
@pytest.mark.sphinx('latex', testroot='index_on_title')
def test_index_on_title(app, status, warning):
    """Index markup inside a top-level title survives, emitted after the
    chapter label."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    assert ('\\chapter{Test for index in top level title}\n'
            '\\label{\\detokenize{contents:test-for-index-in-top-level-title}}'
            '\\index{index@\\spxentry{index}}\n'
            in result)
@pytest.mark.sphinx('latex', testroot='latex-unicode',
                    confoverrides={'latex_engine': 'pdflatex'})
def test_texescape_for_non_unicode_supported_engine(app, status, warning):
    """pdflatex cannot handle exotic unicode: script/double-struck letters
    are folded to ASCII and super/subscripts become math-mode escapes."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    assert 'script small e: e' in result
    assert 'double struck italic small i: i' in result
    assert r'superscript: \(\sp{\text{0}}\), \(\sp{\text{1}}\)' in result
    assert r'subscript: \(\sb{\text{0}}\), \(\sb{\text{1}}\)' in result
@pytest.mark.sphinx('latex', testroot='latex-unicode',
                    confoverrides={'latex_engine': 'xelatex'})
def test_texescape_for_unicode_supported_engine(app, status, warning):
    """xelatex handles unicode natively, so super/subscript characters are
    passed through unescaped."""
    app.builder.build_all()
    result = (app.outdir / 'python.tex').read_text()
    print(result)
    assert 'script small e: e' in result
    assert 'double struck italic small i: i' in result
    assert 'superscript: ⁰, ¹' in result
    assert 'subscript: ₀, ₁' in result
@pytest.mark.sphinx('latex', testroot='basic',
                    confoverrides={'latex_elements': {'extrapackages': r'\usepackage{foo}'}})
def test_latex_elements_extrapackages(app, status, warning):
    """latex_elements['extrapackages'] is injected into the preamble."""
    app.builder.build_all()
    tex = (app.outdir / 'test.tex').read_text()
    assert r'\usepackage{foo}' in tex
@pytest.mark.sphinx('latex', testroot='nested-tables')
def test_latex_nested_tables(app, status, warning):
    """Building nested tables must not emit any warnings."""
    app.builder.build_all()
    assert warning.getvalue() == ''
@pytest.mark.sphinx('latex', testroot='latex-container')
def test_latex_container(app, status, warning):
    """A ``container::`` with a class maps onto a sphinxuseclass environment."""
    app.builder.build_all()
    tex = (app.outdir / 'python.tex').read_text()
    for marker in (r'\begin{sphinxuseclass}{classname}', r'\end{sphinxuseclass}'):
        assert marker in tex
| true | true |
1c3a8c414a28b8a4e64b1e5af1db385c22b871fa | 2,894 | py | Python | atari_environment.py | geek-guild/async-rl | b208b023541cae468ca4c9eceec590b9bfd71abd | [
"MIT"
] | null | null | null | atari_environment.py | geek-guild/async-rl | b208b023541cae468ca4c9eceec590b9bfd71abd | [
"MIT"
] | null | null | null | atari_environment.py | geek-guild/async-rl | b208b023541cae468ca4c9eceec590b9bfd71abd | [
"MIT"
] | null | null | null | import tensorflow as tf
from skimage.transform import resize
from skimage.color import rgb2gray
import numpy as np
from collections import deque
class AtariEnvironment(object):
    """
    Small wrapper for gym atari environments.
    Responsible for preprocessing screens and holding on to a screen buffer
    of size agent_history_length from which environment state
    is constructed.
    """
    def __init__(self, gym_env, resized_width, resized_height, agent_history_length):
        """
        gym_env: a gym environment (must expose .action_space.n and .spec.id)
        resized_width/resized_height: target size of preprocessed frames
        agent_history_length: number of consecutive frames forming a state
        """
        self.env = gym_env
        self.resized_width = resized_width
        self.resized_height = resized_height
        self.agent_history_length = agent_history_length

        self.gym_actions = range(gym_env.action_space.n)
        if (gym_env.spec.id == "Pong-v0" or gym_env.spec.id == "Breakout-v0"):
            print("Doing workaround for pong or breakout")
            # Gym returns 6 possible actions for breakout and pong.
            # Only three are used, the rest are no-ops. This just lets us
            # pick from a simplified "LEFT", "RIGHT", "NOOP" action space.
            self.gym_actions = [1, 2, 3]

        # Screen buffer of size AGENT_HISTORY_LENGTH to be able
        # to build state arrays of size [1, AGENT_HISTORY_LENGTH, width, height]
        self.state_buffer = deque()

    def get_initial_state(self):
        """
        Resets the atari game and clears the state buffer; returns the
        initial state (the first frame repeated agent_history_length times).
        """
        # Clear the state buffer
        self.state_buffer = deque()

        x_t = self.env.reset()
        x_t = self.get_preprocessed_frame(x_t)
        # FIX: the stack depth was hard-coded to 4 frames, silently breaking
        # any agent_history_length != 4; repeat the frame history-length times.
        s_t = np.stack([x_t] * self.agent_history_length, axis=0)

        for i in range(self.agent_history_length - 1):
            self.state_buffer.append(x_t)
        return s_t

    def get_preprocessed_frame(self, observation):
        """
        See Methods->Preprocessing in Mnih et al.
        1) Get image grayscale
        2) Rescale image
        """
        return resize(rgb2gray(observation), (self.resized_width, self.resized_height))

    def step(self, action_index):
        """
        Executes an action in the gym environment.
        Builds current state (concatenation of agent_history_length-1 previous
        frames and current one). Pops oldest frame, adds current frame to the
        state buffer. Returns (state, reward, terminal, info).
        """
        x_t1, r_t, terminal, info = self.env.step(self.gym_actions[action_index])
        x_t1 = self.get_preprocessed_frame(x_t1)

        previous_frames = np.array(self.state_buffer)
        s_t1 = np.empty((self.agent_history_length, self.resized_height, self.resized_width))
        s_t1[:self.agent_history_length - 1, ...] = previous_frames
        s_t1[self.agent_history_length - 1] = x_t1

        # Pop the oldest frame, add the current frame to the queue
        self.state_buffer.popleft()
        self.state_buffer.append(x_t1)

        return s_t1, r_t, terminal, info
| 38.078947 | 103 | 0.660677 | import tensorflow as tf
from skimage.transform import resize
from skimage.color import rgb2gray
import numpy as np
from collections import deque
class AtariEnvironment(object):
    """Wrapper around a gym Atari environment: preprocesses screens and keeps
    a deque of the most recent frames from which the agent state is built."""
    def __init__(self, gym_env, resized_width, resized_height, agent_history_length):
        self.env = gym_env
        self.resized_width = resized_width
        self.resized_height = resized_height
        self.agent_history_length = agent_history_length
        self.gym_actions = range(gym_env.action_space.n)
        if (gym_env.spec.id == "Pong-v0" or gym_env.spec.id == "Breakout-v0"):
            print("Doing workaround for pong or breakout")
            # Pong/Breakout expose 6 actions but only three distinct ones;
            # restrict to those action indices.
            self.gym_actions = [1,2,3]
        # Holds the agent_history_length-1 most recent preprocessed frames.
        self.state_buffer = deque()
    def get_initial_state(self):
        """Reset the game, clear the frame buffer, return the initial state."""
        self.state_buffer = deque()
        x_t = self.env.reset()
        x_t = self.get_preprocessed_frame(x_t)
        # NOTE(review): the stack depth is hard-coded to 4 frames here --
        # presumably agent_history_length is always 4; confirm before reusing
        # this class with a different history length.
        s_t = np.stack((x_t, x_t, x_t, x_t), axis = 0)
        for i in range(self.agent_history_length-1):
            self.state_buffer.append(x_t)
        return s_t
    def get_preprocessed_frame(self, observation):
        """Grayscale + resize a raw observation (Mnih et al. preprocessing)."""
        return resize(rgb2gray(observation), (self.resized_width, self.resized_height))
    def step(self, action_index):
        """Execute one action; return (state, reward, terminal, info)."""
        x_t1, r_t, terminal, info = self.env.step(self.gym_actions[action_index])
        x_t1 = self.get_preprocessed_frame(x_t1)
        previous_frames = np.array(self.state_buffer)
        s_t1 = np.empty((self.agent_history_length, self.resized_height, self.resized_width))
        s_t1[:self.agent_history_length-1, ...] = previous_frames
        s_t1[self.agent_history_length-1] = x_t1
        # Drop the oldest frame and append the newest one.
        self.state_buffer.popleft()
        self.state_buffer.append(x_t1)
        return s_t1, r_t, terminal, info
| true | true |
1c3a8c56543c4ec65cc7575eab9aa5d09bc0d6f9 | 3,668 | py | Python | toontown/classicchars/DistributedMickey.py | LittleNed/toontown-stride | 1252a8f9a8816c1810106006d09c8bdfe6ad1e57 | [
"Apache-2.0"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | toontown/classicchars/DistributedMickey.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | null | null | null | toontown/classicchars/DistributedMickey.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | 4 | 2019-06-20T23:45:23.000Z | 2020-10-14T20:30:15.000Z | from pandac.PandaModules import *
from toontown.classicchars import DistributedCCharBase
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.classicchars import CharStateDatas
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.hood import DGHood
class DistributedMickey(DistributedCCharBase.DistributedCCharBase):
    """Client-side classic character Mickey, driven by an Off/Neutral/Walk FSM."""
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMickey')

    def __init__(self, cr):
        # Guard against double initialization.  The original used
        # ``try: self.X; return / except:`` with a bare except, which also
        # swallowed any real error raised during construction.
        if hasattr(self, 'DistributedMickey_initialized'):
            return
        self.DistributedMickey_initialized = 1
        DistributedCCharBase.DistributedCCharBase.__init__(self, cr, TTLocalizer.Mickey, 'mk')
        self.fsm = ClassicFSM.ClassicFSM(self.getName(), [
            State.State('Off', self.enterOff, self.exitOff, ['Neutral']),
            State.State('Neutral', self.enterNeutral, self.exitNeutral, ['Walk']),
            State.State('Walk', self.enterWalk, self.exitWalk, ['Neutral'])],
            'Off', 'Off')
        self.fsm.enterInitialState()
        self.handleHolidays()

    def disable(self):
        """Tear down the state data and park the FSM in its final state."""
        self.fsm.requestFinalState()
        DistributedCCharBase.DistributedCCharBase.disable(self)
        self.neutralDoneEvent = None
        self.neutral = None
        self.walkDoneEvent = None
        self.walk = None
        # NOTE(review): requestFinalState() is called a second time here, as
        # in the original code -- presumably to re-park the FSM after the base
        # class disable; confirm before removing.
        self.fsm.requestFinalState()
        self.notify.debug('Mickey Disabled')

    def delete(self):
        # Same idempotence guard as __init__, without the bare except.
        if hasattr(self, 'DistributedMickey_deleted'):
            return
        self.DistributedMickey_deleted = 1
        del self.fsm
        DistributedCCharBase.DistributedCCharBase.delete(self)
        self.notify.debug('Mickey Deleted')

    def generate(self):
        """Create the Neutral/Walk state data objects and start in Neutral."""
        DistributedCCharBase.DistributedCCharBase.generate(self, self.diffPath)
        name = self.getName()
        self.neutralDoneEvent = self.taskName(name + '-neutral-done')
        self.neutral = CharStateDatas.CharNeutralState(self.neutralDoneEvent, self)
        self.walkDoneEvent = self.taskName(name + '-walk-done')
        if self.diffPath is None:
            self.walk = CharStateDatas.CharWalkState(self.walkDoneEvent, self)
        else:
            # A holiday costume swap may route Mickey along a different path.
            self.walk = CharStateDatas.CharWalkState(self.walkDoneEvent, self, self.diffPath)
        self.fsm.request('Neutral')

    def enterOff(self):
        pass

    def exitOff(self):
        pass

    def enterNeutral(self):
        self.neutral.enter()
        self.acceptOnce(self.neutralDoneEvent, self.__decideNextState)

    def exitNeutral(self):
        self.ignore(self.neutralDoneEvent)
        self.neutral.exit()

    def enterWalk(self):
        self.walk.enter()
        self.acceptOnce(self.walkDoneEvent, self.__decideNextState)

    def exitWalk(self):
        self.ignore(self.walkDoneEvent)
        self.walk.exit()

    def __decideNextState(self, doneStatus):
        # After either state finishes, always drop back to Neutral; walking
        # is only triggered by a server-sent setWalk.
        self.fsm.request('Neutral')

    def setWalk(self, srcNode, destNode, timestamp):
        """Server update: walk from srcNode to destNode (ignored if equal)."""
        if destNode and not destNode == srcNode:
            self.walk.setWalk(srcNode, destNode, timestamp)
            self.fsm.request('Walk')

    def walkSpeed(self):
        return ToontownGlobals.MickeySpeed

    def handleHolidays(self):
        """During April Fools in Daisy Gardens, Mickey walks Daisy's path."""
        DistributedCCharBase.DistributedCCharBase.handleHolidays(self)
        if hasattr(base.cr, 'newsManager') and base.cr.newsManager:
            holidayIds = base.cr.newsManager.getHolidayIdList()
            if ToontownGlobals.APRIL_FOOLS_COSTUMES in holidayIds and isinstance(self.cr.playGame.hood, DGHood.DGHood):
                self.diffPath = TTLocalizer.Daisy
| 36.68 | 270 | 0.681843 | from pandac.PandaModules import *
from toontown.classicchars import DistributedCCharBase
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.classicchars import CharStateDatas
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.hood import DGHood
class DistributedMickey(DistributedCCharBase.DistributedCCharBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMickey')
def __init__(self, cr):
try:
self.DistributedMickey_initialized
return
except:
self.DistributedMickey_initialized = 1
DistributedCCharBase.DistributedCCharBase.__init__(self, cr, TTLocalizer.Mickey, 'mk')
self.fsm = ClassicFSM.ClassicFSM(self.getName(), [State.State('Off', self.enterOff, self.exitOff, ['Neutral']), State.State('Neutral', self.enterNeutral, self.exitNeutral, ['Walk']), State.State('Walk', self.enterWalk, self.exitWalk, ['Neutral'])], 'Off', 'Off')
self.fsm.enterInitialState()
self.handleHolidays()
def disable(self):
self.fsm.requestFinalState()
DistributedCCharBase.DistributedCCharBase.disable(self)
self.neutralDoneEvent = None
self.neutral = None
self.walkDoneEvent = None
self.walk = None
self.fsm.requestFinalState()
self.notify.debug('Mickey Disbled')
def delete(self):
try:
self.DistributedMickey_deleted
return
except:
self.DistributedMickey_deleted = 1
del self.fsm
DistributedCCharBase.DistributedCCharBase.delete(self)
self.notify.debug('Mickey Deleted')
def generate(self):
DistributedCCharBase.DistributedCCharBase.generate(self, self.diffPath)
name = self.getName()
self.neutralDoneEvent = self.taskName(name + '-neutral-done')
self.neutral = CharStateDatas.CharNeutralState(self.neutralDoneEvent, self)
self.walkDoneEvent = self.taskName(name + '-walk-done')
if self.diffPath == None:
self.walk = CharStateDatas.CharWalkState(self.walkDoneEvent, self)
else:
self.walk = CharStateDatas.CharWalkState(self.walkDoneEvent, self, self.diffPath)
self.fsm.request('Neutral')
def enterOff(self):
pass
def exitOff(self):
pass
def enterNeutral(self):
self.neutral.enter()
self.acceptOnce(self.neutralDoneEvent, self.__decideNextState)
def exitNeutral(self):
self.ignore(self.neutralDoneEvent)
self.neutral.exit()
def enterWalk(self):
self.walk.enter()
self.acceptOnce(self.walkDoneEvent, self.__decideNextState)
def exitWalk(self):
self.ignore(self.walkDoneEvent)
self.walk.exit()
def __decideNextState(self, doneStatus):
self.fsm.request('Neutral')
def setWalk(self, srcNode, destNode, timestamp):
if destNode and not destNode == srcNode:
self.walk.setWalk(srcNode, destNode, timestamp)
self.fsm.request('Walk')
def walkSpeed(self):
return ToontownGlobals.MickeySpeed
def handleHolidays(self):
DistributedCCharBase.DistributedCCharBase.handleHolidays(self)
if hasattr(base.cr, 'newsManager') and base.cr.newsManager:
holidayIds = base.cr.newsManager.getHolidayIdList()
if ToontownGlobals.APRIL_FOOLS_COSTUMES in holidayIds and isinstance(self.cr.playGame.hood, DGHood.DGHood):
self.diffPath = TTLocalizer.Daisy
| true | true |
1c3a8dfa2890051addf80f856407956287991ab1 | 3,121 | py | Python | forms/edit_customer_form.py | chucktilbury/Accounting | 15c467dac4405e872f3820e3ff35a53240335631 | [
"MIT"
] | null | null | null | forms/edit_customer_form.py | chucktilbury/Accounting | 15c467dac4405e872f3820e3ff35a53240335631 | [
"MIT"
] | null | null | null | forms/edit_customer_form.py | chucktilbury/Accounting | 15c467dac4405e872f3820e3ff35a53240335631 | [
"MIT"
] | null | null | null | from system.forms import Forms
from system.logger import *
#from dialogs.edit_dialogs import *
from dialogs.select_dialog import *
@class_wrapper
class _edit_customer_form(Forms):
    """Shared layout for the Customer editor.

    Builds the field grid; concrete subclasses add either the edit-mode
    or the new-record button row.
    """
    def __init__(self, owner, row_index):
        self.logger.set_level(Logger.DEBUG)
        super().__init__(owner, 'Customer')
        self.row_index = row_index
    def add_form(self):
        # Two column widths: width1 for full-row entries, width2 for the
        # narrow two-per-row fields.
        width1 = 70
        width2 = 28
        self.add_label('Date:')
        self.add_dynamic_label('date_created', 1, bg='white', width=width2, anchor='w')
        self.add_spacer(2)
        self.add_label('Name:')
        # Name is the unique key; hook up duplicate checking on its widget.
        wid = self.add_entry('name', 3, str, width=width1)
        self.add_dupe_check(wid)
        self.add_label('Address1:')
        self.add_entry('address1', 3, str, width=width1)
        self.add_label('Address2:')
        self.add_entry('address2', 3, str, width=width1)
        self.add_label('City:')
        self.add_entry('city', 1, str, width=width2)
        self.add_label('State:')
        self.add_entry('state', 1, str, width=width2)
        self.add_label('Zip Code:')
        self.add_entry('zip', 1, str, width=width2)
        self.add_label('Country:')
        self.add_combo('country_ID', 1, 'Country', 'name', width=width2)
        self.add_label('Email:')
        self.add_entry('email_address', 1, str, width=width2)
        self.add_label('Email Status:')
        self.add_combo('email_status_ID', 1, 'EmailStatus', 'name', width=width2)
        self.add_label('Phone:')
        self.add_entry('phone_number', 1, str, width=width2)
        self.add_label('Phone Status:')
        self.add_combo('phone_status_ID', 1, 'PhoneStatus', 'name', width=width2)
        self.add_label('Web Site:')
        self.add_entry('web_site', 1, str, width=width2)
        self.add_label('Class:')
        self.add_combo('class_ID', 1, 'ContactClass', 'name', width=width2)
        self.add_label('Description:')
        self.add_entry('description', 3, str, width=width1)
        self.add_label('Notes:')
        self.add_text('notes', 3, width=77, height=10)
    def add_edit_buttons(self):
        # Edit mode: record navigation, row selection dialog, then Save.
        self.add_ctl_button('Prev')
        self.add_ctl_button('Next')
        #self.add_ctl_button('Select', 'name')
        self.add_select_button(SelectDialog, owner=self.owner ,table=self.table, column='name')
        self.add_btn_spacer()
        self.add_ctl_button('Save')
    def add_new_buttons(self):
        # New-record mode: a single Save that inserts rather than updates.
        self.add_ctl_button('Save', new_flag=True)
@class_wrapper
class EditCustomerForm(_edit_customer_form):
    '''
    Customer editor pre-loaded with an existing row and edit-mode buttons.
    '''
    def __init__(self, owner, row_index):
        self.logger.set_level(Logger.DEBUG)
        super().__init__(owner, row_index)
        self.add_title('Edit Customer')
        self.add_form()
        self.add_edit_buttons()
        # Populate the widgets from the selected database row.
        self.load_form()
@class_wrapper
class NewCustomerForm(_edit_customer_form):
    '''
    Customer editor with blank fields and a single insert-mode Save button.
    '''
    def __init__(self, owner, row_index):
        self.logger.set_level(Logger.DEBUG)
        super().__init__(owner, row_index)
        self.add_title('New Customer')
        self.add_form()
        self.add_new_buttons()
        # Start from empty widgets; nothing is loaded from the database.
        self.clear_form()
| 31.525253 | 95 | 0.631849 | from system.forms import Forms
from system.logger import *
from dialogs.select_dialog import *
@class_wrapper
class _edit_customer_form(Forms):
def __init__(self, owner, row_index):
self.logger.set_level(Logger.DEBUG)
super().__init__(owner, 'Customer')
self.row_index = row_index
def add_form(self):
width1 = 70
width2 = 28
self.add_label('Date:')
self.add_dynamic_label('date_created', 1, bg='white', width=width2, anchor='w')
self.add_spacer(2)
self.add_label('Name:')
wid = self.add_entry('name', 3, str, width=width1)
self.add_dupe_check(wid)
self.add_label('Address1:')
self.add_entry('address1', 3, str, width=width1)
self.add_label('Address2:')
self.add_entry('address2', 3, str, width=width1)
self.add_label('City:')
self.add_entry('city', 1, str, width=width2)
self.add_label('State:')
self.add_entry('state', 1, str, width=width2)
self.add_label('Zip Code:')
self.add_entry('zip', 1, str, width=width2)
self.add_label('Country:')
self.add_combo('country_ID', 1, 'Country', 'name', width=width2)
self.add_label('Email:')
self.add_entry('email_address', 1, str, width=width2)
self.add_label('Email Status:')
self.add_combo('email_status_ID', 1, 'EmailStatus', 'name', width=width2)
self.add_label('Phone:')
self.add_entry('phone_number', 1, str, width=width2)
self.add_label('Phone Status:')
self.add_combo('phone_status_ID', 1, 'PhoneStatus', 'name', width=width2)
self.add_label('Web Site:')
self.add_entry('web_site', 1, str, width=width2)
self.add_label('Class:')
self.add_combo('class_ID', 1, 'ContactClass', 'name', width=width2)
self.add_label('Description:')
self.add_entry('description', 3, str, width=width1)
self.add_label('Notes:')
self.add_text('notes', 3, width=77, height=10)
def add_edit_buttons(self):
self.add_ctl_button('Prev')
self.add_ctl_button('Next')
self.add_select_button(SelectDialog, owner=self.owner ,table=self.table, column='name')
self.add_btn_spacer()
self.add_ctl_button('Save')
def add_new_buttons(self):
self.add_ctl_button('Save', new_flag=True)
@class_wrapper
class EditCustomerForm(_edit_customer_form):
def __init__(self, owner, row_index):
self.logger.set_level(Logger.DEBUG)
super().__init__(owner, row_index)
self.add_title('Edit Customer')
self.add_form()
self.add_edit_buttons()
self.load_form()
@class_wrapper
class NewCustomerForm(_edit_customer_form):
def __init__(self, owner, row_index):
self.logger.set_level(Logger.DEBUG)
super().__init__(owner, row_index)
self.add_title('New Customer')
self.add_form()
self.add_new_buttons()
self.clear_form()
| true | true |
1c3a8e1b7776bb7c26532d73ed96fde7231f2dc6 | 1,122 | py | Python | contents/app_unittest.py | joeyster/Travel-Planner | 85e21b7277a50621ed8cf4b2aa5d1b80d3d47005 | [
"MIT"
] | null | null | null | contents/app_unittest.py | joeyster/Travel-Planner | 85e21b7277a50621ed8cf4b2aa5d1b80d3d47005 | [
"MIT"
] | null | null | null | contents/app_unittest.py | joeyster/Travel-Planner | 85e21b7277a50621ed8cf4b2aa5d1b80d3d47005 | [
"MIT"
] | null | null | null | import unittest
import app
# Note: the class must be called Test
class Test(unittest.TestCase):
    """Smoke test: the planner in ``app`` should process a route end to end."""
    def test_if_it_runs_to_the_end(self):
        # Earlier fixture routes, kept for reference (one marked "bad apple"):
        #self.assertEqual(app.testing_address(["California", "Nevada", "Illinois", "New York", "Maine"],["Nevada", "Illinois", "New York", "Maine"]), True)
        #self.assertEqual(app.testing_address(['Seattle', 'Chicago', 'Anaheim', 'Miami'], ['Chicago', 'Anaheim', 'Miami']), True)
        # self.assertEqual(app.testing_address(['San diego', 'Anaheim', 'Costa Mesa', 'Seattle'], ['Anaheim', 'Costa Mesa', 'Seattle']), True)
        # self.assertEqual(app.testing_address(['Anaheim', 'Buena Park', 'Cypress', 'Fullerton'], ['Buena Park', 'Cypress', 'Fullerton']), True)
        # self.assertEqual(app.testing_address(["California", "Nevada", "Illinois", "New York", "Washington", "Maine"], ["Nevada", "Illinois", "New York", "Washington", "Maine"]), True) #bad apple
        self.assertEqual(app.testing_address(["California", "Montana", "Wyoming", "Idaho", "Texas", "Florida", "Maine"], ["Montana", "Wyoming", "Idaho", "Texas", "Florida", "Maine"]), True)
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| 70.125 | 192 | 0.66934 | import unittest
import app
class Test(unittest.TestCase):
def test_if_it_runs_to_the_end(self):
        self.assertEqual(app.testing_address(["California", "Montana", "Wyoming", "Idaho", "Texas", "Florida", "Maine"], ["Montana", "Wyoming", "Idaho", "Texas", "Florida", "Maine"]), True)
if __name__ == '__main__':
unittest.main()
| true | true |
1c3a8e554480fc8f2650a603e40fee871e2120be | 29,684 | py | Python | programs/tfFromJson.py | Nino-cunei/ninmed | 30f3c096201184c71279eebd572c4fcae0f3dcea | [
"MIT"
] | null | null | null | programs/tfFromJson.py | Nino-cunei/ninmed | 30f3c096201184c71279eebd572c4fcae0f3dcea | [
"MIT"
] | null | null | null | programs/tfFromJson.py | Nino-cunei/ninmed | 30f3c096201184c71279eebd572c4fcae0f3dcea | [
"MIT"
] | null | null | null | import sys
import os
import re
import json
import yaml
from shutil import rmtree
from tf.fabric import Fabric
from tf.convert.walker import CV
HELP = """
python3 tfFromJson.py
Generate TF and if successful, load it
python3 tfFromJson.py -Pnumber
python3 tfFromJson.py -Pnumber:obverse
python3 tfFromJson.py -Pnumber:obverse:2:1
Generate TF, only this Pnumber, face, line, do not load it
Primes in numbers are not relevant
python3 tfFromJson.py -skipgen
Load TF
python3 tfFromJson.py -skipload
Generate TF but do not load it
"""
TEST = """
P365742-K.2354: 63.: [x x x x {ši]m}LI GAZ KI ZI₃.KUM HI.HI ina A GAZI{sar} SILA₁₁-aš LAL IGI GIG tu-gal-lab EN TI.LA LAL₂
P394523-K.2573: 17'.: [ina A.M]E[Š? ... UKU]Š₂.LAGAB U₂-BABBAR x [... t]e-qi₂?#
P394520-K.2570: 9'.: [DIŠ NA IGI.MIN-šu₂ GIG? ... {u₂}EME.U]R.GI₇ {u₂}IN₆.UŠ ina ZI₃.KUM HE.HE ina GEŠTIN N[A]G?
"""
GH = os.path.expanduser("~/github")
ORG = "Nino-cunei"
REPO = "ninmed"
REPO_DIR = f"{GH}/{ORG}/{REPO}"
REPORT_DIR = f"{REPO_DIR}/report"
DECL_PATH = f"{REPO_DIR}/yaml"
META_DECL_FILE = f"{DECL_PATH}/meta.yaml"
LINE_TYPES = set(
"""
EmptyLine
TextLine
ColumnAtLine
DiscourseAtLine
SurfaceAtLine
ControlLine
NoteLine
TranslationLine
LooseDollarLine
RulingDollarLine
SealDollarLine
""".strip().split()
)
def readYaml(fileName):
    """Read a YAML file and return its contents as a dict.

    Returns an empty dict when the file does not exist or is empty, so
    callers (e.g. ``META_DECL[...]`` lookups) always receive a mapping.
    """
    if os.path.exists(fileName):
        with open(fileName) as y:
            # safe_load suffices for the plain-mapping meta.yaml and avoids
            # constructing arbitrary Python objects from the file.
            data = yaml.safe_load(y)
        # An empty YAML file loads as None; normalize so the function's
        # contract (always a dict) holds.
        return {} if data is None else data
    return {}
META_DECL = readYaml(META_DECL_FILE)
VERSION_SRC = META_DECL["versionSrc"]
VERSION_TF = META_DECL["versionTf"]
IN_DIR = f"{REPO_DIR}/source/json/{VERSION_SRC}"
TF_DIR = f"{REPO_DIR}/tf"
OUT_DIR = f"{TF_DIR}/{VERSION_TF}"
META_FIELDS = {
"collection": ("collection", "str"),
"description": ("description", "str"),
"museum.name": ("museum", "str"),
"cdliNumber": ("pnumber", "str"),
"number": ("docnumber", "str"),
"publication": ("publication", "str"),
}
flagging = {
"*": "collated",
"!": "remarkable",
"?": "question",
"#": "damage",
}
clusterSpecs = (
("BrokenAway", "missing", "[", "]", None),
("PerhapsBrokenAway", "uncertain", "(", ")", None),
("Removal", "excised", "<<", ">>", None),
("AccidentalOmission", "supplied", "<", ">", None),
("DocumentOrientedGloss", "gloss", "{(", ")}", None),
("Determinative", "det", "{", "}", None),
("LoneDeterminative", "det", "{", "}", None),
("Erasure", "erasure", "°", "°", "\\"),
)
clusterType = {x[0]: x[1] for x in clusterSpecs}
clusterChar = {x[1]: {True: x[2], False: x[3], None: x[4]} for x in clusterSpecs}
commentTypes = set(
"""
ruling
colofon
note
comment
seal
tr@en
""".strip().split()
)
languages = set(
"""
en
""".strip().split()
)
LIGA = "␣"
TRANS_RE = re.compile(r"""@i\{([^}]*)\}""", re.S)
# TF CONFIGURATION
slotType = "sign"
generic = {
"name": META_DECL["name"],
"editor": META_DECL["editor"],
"project": META_DECL["project"],
"converters": META_DECL["converters"],
}
otext = {
"fmt:text-orig-full": "{atfpre}{atf}{atfpost}{after}",
"fmt:text-orig-plain": "{sym}{after}",
"sectionFeatures": "pnumber,face,lnno",
"sectionTypes": "document,face,line",
}
intFeatures = (
set(
"""
ln
lln
col
number
primeln
primecol
trans
variant
""".strip().split()
)
| set(flagging.values())
| set(clusterType.values())
| {x[1][0] for x in META_FIELDS.items() if x[1][1] == "int"}
)
featureMeta = {
"after": {"description": "what comes after a sign or word (- or space)"},
"atf": {"description": "full atf of a sign"},
"atfpost": {"description": "cluster characters that follow a sign or word"},
"atfpre": {"description": "cluster characters that precede a sign or word"},
"col": {"description": "ATF column number"},
"collated": {"description": "whether a sign is collated (*)"},
"collection": {"description": 'collection name from metadata field "collection"'},
"colofon": {"description": "colofon comment to a line"},
"comment": {"description": "comment to a line"},
"damage": {"description": "whether a sign is damaged"},
"description": {"description": 'description from metadata field "description"'},
"det": {
"description": "whether a sign is a determinative gloss - between { }",
},
"docnumber": {"description": 'document number from metadata field "number"'},
"erasure": {
"description": (
"whether a sign is in an erasure - between ° \\ °: "
"1: between ° and \\; 2: between \\ and °"
),
},
"excised": {
"description": "whether a sign is excised - between << >>",
},
"face": {"description": "full name of a face including the enclosing object"},
"flags": {"description": "sequence of flags after a sign"},
"gloss": {
"description": "whether a sign belongs to a gloss - between {( )}",
},
"grapheme": {"description": "grapheme of a sign"},
"lang": {
"description": (
"language of a document, word, or sign:"
" absent: Akkadian; sux: Sumerian; sb: Standard Babylonian"
)
},
"lemma": {
"description": (
"lemma of a word:"
"comma-separated values of the uniqueLemma field in the JSON source"
)
},
"lln": {"description": "logical line number of a numbered line"},
"ln": {"description": "ATF line number of a numbered line, without prime"},
"lnno": {
"description": (
"ATF line number, may be $ or #, with prime; column number prepended"
),
},
"missing": {
"description": "whether a sign is missing - between [ ]",
},
"modifiers": {"description": "sequence of modifiers after a sign"},
"museum": {"description": 'museum name from metadata field "museum.name"'},
"note": {"description": "note comment to a line"},
"number": {"description": "numeric value of a number sign"},
"pnumber": {"description": "P number of a document"},
"primecol": {"description": "whether a prime is present on a column number"},
"primeln": {"description": "whether a prime is present on a line number"},
"publication": {
"description": 'publication info from metadata field "publication"'
},
"question": {"description": "whether a sign has the question flag (?)"},
"reading": {"description": "reading of a sign"},
"remarkable": {"description": "whether a sign is remarkable (!)"},
"ruling": {"description": "ruling comment to a line"},
"seal": {"description": "seal comment to a line"},
"sym": {"description": "essential part of a sign or of a word"},
"supplied": {
"description": "whether a sign is supplied - between < >",
},
"trans": {"description": "whether a line has a translation"},
"tr@en": {"description": "english translation of a line"},
"type": {"description": "name of a type of cluster or kind of sign"},
"uncertain": {"description": "whether a sign is uncertain - between ( )"},
"variant": {
"description": (
"if sign is part of a variant pair, "
"this is the sequence number of the variant (1 or 2)"
)
},
}
def msg(m):
    """Emit a progress/status message, newline-terminated, on stdout."""
    print(m, file=sys.stdout)
def getJsonFiles(baseDir=None):
    """Collect all ``*.json`` file paths below *baseDir* (default: IN_DIR).

    Hidden directories (names starting with ".") are pruned; the result is
    sorted for a deterministic processing order.
    """
    if baseDir is None:
        baseDir = IN_DIR
    filePaths = []

    def walk(path):
        with os.scandir(path) as it:
            for entry in it:
                name = entry.name
                if not name.startswith(".") and entry.is_dir():
                    walk(f"{path}/{name}")
                # BUGFIX: the original tested the bound method ``entry.is_file``
                # (always truthy); it must be called so non-files are excluded.
                elif name.endswith(".json") and entry.is_file():
                    filePaths.append(f"{path}/{name}")

    walk(baseDir)
    return sorted(filePaths)
def readJsonFile(path):
    """Parse and return the JSON document stored at *path*."""
    with open(path) as handle:
        return json.load(handle)
def writeReport(fName, lines, reportDir=None):
    """Write *lines* to ``reportDir/fName`` (default dir: REPORT_DIR).

    Each item of *lines* is written on its own line. The optional
    *reportDir* parameter generalizes the hard-coded report directory
    while keeping existing two-argument calls working unchanged.
    """
    if reportDir is None:
        reportDir = REPORT_DIR
    with open(f"{reportDir}/{fName}", "w") as fh:
        for line in lines:
            fh.write(f"{line}\n")
def getConverter():
    # Build a walker-conversion helper bound to a Fabric instance that
    # writes its features into OUT_DIR.
    TF = Fabric(locations=OUT_DIR)
    return CV(TF)
PNUMBER = None
FACE = None
LINE = None
def convert():
    """Run the walker conversion over the corpus.

    When the module-level ``generateTf`` flag is set, the target directory
    is wiped and recreated first, so stale features cannot survive a rerun.
    Returns the walker's success status.
    """
    if generateTf:
        if os.path.exists(OUT_DIR):
            rmtree(OUT_DIR)
        os.makedirs(OUT_DIR, exist_ok=True)
    walker = getConverter()
    options = dict(
        otext=otext,
        generic=generic,
        intFeatures=intFeatures,
        featureMeta=featureMeta,
        generateTf=generateTf,
    )
    return walker.walk(director, slotType, **options)
# DIRECTOR
def director(cv):
    # Walks every JSON source file and fires cv.* actions (node, slot,
    # feature, terminate) that build the TF graph.
    DEBUG = False
    # Per-cluster-type state: cluster name -> (open cluster node, parts seen).
    curClusters = {cluster: (None, 0) for cluster in clusterType.values()}
    def debug(m):
        # Verbose trace output, active only when DEBUG is flipped on above.
        if DEBUG:
            msg(f"INFO ======> {m}")
            sys.stdout.flush()
    def error(m, stop=False):
        # Report a conversion problem; stop=True aborts the whole run.
        msg(f"ERROR ======> {m}")
        sys.stdout.flush()
        if stop:
            quit()
    def terminateClusters():
        # Close any clusters still open (used at face/document boundaries)
        # and reset all cluster bookkeeping.
        for tp in curClusters:
            (node, val) = curClusters[tp]
            if node:
                cv.terminate(node)
            curClusters[tp] = (None, 0)
    def doCluster(data, cluster, on=None, off=None):
        # Open, close, or advance a bracketing cluster (missing, det, ...).
        # on/off override the JSON "side" field when the caller already knows
        # the polarity; "CENTER" marks the middle of a two-part cluster
        # (only erasure uses this: ° ... \ ... °).
        makeOn = data["side"] == "LEFT" if on is None else on
        makeOff = data["side"] == "RIGHT" if off is None else off
        makeAlt = data["side"] == "CENTER" if off is None and on is None else off and on
        # status: True = opening, False = closing, None = middle separator.
        status = True if makeOn else False if makeOff else None
        if status is None and not makeAlt:
            error(f"cluster {cluster} not on and not off and not alt", stop=True)
        (clusterNode, clusterVal) = curClusters[cluster]
        if status is True:
            # Clusters of the same type may not nest.
            if clusterNode is not None:
                error(f"cluster {cluster} is nesting", stop=True)
            clusterNode = cv.node("cluster")
            curClusters[cluster] = (clusterNode, 1)
            cv.feature(clusterNode, type=cluster)
        elif status is False:
            if clusterNode is None:
                error(f"cluster {cluster} is spuriously closed", stop=True)
            else:
                cv.terminate(clusterNode)
                curClusters[cluster] = (None, 0)
        elif status is None:
            # Middle separator: promote the open cluster to its second part.
            if clusterNode is None or clusterVal == 0:
                error(f"cluster {cluster} is missing first part", stop=True)
            elif clusterVal > 1:
                error(f"cluster {cluster} has too many parts", stop=True)
            else:
                curClusters[cluster] = (clusterNode, 2)
        return status
    def getClusters():
        # Snapshot of the currently open clusters as feature name -> part
        # number; assigned onto each slot created while they are open.
        return {
            cluster: val
            for (cluster, (node, val)) in curClusters.items()
            if node is not None
        }
    def doFlags(data, cur, extraFlag):
        # Translate ATF flags (* ! ? #) into boolean features on node `cur`.
        # NOTE(review): when the JSON has a "flags" list, appending extraFlag
        # mutates that list in place — presumably harmless since each sign is
        # processed once; confirm if signs are ever revisited.
        flagList = data.get("flags", [])
        if extraFlag:
            flagList.append(extraFlag)
        if len(flagList):
            flags = "".join(flagList)
            atts = {flagging[flag]: 1 for flag in flags}
            cv.feature(cur, flags=flags, **atts)
    def doModifiers(data, cur):
        # Join ATF modifiers (each "@x" in the JSON) into a single "@xyz"
        # feature value on node `cur`.
        modifierList = data.get("modifiers", [])
        if len(modifierList):
            modifiers = "".join(m[1:] for m in modifierList)
            cv.feature(cur, modifiers=f"@{modifiers}")
    def doSign(data, wordAfter, isLast, **features):
        # Process one token of a word: either cluster punctuation, a language
        # shift, a joiner, or a real sign (which becomes a new slot).
        # Extra **features (e.g. variant=...) are copied onto the slot.
        nonlocal curSign
        nonlocal nextPre
        nonlocal lang
        signType = data["type"]
        after = wordAfter if isLast else ""
        if signType in {
            "AccidentalOmission",
            "Erasure",
            "Removal",
            "BrokenAway",
            "PerhapsBrokenAway",
            "DocumentOrientedGloss",
        }:
            # Cluster bracket token: opening brackets are buffered in nextPre
            # for the *next* slot; closing brackets are appended to the
            # atfpost of the *previous* slot.
            cluster = clusterType[signType]
            status = doCluster(data, cluster)
            if status is True:
                nextPre += clusterChar[cluster][status]
            elif status is False:
                cv.feature(
                    curSign,
                    after=(cv.get("after", curSign) or "") + after,
                    atfpost=(cv.get("atfpost", curSign) or "")
                    + clusterChar[cluster][status],
                )
            elif status is None:
                # Middle separator of the erasure cluster ("\").
                nextPre += clusterChar[cluster][status]
        elif signType == "LanguageShift":
            # "%sux" etc.; Akkadian ("%akk") is the default and is encoded
            # as lang = None.
            lang = data["cleanValue"][1:]
            nextPre += f"%{lang} "
            if lang == "akk":
                lang = None
        elif signType == "Joiner":
            # A bare joiner between words: record it as the previous sign's
            # "after" text.
            if curSign is not None:
                cv.feature(curSign, after=data["value"])
        else:
            # A real sign: create a slot. First strip word-internal "[" / "]"
            # (partial damage) out of the atf and remember how to mark it.
            atf = data["value"]
            atfPre = ""
            atfPost = ""
            extraFlag = ""
            indexStart = atf.find("[")
            indexEnd = atf.find("]")
            startMissingInternal = atf != "[" and indexStart >= 0
            endMissingInternal = atf != "]" and indexEnd >= 0
            if startMissingInternal and endMissingInternal:
                if indexStart < indexEnd:
                    # "[...]" fully inside one sign: the sign is damaged.
                    extraFlag = "#"
                    atf = atf.replace("[", "").replace("]", "") + extraFlag
                else:
                    # "]...[": closes one missing stretch and opens the next.
                    atf = atf.replace("[", "").replace("]", "")
            elif startMissingInternal:
                atf = atf.replace("[", "")
                atfPre = "["
                doCluster(data, "missing", on=True, off=False)
            curSign = cv.slot()
            if endMissingInternal and not startMissingInternal:
                atf = atf.replace("]", "")
                atfPost += "]"
                doCluster(data, "missing", on=False, off=True)
            # Flush any buffered opening brackets into this slot's atfpre.
            thisPre = nextPre + atfPre
            atfPreFeat = dict(atfpre=thisPre) if thisPre else {}
            atfPre = ""
            nextPre = ""
            atfPostFeat = dict(atfpost=atfPost) if atfPost else {}
            atfPost = ""
            cv.feature(
                curSign,
                **atfPreFeat,
                **atfPostFeat,
                atf=atf,
                **getClusters(),
                **features,
            )
            doFlags(data, curSign, extraFlag)
            doModifiers(data, curSign)
            sym = data["cleanValue"]
            feats = {}
            # Map the JSON sign type onto the TF "type" feature plus
            # type-specific features (number, grapheme, reading).
            if signType == "UnknownNumberOfSigns":
                tp = "ellipsis"
            elif signType == "UnidentifiedSign":
                tp = "unknown"
            elif signType == "UnclearSign":
                tp = "unknown"
            elif signType == "Number":
                tp = "numeral"
                feats = dict(number=sym)
            elif signType == "Logogram":
                tp = "grapheme"
                feats = dict(grapheme=sym)
            elif signType == "Reading":
                tp = "reading"
                sign = data.get("sign", None)
                reading = data["name"]
                if sign is None:
                    feats = dict(reading=reading)
                else:
                    grapheme = sign["cleanValue"]
                    feats = dict(reading=reading, grapheme=grapheme)
            elif signType == "Joiner":
                tp = "joiner"
            elif signType == "Divider":
                tp = "wdiv"
                after = wordAfter
            elif signType == "Determinative" or signType == "Variant":
                # These are flattened by the caller; seeing one here means
                # they were nested, which the converter does not support.
                error(f"nested {signType} Signs", stop=True)
            else:
                error(f"unrecognized sign type {signType}", stop=True)
            cv.feature(curSign, type=tp, after=after, sym=sym, **feats)
    # Main walk: one document node per JSON file, faces and lines inside.
    paths = getJsonFiles()
    skipFace = FACE is not None
    skipLine = LINE is not None
    for (i, path) in enumerate(paths):
        fileName = path.split("/")[-1].rsplit(".", 1)[0]
        docData = readJsonFile(path)
        # Collect document metadata; dotted field names reach one level
        # into nested JSON objects (e.g. "museum.name").
        metaData = {}
        for (origField, (field, tp)) in META_FIELDS.items():
            origFields = origField.split(".", 1)
            metaData[field] = (
                docData[origField]
                if len(origFields) == 1
                else docData[origFields[0]][origFields[1]]
            )
        pNumber = metaData["pnumber"]
        # Restrict to a single document when PNUMBER is set (debug mode).
        if PNUMBER is not None and PNUMBER != pNumber:
            continue
        textData = docData["text"]["allLines"]
        nLines = len(textData)
        msg(f"{i + 1:>3} {nLines:>4} lines in {fileName}")
        if nLines == 0:
            continue
        curDoc = cv.node("document")
        cv.feature(curDoc, **metaData)
        curFace = None
        curFaceValue = None
        col = None
        primecol = None
        lln = 0
        prevLine = None
        for lineData in textData:
            lang = None
            lineType = lineData["type"]
            content = " ".join(c["value"] for c in lineData["content"])
            atf = f"{lineData['prefix']} {content}"
            isFaceLine = lineType == "SurfaceAtLine"
            if isFaceLine:
                thisFaceValue = lineData["label"]["surface"].lower()
                if FACE is not None:
                    skipFace = thisFaceValue != FACE
                if thisFaceValue == curFaceValue:
                    continue
            if skipFace:
                continue
            # Open a new face on an explicit @surface line, or implicitly
            # ("obverse") when content appears before any surface line.
            if isFaceLine or not curFace:
                if isFaceLine and curFace:
                    terminateClusters()
                    cv.terminate(curFace)
                curFace = cv.node("face")
                col = None
                primecol = None
                lln = 0
                curFaceValue = (
                    lineData["label"]["surface"].lower() if isFaceLine else "obverse"
                )
                cv.feature(curFace, face=curFaceValue)
                if isFaceLine:
                    debug(atf)
                    continue
            if lineType == "ColumnAtLine":
                # Remember the current column (and its prime) for the lines
                # that follow; no node is created for the @column line itself.
                col = lineData["label"]["column"]
                primeInfo = lineData["label"]["status"]
                primecol = len(primeInfo) > 0 and "PRIME" in primeInfo
                primecolAtt = dict(primecol=1) if primecol else {}
                debug(atf)
                continue
            if lineType == "ControlLine":
                # Reclassify "$ note: ..." and "$ tr.xx: ..." control lines
                # as note / translation lines before further processing.
                content = lineData["content"][0]["value"]
                if content.startswith("note:"):
                    lineData["content"][0]["value"] = content[5:].lstrip()
                    lineData["prefix"] = "#note: "
                    lineType = "NoteLine"
                if content.startswith("tr."):
                    content = content.split(".", 1)[1]
                    parts = content.split(" ", 1)
                    lan = parts[0]
                    if lan in languages:
                        content = parts[1] if len(parts[1]) > 1 else ""
                    else:
                        (lan, content) = content.split(":", 1)
                        lan = lan.split(".", 1)[0]
                        if lan not in languages:
                            error(f"Unknown language {lan}", stop=True)
                    lineData["content"][0]["value"] = content
                    lineType = "TranslationLine"
                    lineData["prefix"] = f"#tr.{lan}: "
            isEmptyLine = lineType == "EmptyLine"
            isTextLine = lineType == "TextLine"
            if isEmptyLine or isTextLine:
                lln += 1
                curLine = cv.node("line")
                cv.feature(curLine, lln=lln, atf=atf)
                if isEmptyLine:
                    # An empty line still needs one slot (type "empty") so
                    # the line node is non-empty in the TF graph.
                    if not skipLine:
                        debug(atf)
                        curSlot = cv.slot()
                        cv.feature(curSlot, type="empty")
                        prevLine = curLine
                    # Empty lines get a synthetic "!" line number.
                    if col is None:
                        lnno = f"!{lln}"
                    else:
                        cv.feature(curLine, col=col, **primecolAtt)
                        colprime = "'" if primecol else ""
                        colno = f"{col}{colprime}"
                        lnno = f"!{colno}:{lln}"
                    cv.feature(curLine, lnno=lnno)
                else:
                    numberData = lineData["lineNumber"]
                    ln = numberData["number"]
                    primeln = numberData["hasPrime"]
                    primelnAtt = dict(primeln=1) if primeln else {}
                    lnprime = "'" if primeln else ""
                    lnno = f"{ln}{lnprime}"
                    if col is not None:
                        cv.feature(curLine, col=col, **primecolAtt)
                        colprime = "'" if primecol else ""
                        colno = f"{col}{colprime}"
                        lnno = f"{colno}:{lnno}"
                    # Restrict to a single line when LINE is set (debug mode);
                    # primes are ignored in the comparison.
                    if LINE is not None:
                        skipLine = lnno.replace("'", "") != LINE
                    if skipLine:
                        continue
                    debug(atf)
                    cv.feature(curLine, ln=ln, **primelnAtt, lnno=lnno)
                    # Flatten the word/sign hierarchy into a single list:
                    # [True, wordData, isLast]       for word tokens,
                    # [False, signData, isLast, ...] for sign tokens
                    #   (with type/index/atEnd for Determinative/Variant subs),
                    # [None, wordData, None]         for non-word tokens.
                    lineContent = lineData["content"]
                    lineSigns = []
                    for wordData in lineContent:
                        if "parts" in wordData:
                            lineSigns.append([True, wordData, False])
                            for signData in wordData["parts"]:
                                hasSubs = False
                                # NOTE: this inner loop variable `i` shadows
                                # the document index; the outer `i` is not
                                # used after this point in the iteration.
                                for (kind, tp) in (
                                    ("parts", "Determinative"),
                                    ("tokens", "Variant"),
                                ):
                                    if kind in signData:
                                        hasSubs = True
                                        end = len(signData[kind])
                                        for (i, subPart) in enumerate(signData[kind]):
                                            lineSigns.append(
                                                [
                                                    False,
                                                    subPart,
                                                    False,
                                                    tp,
                                                    i,
                                                    i == end - 1,
                                                ]
                                            )
                                if not hasSubs:
                                    lineSigns.append([False, signData, False])
                        else:
                            lineSigns.append([None, wordData, None])
                    # Mark the last word of the line ...
                    for entry in reversed(lineSigns):
                        isWord = entry[0]
                        if isWord:
                            entry[2] = True
                            break
                    # ... and the last sign of every word.
                    atWordEnd = True
                    for entry in reversed(lineSigns):
                        isWord = entry[0]
                        if isWord is False:
                            if atWordEnd:
                                entry[2] = True
                                atWordEnd = False
                        elif isWord is True:
                            atWordEnd = True
                    curWord = None
                    curSign = None
                    nextPre = ""
                    for (e, entry) in enumerate(lineSigns):
                        isWord = entry[0]
                        data = entry[1]
                        isLast = entry[2]
                        where = None if len(entry) < 4 else entry[3]
                        contentType = data["type"]
                        if isWord:
                            # Start a new word node; the last word of a line
                            # is followed by a newline, others by a space.
                            # wordAfter is read later by this word's signs.
                            if curWord:
                                cv.terminate(curWord)
                            atf = data["value"]
                            sym = data["cleanValue"]
                            curWord = cv.node("word")
                            wordAfter = "\n" if isLast else " "
                            cv.feature(curWord, atf=atf, sym=sym, after=wordAfter)
                            if contentType in {"Word", "LoneDeterminative"}:
                                lemmaData = data["uniqueLemma"]
                                lemma = ", ".join(lemmaData)
                                atts = {} if lang is None else dict(lang=lang)
                                cv.feature(curWord, type="word", **atts, lemma=lemma)
                        elif isWord is False:
                            if len(entry) > 3:
                                # Flattened Determinative / Variant sub-sign.
                                tp = entry[3]
                                where = entry[4]
                                atEnd = entry[5]
                                atts = {}
                                if tp == "Determinative":
                                    if where == 0:
                                        doCluster(data, "det", on=True, off=False)
                                        nextPre += "{"
                                    doSign(data, wordAfter, isLast)
                                    if atEnd:
                                        cv.feature(
                                            curSign,
                                            atfpost=(cv.get("atfpost", curSign) or "")
                                            + "}",
                                        )
                                        doCluster(data, "det", on=False, off=True)
                                elif tp == "Variant":
                                    # Variants are numbered 1, 2, ... and
                                    # separated by "/" in the ATF rendering.
                                    doSign(data, wordAfter, isLast, variant=where + 1)
                                    if not atEnd:
                                        cv.feature(
                                            curSign,
                                            atfpost=(cv.get("atfpost", curSign) or "")
                                            + "/",
                                        )
                                else:
                                    error(f"Unknown complex type: {tp}", stop=True)
                            else:
                                doSign(data, wordAfter, isLast)
                        else:
                            doSign(data, " ", isLast)
                    if nextPre != "":
                        error(
                            f"dangling pre material at last sign of line: {nextPre}",
                            stop=True,
                        )
                    cv.terminate(curWord)
                    prevLine = curLine
                cv.terminate(curLine)
            else:
                # Non-content lines (rulings, notes, translations, ...)
                # become feature values on the *previous* content line.
                if skipLine:
                    continue
                debug(atf)
                if lineType == "ControlLine":
                    error(f"Unknown ControlLine: {atf[0:40]}")
                    continue
                content = " ".join(c["cleanValue"] for c in lineData["content"])
                if not content:
                    continue
                contents = {}
                if lineType == "RulingDollarLine":
                    tp = "ruling"
                elif lineType == "DiscourseAtLine":
                    tp = "colofon"
                elif lineType == "NoteLine":
                    tp = "note"
                elif lineType == "TranslationLine":
                    # Strip @i{...} markup, then derive the target language
                    # from the "#tr.xx" prefix.
                    content = TRANS_RE.sub(r"\1", content)
                    if not content:
                        continue
                    parts = lineData["prefix"][1:].split(":", 1)[0].split(".")
                    lan = parts[1]
                    if lan not in languages:
                        error(f"Unknown language {lan}", stop=True)
                    else:
                        tp = f"tr@{lan}"
                        if len(parts) > 2 and parts[2]:
                            content = f"{parts[2]}:{content}"
                        contents["trans"] = 1
                elif lineType == "LooseDollarLine":
                    tp = "comment"
                elif lineType == "SealDollarLine":
                    tp = "seal"
                # Accumulate multiple comment lines of the same type on one
                # line node, newline-separated.
                orig = cv.get(tp, prevLine)
                content = content if not orig else f"{orig}\n{content}"
                contents[tp] = content
                cv.feature(prevLine, **contents)
        if curFace:
            terminateClusters()
            cv.terminate(curFace)
        cv.terminate(curDoc)
    # Delete the metadata of declared features that never occurred.
    for feat in featureMeta:
        if not cv.occurs(feat):
            error(f"feature {feat} does not occur")
            cv.meta(feat)
# TF LOADING (to test the generated TF)
def loadTf():
    # Load the freshly generated TF data as a sanity check and print a few
    # statistics (node count, most frequent readings and graphemes).
    TF = Fabric(locations=[OUT_DIR])
    allFeatures = TF.explore(silent=True, show=True)
    loadableFeatures = allFeatures["nodes"] + allFeatures["edges"]
    api = TF.load(loadableFeatures, silent=False)
    if api:
        msg(f"max node = {api.F.otype.maxNode}")
        msg("Frequency of readings")
        msg(api.F.reading.freqList()[0:20])
        msg("Frequency of grapheme")
        msg(api.F.grapheme.freqList()[0:20])
# MAIN
# Command-line dispatch; see HELP at the top of this file.
command = None if len(sys.argv) <= 1 else sys.argv[1]

msg(f"JSON to TF converter for {REPO}")
msg(f"ATF source version = {VERSION_SRC}")
msg(f"TF target version = {VERSION_TF}")

if command is None:
    # Default: generate TF and, on success, load it back as a check.
    generateTf = True
    good = convert()
    if good:
        loadTf()
elif command.lstrip("-").startswith("P"):
    # Single-document debug mode. HELP documents the selector as
    # "-Pnumber[:face[:line]]"; the parser previously only matched the bare
    # "Pnumber..." form — accept both spellings.
    generateTf = True
    parts = command.lstrip("-").split(":", 1)
    PNUMBER = parts[0]
    if len(parts) > 1:
        parts = parts[1].split(":", 1)
        FACE = parts[0]
        if len(parts) > 1:
            # Primes are not significant in line selection.
            LINE = parts[1].replace("'", "")
    convert()
elif command == "-skipload":
    generateTf = True
    convert()
elif command == "-skipgen":
    loadTf()
else:
    msg(f"Wrong command {command} !\n{HELP}")
| 33.390326 | 122 | 0.467356 | import sys
import os
import re
import json
import yaml
from shutil import rmtree
from tf.fabric import Fabric
from tf.convert.walker import CV
HELP = """
python3 tfFromJson.py
Generate TF and if successful, load it
python3 tfFromJson.py -Pnumber
python3 tfFromJson.py -Pnumber:obverse
python3 tfFromJson.py -Pnumber:obverse:2:1
Generate TF, only this Pnumber, face, line, do not load it
Primes in numbers are not relevant
python3 tfFromJson.py -skipgen
Load TF
python3 tfFromJson.py -skipload
Generate TF but do not load it
"""
TEST = """
P365742-K.2354: 63.: [x x x x {ši]m}LI GAZ KI ZI₃.KUM HI.HI ina A GAZI{sar} SILA₁₁-aš LAL IGI GIG tu-gal-lab EN TI.LA LAL₂
P394523-K.2573: 17'.: [ina A.M]E[Š? ... UKU]Š₂.LAGAB U₂-BABBAR x [... t]e-qi₂?#
P394520-K.2570: 9'.: [DIŠ NA IGI.MIN-šu₂ GIG? ... {u₂}EME.U]R.GI₇ {u₂}IN₆.UŠ ina ZI₃.KUM HE.HE ina GEŠTIN N[A]G?
"""
GH = os.path.expanduser("~/github")
ORG = "Nino-cunei"
REPO = "ninmed"
REPO_DIR = f"{GH}/{ORG}/{REPO}"
REPORT_DIR = f"{REPO_DIR}/report"
DECL_PATH = f"{REPO_DIR}/yaml"
META_DECL_FILE = f"{DECL_PATH}/meta.yaml"
LINE_TYPES = set(
"""
EmptyLine
TextLine
ColumnAtLine
DiscourseAtLine
SurfaceAtLine
ControlLine
NoteLine
TranslationLine
LooseDollarLine
RulingDollarLine
SealDollarLine
""".strip().split()
)
def readYaml(fileName):
    """Parse the YAML file at *fileName*; return {} when the file is absent."""
    if not os.path.exists(fileName):
        return {}
    with open(fileName) as fh:
        return yaml.load(fh, Loader=yaml.FullLoader)
META_DECL = readYaml(META_DECL_FILE)
VERSION_SRC = META_DECL["versionSrc"]
VERSION_TF = META_DECL["versionTf"]
IN_DIR = f"{REPO_DIR}/source/json/{VERSION_SRC}"
TF_DIR = f"{REPO_DIR}/tf"
OUT_DIR = f"{TF_DIR}/{VERSION_TF}"
META_FIELDS = {
"collection": ("collection", "str"),
"description": ("description", "str"),
"museum.name": ("museum", "str"),
"cdliNumber": ("pnumber", "str"),
"number": ("docnumber", "str"),
"publication": ("publication", "str"),
}
flagging = {
"*": "collated",
"!": "remarkable",
"?": "question",
"#": "damage",
}
clusterSpecs = (
("BrokenAway", "missing", "[", "]", None),
("PerhapsBrokenAway", "uncertain", "(", ")", None),
("Removal", "excised", "<<", ">>", None),
("AccidentalOmission", "supplied", "<", ">", None),
("DocumentOrientedGloss", "gloss", "{(", ")}", None),
("Determinative", "det", "{", "}", None),
("LoneDeterminative", "det", "{", "}", None),
("Erasure", "erasure", "°", "°", "\\"),
)
clusterType = {x[0]: x[1] for x in clusterSpecs}
clusterChar = {x[1]: {True: x[2], False: x[3], None: x[4]} for x in clusterSpecs}
commentTypes = set(
"""
ruling
colofon
note
comment
seal
tr@en
""".strip().split()
)
languages = set(
"""
en
""".strip().split()
)
LIGA = "␣"
TRANS_RE = re.compile(r"""@i\{([^}]*)\}""", re.S)
slotType = "sign"
generic = {
"name": META_DECL["name"],
"editor": META_DECL["editor"],
"project": META_DECL["project"],
"converters": META_DECL["converters"],
}
otext = {
"fmt:text-orig-full": "{atfpre}{atf}{atfpost}{after}",
"fmt:text-orig-plain": "{sym}{after}",
"sectionFeatures": "pnumber,face,lnno",
"sectionTypes": "document,face,line",
}
intFeatures = (
set(
"""
ln
lln
col
number
primeln
primecol
trans
variant
""".strip().split()
)
| set(flagging.values())
| set(clusterType.values())
| {x[1][0] for x in META_FIELDS.items() if x[1][1] == "int"}
)
featureMeta = {
"after": {"description": "what comes after a sign or word (- or space)"},
"atf": {"description": "full atf of a sign"},
"atfpost": {"description": "cluster characters that follow a sign or word"},
"atfpre": {"description": "cluster characters that precede a sign or word"},
"col": {"description": "ATF column number"},
"collated": {"description": "whether a sign is collated (*)"},
"collection": {"description": 'collection name from metadata field "collection"'},
"colofon": {"description": "colofon comment to a line"},
"comment": {"description": "comment to a line"},
"damage": {"description": "whether a sign is damaged"},
"description": {"description": 'description from metadata field "description"'},
"det": {
"description": "whether a sign is a determinative gloss - between { }",
},
"docnumber": {"description": 'document number from metadata field "number"'},
"erasure": {
"description": (
"whether a sign is in an erasure - between ° \\ °: "
"1: between ° and \\; 2: between \\ and °"
),
},
"excised": {
"description": "whether a sign is excised - between << >>",
},
"face": {"description": "full name of a face including the enclosing object"},
"flags": {"description": "sequence of flags after a sign"},
"gloss": {
"description": "whether a sign belongs to a gloss - between {( )}",
},
"grapheme": {"description": "grapheme of a sign"},
"lang": {
"description": (
"language of a document, word, or sign:"
" absent: Akkadian; sux: Sumerian; sb: Standard Babylonian"
)
},
"lemma": {
"description": (
"lemma of a word:"
"comma-separated values of the uniqueLemma field in the JSON source"
)
},
"lln": {"description": "logical line number of a numbered line"},
"ln": {"description": "ATF line number of a numbered line, without prime"},
"lnno": {
"description": (
"ATF line number, may be $ or #, with prime; column number prepended"
),
},
"missing": {
"description": "whether a sign is missing - between [ ]",
},
"modifiers": {"description": "sequence of modifiers after a sign"},
"museum": {"description": 'museum name from metadata field "museum.name"'},
"note": {"description": "note comment to a line"},
"number": {"description": "numeric value of a number sign"},
"pnumber": {"description": "P number of a document"},
"primecol": {"description": "whether a prime is present on a column number"},
"primeln": {"description": "whether a prime is present on a line number"},
"publication": {
"description": 'publication info from metadata field "publication"'
},
"question": {"description": "whether a sign has the question flag (?)"},
"reading": {"description": "reading of a sign"},
"remarkable": {"description": "whether a sign is remarkable (!)"},
"ruling": {"description": "ruling comment to a line"},
"seal": {"description": "seal comment to a line"},
"sym": {"description": "essential part of a sign or of a word"},
"supplied": {
"description": "whether a sign is supplied - between < >",
},
"trans": {"description": "whether a line has a translation"},
"tr@en": {"description": "english translation of a line"},
"type": {"description": "name of a type of cluster or kind of sign"},
"uncertain": {"description": "whether a sign is uncertain - between ( )"},
"variant": {
"description": (
"if sign is part of a variant pair, "
"this is the sequence number of the variant (1 or 2)"
)
},
}
def msg(m):
    """Emit a progress/diagnostic message (one line) on standard output."""
    print(m, file=sys.stdout)
def getJsonFiles():
    """Recursively collect all ``*.json`` file paths under ``IN_DIR``.

    Hidden directories (names starting with ``.``) are not descended into.
    Returns the collected paths sorted alphabetically.
    """
    filePaths = []

    def walk(path):
        # Depth-first scan of one directory level.
        with os.scandir(path) as it:
            for entry in it:
                name = entry.name
                if not name.startswith(".") and entry.is_dir():
                    walk(f"{path}/{name}")
                # BUGFIX: `entry.is_file` was previously referenced without
                # calling it, which is always truthy, so non-regular-file
                # entries whose name ends in ".json" were collected too.
                elif name.endswith(".json") and entry.is_file():
                    filePaths.append(f"{path}/{name}")

    walk(IN_DIR)
    return sorted(filePaths)
def readJsonFile(path):
    """Load and return the JSON document stored at *path*."""
    with open(path) as fh:
        return json.load(fh)
def writeReport(fName, lines):
    """Write *lines*, newline-terminated, to ``REPORT_DIR/fName``."""
    reportPath = f"{REPORT_DIR}/{fName}"
    with open(reportPath, "w") as fh:
        fh.writelines(f"{line}\n" for line in lines)
def getConverter():
    """Return a text-fabric walk converter (CV) targeting ``OUT_DIR``."""
    return CV(Fabric(locations=OUT_DIR))
# Restriction filters for partial conversion runs.  They stay None (no
# restriction) unless set from the command line in the MAIN section below.
PNUMBER = None
FACE = None
LINE = None
def convert():
    """Run the walker conversion; wipe ``OUT_DIR`` first when TF is generated.

    Returns the result of ``cv.walk`` (truthy on success).
    """
    if generateTf:
        # Start from a clean target directory.
        if os.path.exists(OUT_DIR):
            rmtree(OUT_DIR)
        os.makedirs(OUT_DIR, exist_ok=True)
    converter = getConverter()
    return converter.walk(
        director,
        slotType,
        otext=otext,
        generic=generic,
        intFeatures=intFeatures,
        featureMeta=featureMeta,
        generateTf=generateTf,
    )
def director(cv):
    """
    Walker director for the text-fabric conversion.

    Reads every JSON transliteration file under ``IN_DIR`` (via
    ``getJsonFiles``/``readJsonFile``) and issues node, slot and feature
    events on the converter ``cv``: one ``document`` node per file, with
    ``face``, ``line``, ``word``, ``cluster`` nodes and ``sign`` slots
    inside, honoring the PNUMBER/FACE/LINE restriction globals.
    """
    DEBUG = False
    # Per cluster type: (currently open cluster node, phase counter).
    # Phase 1 = opened, phase 2 = second part seen (for °...\...° erasures).
    curClusters = {cluster: (None, 0) for cluster in clusterType.values()}

    # Print m only when DEBUG is switched on.
    def debug(m):
        if DEBUG:
            msg(f"INFO ======> {m}")
            sys.stdout.flush()

    # Report an error; with stop=True the whole program is aborted.
    def error(m, stop=False):
        msg(f"ERROR ======> {m}")
        sys.stdout.flush()
        if stop:
            quit()

    # Close every cluster node that is still open and reset the state.
    def terminateClusters():
        for tp in curClusters:
            (node, val) = curClusters[tp]
            if node:
                cv.terminate(node)
            curClusters[tp] = (None, 0)

    # Open/close/advance a cluster of the given type, driven either by the
    # explicit on/off arguments or by data["side"] (LEFT/RIGHT/CENTER).
    # Returns True (opened), False (closed) or None (middle part).
    def doCluster(data, cluster, on=None, off=None):
        makeOn = data["side"] == "LEFT" if on is None else on
        makeOff = data["side"] == "RIGHT" if off is None else off
        makeAlt = data["side"] == "CENTER" if off is None and on is None else off and on
        status = True if makeOn else False if makeOff else None
        if status is None and not makeAlt:
            error(f"cluster {cluster} not on and not off and not alt", stop=True)
        (clusterNode, clusterVal) = curClusters[cluster]
        if status is True:
            if clusterNode is not None:
                error(f"cluster {cluster} is nesting", stop=True)
            clusterNode = cv.node("cluster")
            curClusters[cluster] = (clusterNode, 1)
            cv.feature(clusterNode, type=cluster)
        elif status is False:
            if clusterNode is None:
                error(f"cluster {cluster} is spuriously closed", stop=True)
            else:
                cv.terminate(clusterNode)
                curClusters[cluster] = (None, 0)
        elif status is None:
            if clusterNode is None or clusterVal == 0:
                error(f"cluster {cluster} is missing first part", stop=True)
            elif clusterVal > 1:
                error(f"cluster {cluster} has too many parts", stop=True)
            else:
                curClusters[cluster] = (clusterNode, 2)
        return status

    # Map of currently open cluster types to their phase, used to stamp
    # cluster membership features onto each sign slot.
    def getClusters():
        return {
            cluster: val
            for (cluster, (node, val)) in curClusters.items()
            if node is not None
        }

    # Stamp the flag string and one boolean feature per flag onto node cur.
    def doFlags(data, cur, extraFlag):
        flagList = data.get("flags", [])
        if extraFlag:
            flagList.append(extraFlag)
        if len(flagList):
            flags = "".join(flagList)
            atts = {flagging[flag]: 1 for flag in flags}
            cv.feature(cur, flags=flags, **atts)

    # Stamp the modifier string (e.g. "@x@y") onto node cur.
    def doModifiers(data, cur):
        modifierList = data.get("modifiers", [])
        if len(modifierList):
            modifiers = "".join(m[1:] for m in modifierList)
            cv.feature(cur, modifiers=f"@{modifiers}")

    # Emit one sign token: cluster open/close markers, language shifts and
    # joiners update state; every other sign type creates a new slot with
    # atf/sym/type features plus flags, modifiers and cluster membership.
    def doSign(data, wordAfter, isLast, **features):
        nonlocal curSign
        nonlocal nextPre
        nonlocal lang
        signType = data["type"]
        after = wordAfter if isLast else ""
        if signType in {
            "AccidentalOmission",
            "Erasure",
            "Removal",
            "BrokenAway",
            "PerhapsBrokenAway",
            "DocumentOrientedGloss",
        }:
            # Pure cluster boundary token: no slot; opening characters are
            # buffered in nextPre, closing ones appended to the last slot.
            cluster = clusterType[signType]
            status = doCluster(data, cluster)
            if status is True:
                nextPre += clusterChar[cluster][status]
            elif status is False:
                cv.feature(
                    curSign,
                    after=(cv.get("after", curSign) or "") + after,
                    atfpost=(cv.get("atfpost", curSign) or "")
                    + clusterChar[cluster][status],
                )
            elif status is None:
                nextPre += clusterChar[cluster][status]
        elif signType == "LanguageShift":
            # %xx switches the language; "akk" maps back to the default None.
            lang = data["cleanValue"][1:]
            nextPre += f"%{lang} "
            if lang == "akk":
                lang = None
        elif signType == "Joiner":
            if curSign is not None:
                cv.feature(curSign, after=data["value"])
        else:
            atf = data["value"]
            atfPre = ""
            atfPost = ""
            extraFlag = ""
            # Handle [ and ] that occur inside a sign's atf: they open or
            # close the "missing" cluster in mid-sign.
            indexStart = atf.find("[")
            indexEnd = atf.find("]")
            startMissingInternal = atf != "[" and indexStart >= 0
            endMissingInternal = atf != "]" and indexEnd >= 0
            if startMissingInternal and endMissingInternal:
                if indexStart < indexEnd:
                    extraFlag = "#"
                    atf = atf.replace("[", "").replace("]", "") + extraFlag
                else:
                    atf = atf.replace("[", "").replace("]", "")
            elif startMissingInternal:
                atf = atf.replace("[", "")
                atfPre = "["
                doCluster(data, "missing", on=True, off=False)
            curSign = cv.slot()
            if endMissingInternal and not startMissingInternal:
                atf = atf.replace("]", "")
                atfPost += "]"
                doCluster(data, "missing", on=False, off=True)
            # Flush the buffered opening material onto this slot.
            thisPre = nextPre + atfPre
            atfPreFeat = dict(atfpre=thisPre) if thisPre else {}
            atfPre = ""
            nextPre = ""
            atfPostFeat = dict(atfpost=atfPost) if atfPost else {}
            atfPost = ""
            cv.feature(
                curSign,
                **atfPreFeat,
                **atfPostFeat,
                atf=atf,
                **getClusters(),
                **features,
            )
            doFlags(data, curSign, extraFlag)
            doModifiers(data, curSign)
            sym = data["cleanValue"]
            feats = {}
            # Classify the sign and compute its type-specific features.
            if signType == "UnknownNumberOfSigns":
                tp = "ellipsis"
            elif signType == "UnidentifiedSign":
                tp = "unknown"
            elif signType == "UnclearSign":
                tp = "unknown"
            elif signType == "Number":
                tp = "numeral"
                feats = dict(number=sym)
            elif signType == "Logogram":
                tp = "grapheme"
                feats = dict(grapheme=sym)
            elif signType == "Reading":
                tp = "reading"
                sign = data.get("sign", None)
                reading = data["name"]
                if sign is None:
                    feats = dict(reading=reading)
                else:
                    grapheme = sign["cleanValue"]
                    feats = dict(reading=reading, grapheme=grapheme)
            elif signType == "Joiner":
                tp = "joiner"
            elif signType == "Divider":
                tp = "wdiv"
                after = wordAfter
            elif signType == "Determinative" or signType == "Variant":
                error(f"nested {signType} Signs", stop=True)
            else:
                error(f"unrecognized sign type {signType}", stop=True)
            cv.feature(curSign, type=tp, after=after, sym=sym, **feats)

    # Main walk: one iteration per JSON document.
    paths = getJsonFiles()
    skipFace = FACE is not None
    skipLine = LINE is not None
    for (i, path) in enumerate(paths):
        fileName = path.split("/")[-1].rsplit(".", 1)[0]
        docData = readJsonFile(path)
        # Document metadata: flat fields or one-level dotted paths.
        metaData = {}
        for (origField, (field, tp)) in META_FIELDS.items():
            origFields = origField.split(".", 1)
            metaData[field] = (
                docData[origField]
                if len(origFields) == 1
                else docData[origFields[0]][origFields[1]]
            )
        pNumber = metaData["pnumber"]
        if PNUMBER is not None and PNUMBER != pNumber:
            continue
        textData = docData["text"]["allLines"]
        nLines = len(textData)
        msg(f"{i + 1:>3} {nLines:>4} lines in {fileName}")
        if nLines == 0:
            continue
        curDoc = cv.node("document")
        cv.feature(curDoc, **metaData)
        curFace = None
        curFaceValue = None
        col = None
        primecol = None
        lln = 0
        prevLine = None
        for lineData in textData:
            lang = None
            lineType = lineData["type"]
            content = " ".join(c["value"] for c in lineData["content"])
            atf = f"{lineData['prefix']} {content}"
            isFaceLine = lineType == "SurfaceAtLine"
            if isFaceLine:
                thisFaceValue = lineData["label"]["surface"].lower()
                if FACE is not None:
                    skipFace = thisFaceValue != FACE
                if thisFaceValue == curFaceValue:
                    continue
            if skipFace:
                continue
            # Open a face node on an explicit face line, or an implicit
            # "obverse" face when material precedes any face line.
            if isFaceLine or not curFace:
                if isFaceLine and curFace:
                    terminateClusters()
                    cv.terminate(curFace)
                curFace = cv.node("face")
                col = None
                primecol = None
                lln = 0
                curFaceValue = (
                    lineData["label"]["surface"].lower() if isFaceLine else "obverse"
                )
                cv.feature(curFace, face=curFaceValue)
                if isFaceLine:
                    debug(atf)
                    continue
            if lineType == "ColumnAtLine":
                col = lineData["label"]["column"]
                primeInfo = lineData["label"]["status"]
                primecol = len(primeInfo) > 0 and "PRIME" in primeInfo
                # NOTE(review): primecolAtt is only (re)bound when a column
                # line is seen; the uses further below rely on that.
                primecolAtt = dict(primecol=1) if primecol else {}
                debug(atf)
                continue
            # Normalize certain control lines into note/translation lines.
            if lineType == "ControlLine":
                content = lineData["content"][0]["value"]
                if content.startswith("note:"):
                    lineData["content"][0]["value"] = content[5:].lstrip()
                    lineData["prefix"] = "#note: "
                    lineType = "NoteLine"
                if content.startswith("tr."):
                    content = content.split(".", 1)[1]
                    parts = content.split(" ", 1)
                    lan = parts[0]
                    if lan in languages:
                        content = parts[1] if len(parts[1]) > 1 else ""
                    else:
                        (lan, content) = content.split(":", 1)
                        lan = lan.split(".", 1)[0]
                        if lan not in languages:
                            error(f"Unknown language {lan}", stop=True)
                    lineData["content"][0]["value"] = content
                    lineType = "TranslationLine"
                    lineData["prefix"] = f"#tr.{lan}: "
            isEmptyLine = lineType == "EmptyLine"
            isTextLine = lineType == "TextLine"
            if isEmptyLine or isTextLine:
                lln += 1
                curLine = cv.node("line")
                cv.feature(curLine, lln=lln, atf=atf)
                if isEmptyLine:
                    # An empty line still gets one slot of type "empty".
                    if not skipLine:
                        debug(atf)
                        curSlot = cv.slot()
                        cv.feature(curSlot, type="empty")
                        prevLine = curLine
                    if col is None:
                        lnno = f"!{lln}"
                    else:
                        cv.feature(curLine, col=col, **primecolAtt)
                        colprime = "'" if primecol else ""
                        colno = f"{col}{colprime}"
                        lnno = f"!{colno}:{lln}"
                    cv.feature(curLine, lnno=lnno)
                else:
                    numberData = lineData["lineNumber"]
                    ln = numberData["number"]
                    primeln = numberData["hasPrime"]
                    primelnAtt = dict(primeln=1) if primeln else {}
                    lnprime = "'" if primeln else ""
                    lnno = f"{ln}{lnprime}"
                    if col is not None:
                        cv.feature(curLine, col=col, **primecolAtt)
                        colprime = "'" if primecol else ""
                        colno = f"{col}{colprime}"
                        lnno = f"{colno}:{lnno}"
                    if LINE is not None:
                        skipLine = lnno.replace("'", "") != LINE
                    if skipLine:
                        continue
                    debug(atf)
                    cv.feature(curLine, ln=ln, **primelnAtt, lnno=lnno)
                    # Flatten words and their sub-signs into lineSigns
                    # entries: [isWord, data, isLast, (tp, index, atEnd)].
                    # isWord is True for words, False for signs, None for
                    # non-word tokens.
                    lineContent = lineData["content"]
                    lineSigns = []
                    for wordData in lineContent:
                        if "parts" in wordData:
                            lineSigns.append([True, wordData, False])
                            for signData in wordData["parts"]:
                                hasSubs = False
                                for (kind, tp) in (
                                    ("parts", "Determinative"),
                                    ("tokens", "Variant"),
                                ):
                                    if kind in signData:
                                        hasSubs = True
                                        end = len(signData[kind])
                                        for (i, subPart) in enumerate(signData[kind]):
                                            lineSigns.append(
                                                [
                                                    False,
                                                    subPart,
                                                    False,
                                                    tp,
                                                    i,
                                                    i == end - 1,
                                                ]
                                            )
                                if not hasSubs:
                                    lineSigns.append([False, signData, False])
                        else:
                            lineSigns.append([None, wordData, None])
                    # Mark the last word and the last sign of each word.
                    for entry in reversed(lineSigns):
                        isWord = entry[0]
                        if isWord:
                            entry[2] = True
                            break
                    atWordEnd = True
                    for entry in reversed(lineSigns):
                        isWord = entry[0]
                        if isWord is False:
                            if atWordEnd:
                                entry[2] = True
                                atWordEnd = False
                        elif isWord is True:
                            atWordEnd = True
                    curWord = None
                    curSign = None
                    nextPre = ""
                    # NOTE(review): wordAfter is only bound when a word entry
                    # has been seen; a sign preceding any word would hit an
                    # unbound local -- presumably the data never does that.
                    for (e, entry) in enumerate(lineSigns):
                        isWord = entry[0]
                        data = entry[1]
                        isLast = entry[2]
                        where = None if len(entry) < 4 else entry[3]
                        contentType = data["type"]
                        if isWord:
                            if curWord:
                                cv.terminate(curWord)
                            atf = data["value"]
                            sym = data["cleanValue"]
                            curWord = cv.node("word")
                            wordAfter = "\n" if isLast else " "
                            cv.feature(curWord, atf=atf, sym=sym, after=wordAfter)
                            if contentType in {"Word", "LoneDeterminative"}:
                                lemmaData = data["uniqueLemma"]
                                lemma = ", ".join(lemmaData)
                                atts = {} if lang is None else dict(lang=lang)
                                cv.feature(curWord, type="word", **atts, lemma=lemma)
                        elif isWord is False:
                            if len(entry) > 3:
                                # Sub-sign of a determinative or variant.
                                tp = entry[3]
                                where = entry[4]
                                atEnd = entry[5]
                                atts = {}
                                if tp == "Determinative":
                                    if where == 0:
                                        doCluster(data, "det", on=True, off=False)
                                        nextPre += "{"
                                    doSign(data, wordAfter, isLast)
                                    if atEnd:
                                        cv.feature(
                                            curSign,
                                            atfpost=(cv.get("atfpost", curSign) or "")
                                            + "}",
                                        )
                                        doCluster(data, "det", on=False, off=True)
                                elif tp == "Variant":
                                    doSign(data, wordAfter, isLast, variant=where + 1)
                                    if not atEnd:
                                        cv.feature(
                                            curSign,
                                            atfpost=(cv.get("atfpost", curSign) or "")
                                            + "/",
                                        )
                                else:
                                    error(f"Unknown complex type: {tp}", stop=True)
                            else:
                                doSign(data, wordAfter, isLast)
                        else:
                            doSign(data, " ", isLast)
                    if nextPre != "":
                        error(
                            f"dangling pre material at last sign of line: {nextPre}",
                            stop=True,
                        )
                    cv.terminate(curWord)
                    prevLine = curLine
                cv.terminate(curLine)
            else:
                # Comment-like lines: stored as features on the previous line.
                if skipLine:
                    continue
                debug(atf)
                if lineType == "ControlLine":
                    error(f"Unknown ControlLine: {atf[0:40]}")
                    continue
                content = " ".join(c["cleanValue"] for c in lineData["content"])
                if not content:
                    continue
                contents = {}
                if lineType == "RulingDollarLine":
                    tp = "ruling"
                elif lineType == "DiscourseAtLine":
                    tp = "colofon"
                elif lineType == "NoteLine":
                    tp = "note"
                elif lineType == "TranslationLine":
                    content = TRANS_RE.sub(r"\1", content)
                    if not content:
                        continue
                    parts = lineData["prefix"][1:].split(":", 1)[0].split(".")
                    lan = parts[1]
                    if lan not in languages:
                        error(f"Unknown language {lan}", stop=True)
                    else:
                        tp = f"tr@{lan}"
                        if len(parts) > 2 and parts[2]:
                            content = f"{parts[2]}:{content}"
                    contents["trans"] = 1
                elif lineType == "LooseDollarLine":
                    tp = "comment"
                elif lineType == "SealDollarLine":
                    tp = "seal"
                # Accumulate multiple comments of the same kind per line.
                orig = cv.get(tp, prevLine)
                content = content if not orig else f"{orig}\n{content}"
                contents[tp] = content
                cv.feature(prevLine, **contents)
        if curFace:
            terminateClusters()
            cv.terminate(curFace)
        cv.terminate(curDoc)
    # Declare metadata for features that ended up without occurrences.
    for feat in featureMeta:
        if not cv.occurs(feat):
            error(f"feature {feat} does not occur")
            cv.meta(feat)
def loadTf():
    """Load the generated TF dataset from ``OUT_DIR`` and print sanity stats."""
    TF = Fabric(locations=[OUT_DIR])
    features = TF.explore(silent=True, show=True)
    api = TF.load(features["nodes"] + features["edges"], silent=False)
    if not api:
        return
    msg(f"max node = {api.F.otype.maxNode}")
    msg("Frequency of readings")
    msg(api.F.reading.freqList()[0:20])
    msg("Frequency of grapheme")
    msg(api.F.grapheme.freqList()[0:20])
# MAIN: command-line driver.
command = None if len(sys.argv) <= 1 else sys.argv[1]
msg(f"JSON to TF converter for {REPO}")
msg(f"ATF source version = {VERSION_SRC}")
msg(f"TF target version = {VERSION_TF}")
# No argument: full conversion, and on success a load check.
if command is None:
    generateTf = True
    good = convert()
    if good:
        loadTf()
# "Pnumber[:face[:line]]": restrict conversion; skip the load step.
# NOTE(review): HELP advertises "-Pnumber" but the code matches a bare
# "P..." prefix -- confirm which form is intended.
elif command.startswith("P"):
    generateTf = True
    parts = command.split(":", 1)
    PNUMBER = parts[0]
    if len(parts) > 1:
        parts = parts[1].split(":", 1)
        FACE = parts[0]
        if len(parts) > 1:
            # Primes in line numbers are not relevant for matching.
            LINE = parts[1].replace("'", "")
    convert()
elif command == "-skipload":
    generateTf = True
    convert()
elif command == "-skipgen":
    loadTf()
else:
    msg(f"Wrong command {command} !\n{HELP}")
| true | true |
1c3a8ef5fc397e1e41b51b5816497d1dcb8e05b6 | 114,734 | py | Python | theano/gpuarray/opt.py | JimmyRetza/Theano | 72d83bce0d547d54ab3513bcba35c166979f7a6f | [
"BSD-3-Clause"
] | 1 | 2020-05-01T11:09:48.000Z | 2020-05-01T11:09:48.000Z | theano/gpuarray/opt.py | JimmyRetza/Theano | 72d83bce0d547d54ab3513bcba35c166979f7a6f | [
"BSD-3-Clause"
] | null | null | null | theano/gpuarray/opt.py | JimmyRetza/Theano | 72d83bce0d547d54ab3513bcba35c166979f7a6f | [
"BSD-3-Clause"
] | 1 | 2018-04-06T08:31:11.000Z | 2018-04-06T08:31:11.000Z | from __future__ import absolute_import, print_function, division
import copy
import numpy as np
import logging
import pdb
import time
from six import iteritems
from six.moves import xrange
import sys
import theano
from theano import tensor, scalar, gof, config
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.gof import (local_optimizer, EquilibriumDB, TopoOptimizer,
LocalGroupDB,
SequenceDB, Optimizer, DB, toolbox, graph)
from theano.gof.opt import (LocalMetaOptimizer, copy_stack_trace,
inherit_stack_trace)
from theano.ifelse import IfElse
from theano.misc.ordered_set import OrderedSet
from theano.scalar.basic import Scalar, Pow, Cast
from theano.scalar.basic import log, neg, true_div
from theano.scalar.basic_scipy import Erfinv, Erfcinv
from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.nnet import bn, conv3d2d
from theano.tensor.nnet.conv import ConvOp
from theano.tensor.nnet.blocksparse import SparseBlockGemv, SparseBlockOuter
from theano.tensor.nnet.abstract_conv import (BaseAbstractConv,
AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs,
get_conv_output_shape)
from theano.tensor.nnet.neighbours import Images2Neibs
from theano.tensor.nnet.ctc import ConnectionistTemporalClassification
import theano.tensor.nlinalg as nlinalg
import theano.tensor.signal.pool as pool
import theano.tensor.slinalg as slinalg
from collections import Counter
from theano.tests.breakpoint import PdbBreakpoint
from .type import (GpuArrayType, GpuArrayConstant, get_context,
ContextNotDefined, move_to_gpu)
from .basic_ops import (as_gpuarray_variable, infer_context_name,
host_from_gpu, GpuToGpu,
HostFromGpu, GpuFromHost,
GpuSplit, GpuContiguous, gpu_contiguous,
GpuAlloc, GpuAllocEmpty, GpuReshape,
GpuEye, gpu_join, GpuJoin)
from .blas import (gpu_dot22, GpuGemm, GpuGer, GpuGemmBatch,
gpugemm_no_inplace, gpugemm_inplace,
gpugemmbatch_no_inplace,
gpugemv_no_inplace, gpugemv_inplace,
GpuCorrMM, GpuCorrMM_gradInputs, GpuCorrMM_gradWeights,
GpuCorr3dMM, GpuCorr3dMM_gradInputs, GpuCorr3dMM_gradWeights)
from .pool import (GpuPool, GpuMaxPoolGrad, GpuAveragePoolGrad, GpuMaxPoolRop,
GpuDownsampleFactorMaxGradGrad)
from .blocksparse import (GpuSparseBlockGemv, GpuSparseBlockOuter,
gpu_sparse_block_outer,
gpu_sparse_block_outer_inplace,
gpu_sparse_block_gemv, gpu_sparse_block_gemv_inplace)
from .nnet import (gpu_crossentropy_softmax_1hot_with_bias_dx,
gpu_crossentropy_softmax_argmax_1hot_with_bias,
gpu_softmax_with_bias, gpu_softmax)
from .elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda,
GpuCAReduceCPY, gpu_erfinv, gpu_erfcinv,
max_inputs_to_GpuElemwise)
from .subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedSubtensor,
GpuAdvancedSubtensor1,
GpuAdvancedBooleanSubtensor,
GpuAdvancedIncSubtensor,
GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20,
GpuAdvancedBooleanIncSubtensor,
GpuAllocDiag, GpuExtractDiag)
from .opt_util import alpha_merge, output_merge, pad_dims, unpad_dims
from .reduction import GpuMaxAndArgmax
from .linalg import (GpuCusolverSolve, MATRIX_STRUCTURES_SOLVE, GpuCholesky,
cusolver_available, GpuMagmaMatrixInverse, gpu_svd,
GpuMagmaCholesky, gpu_qr, GpuMagmaEigh,
GpuCublasTriangularSolve, cublas_available)
from .neighbours import GpuImages2Neibs
from .ctc import GpuConnectionistTemporalClassification
# Module-wide logger for this optimization package.
_logger = logging.getLogger("theano.gpuarray.opt")
# Databases in which the GPU local optimizations are registered.
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
# Not used for an EquilibriumOptimizer. It has the "tracks" that we need for GraphToGPUDB.
gpu_optimizer2 = EquilibriumDB()
class GraphToGPUDB(DB):
    """
    Optimizer database that, when queried, builds a GraphToGPU optimizer
    from the local optimizers registered in ``gpu_optimizer2`` under the
    requested tags.
    """

    def query(self, *tags, **kwtags):
        equilibrium = gpu_optimizer2.query(*tags, **kwtags)
        return GraphToGPU(equilibrium.local_optimizers_all,
                          equilibrium.local_optimizers_map)
gpu_seqopt = SequenceDB()
# Stage ordering: whole-graph transfer first (-0.5), then node-by-node
# local optimizations (1), then removal of redundant transfers (2).
gpu_seqopt.register('gpuarray_graph_optimization', GraphToGPUDB(), -0.5,
                    'fast_compile', 'fast_run', 'gpuarray')
gpu_seqopt.register('gpuarray_local_optimizations', gpu_optimizer, 1,
                    'fast_compile', 'fast_run', 'gpuarray', 'gpuarray_local_optimiziations')
gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,
                    'fast_compile', 'fast_run', 'gpuarray')
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register('gpuarray_opt', gpu_seqopt,
               optdb.__position__.get('add_destroy_handler', 49.5) - 1,
               'gpuarray')
def register_opt(*tags, **kwargs):
    """Decorator registering a local optimizer in ``gpu_optimizer``."""
    def decorator(local_opt):
        # An explicit, non-empty name= keyword wins over the function name.
        if kwargs:
            name = kwargs.pop('name') or local_opt.__name__
        else:
            name = local_opt.__name__
        gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags)
        return local_opt
    return decorator
def register_opt2(tracks, *tags, **kwargs):
    """
    Decorator registering an optimizer for the GraphToGPU pass.

    Compared to ``register_opt`` it takes an extra parameter: the Ops to
    track.

    Parameters
    ----------
    tracks : List of Op class Or Op instance or None
        The Node's Op to which optimization is being applied.

    tags : String
        The optimization tag to which the optimizer will be registered.

    """
    def decorator(local_opt):
        if kwargs:
            name = kwargs.pop('name') or local_opt.__name__
        else:
            name = local_opt.__name__
        # A DB is registered as-is; a plain function is first wrapped as a
        # local optimizer tracking the given Ops.
        if isinstance(local_opt, theano.gof.DB):
            wrapped = local_opt
        else:
            wrapped = theano.gof.local_optimizer(tracks)(local_opt)
        gpu_optimizer2.register(name, wrapped, 'fast_run', 'gpuarray', *tags)
        return local_opt
    return decorator
def register_inplace(*tags, **kwargs):
    """Decorator registering an inplace optimization in the global optdb."""
    def decorator(local_opt):
        if kwargs:
            name = kwargs.pop('name') or local_opt.__name__
        else:
            name = local_opt.__name__
        topo = TopoOptimizer(local_opt,
                             failure_callback=TopoOptimizer.warn_inplace)
        optdb.register(name, topo, 60,
                       'fast_run', 'inplace', 'gpuarray', *tags)
        return local_opt
    return decorator
# Reuse a few CPU local optimizations that are also valid on GPU variables.
register_opt('fast_compile')(theano.tensor.opt.local_track_shape_i)
register_opt(final_opt=True, name='gpua_constant_folding')(
    tensor.opt.constant_folding)
gpu_optimizer.register('local_remove_all_assert',
                       theano.tensor.opt.local_remove_all_assert,
                       'unsafe')
# Define a few operations to use in optimizations,
# in order to avoid introducin new CPU Ops, or useless ones.
def safe_to_gpu(x, ctx_name):
    """Transfer x to GPU context ctx_name when it is a plain TensorType;
    any other variable is returned unchanged."""
    if not isinstance(x.type, tensor.TensorType):
        return x
    return GpuFromHost(ctx_name)(x)
def safe_to_cpu(x):
    """Transfer x back to the host when it lives on the GPU; otherwise
    return it unchanged."""
    if not isinstance(x.type, GpuArrayType):
        return x
    return x.transfer('cpu')
# GPU elemwise versions of a few scalar ops, for use in optimizations so
# that no new CPU Ops need to be introduced.
gpu_log = GpuElemwise(log)
gpu_neg = GpuElemwise(neg)
gpu_true_div = GpuElemwise(true_div)
def op_lifter(OP, cuda_only=False):
    """
    OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))

    gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)

    Decorator factory: wraps a ``maker`` callback into a local optimizer
    that lifts nodes whose Op type is in ``OP`` to the GPU.  The maker may
    return an Op, a list/tuple of output variables, or a single GPU
    variable; ``False`` is returned when no replacement applies.
    """
    def f(maker):
        def local_opt(node):
            if type(node.op) in OP:
                # Either one of our inputs is on the gpu or
                # all of our clients are on the gpu
                replace = False
                # TODO: Maybe set context_name with infer_context_name()?
                context_name = None
                # We replace if any input is a host_from_gpu
                for i in node.inputs:
                    if (i.owner and i.owner.op == host_from_gpu and
                            move_to_gpu(i)):
                        context_name = i.owner.inputs[0].type.context_name
                        replace = True
                        break
                if not replace:
                    # We replace if *all* clients are on the GPU
                    clients = [c for o in node.outputs for c in o.clients]
                    replace = len(clients) != 0
                    for c, idx in clients:
                        if (c == 'output' or
                                not isinstance(c.op, GpuFromHost)):
                            replace = False
                    # TODO: check that the clients want the same context?
                    if replace:
                        # All clients are GpuFromHost and we have at least one
                        context_name = clients[0][0].op.context_name
                # Check if we should replace
                # (also bail out on cuda-only ops on non-cuda contexts and
                # on complex dtypes, which have no GPU support here).
                if (not replace or
                        (cuda_only and
                         get_context(context_name).kind != b'cuda') or
                        any(["complex" in getattr(i, 'dtype', "")
                             for i in node.inputs])):
                    return False
                # tag the inputs with the context in case
                # the context was derived from the outputs
                for i in node.inputs:
                    i.tag.context_name = context_name
                new_op = maker(node.op, context_name, node.inputs, node.outputs)
                # This is needed as sometimes new_op inherits from OP.
                if new_op and new_op != node.op:
                    if isinstance(new_op, theano.Op):
                        new_outputs = new_op(*node.inputs, return_list=True)
                        to_cpu_fn = safe_to_cpu
                    elif isinstance(new_op, (tuple, list)):
                        new_outputs = new_op
                        to_cpu_fn = safe_to_cpu
                    else:  # suppose it is a variable on the GPU
                        new_outputs = [new_op]

                        def to_cpu_fn(x):
                            return x.transfer('cpu')
                    # copy stack traces onto gpu outputs
                    # also copy the stack traces onto HostFromGpu outputs
                    on_cpu = []
                    for old_output, new_output in zip(node.outputs, new_outputs):
                        copy_stack_trace(old_output, new_output)
                        cpu = to_cpu_fn(new_output)
                        on_cpu.append(cpu)
                        copy_stack_trace(old_output, cpu)
                    return on_cpu
            return False
        local_opt.__name__ = maker.__name__
        return local_optimizer(OP)(local_opt)
    return f
class InputToGpuOptimizer(Optimizer):
    """
    Transfer the input to the gpu to start the rolling wave.
    """
    def add_requirements(self, fgraph):
        # Replacements below go through fgraph.replace_validate.
        fgraph.attach_feature(toolbox.ReplaceValidate())
    def apply(self, fgraph):
        # For each eligible graph input, insert a GpuFromHost followed by a
        # transfer back to CPU, so later passes can absorb the round trip.
        for input in fgraph.inputs:
            if isinstance(input.type, GpuArrayType):
                continue
            # If all clients are outputs or transfers don't do anything.
            if (all(cl[0] == 'output' or isinstance(cl[0].op, GpuFromHost)
                    for cl in input.clients)):
                continue
            # An explicit 'cpu' target tag pins the input to the host.
            target = getattr(input.tag, 'target', None)
            if target == 'cpu':
                continue
            if (isinstance(input.type, tensor.TensorType) and
                    not move_to_gpu(input)):
                continue
            try:
                new_input = GpuFromHost(target)(input).transfer('cpu')
                fgraph.replace_validate(input, new_input,
                                        "InputToGpuOptimizer")
            except TypeError:
                # This could fail if the inputs are not TensorTypes
                pass
            except ContextNotDefined:
                if hasattr(input.tag, 'target'):
                    raise
                # If there is no context tag and no default context
                # then it stays on the CPU
                pass
# Runs first (position 0): seeds the graph with host->GPU input transfers.
gpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(),
                    0, 'fast_run', 'fast_compile', 'merge')
class GraphToGPU(Optimizer):
"""
Transfer the graph as a whole to GPU instead of transferring node by node.
Parameters
----------
local_optimizers_all : List or SortedSet
The local optimizations to apply to a node.
local_optimizers_map : Dict
Dictionary object containing the mapping of Op to list of
LocalOptimizers.
"""
    def __init__(self, local_optimizers_all, local_optimizers_map):
        # Optimizers tried on every node, after the per-Op mapped ones.
        self.local_optimizers_all = local_optimizers_all
        # Mapping of Op (instance or class) -> list of LocalOptimizers.
        self.local_optimizers_map = local_optimizers_map
    def add_requirements(self, fgraph):
        # apply() ends with fgraph.replace_all_validate, which needs this.
        fgraph.attach_feature(toolbox.ReplaceValidate())
    def apply(self, fgraph):
        """
        Rebuild the whole fgraph on the GPU in one topological sweep and
        replace the graph outputs.  Returns profiling data:
        (self, toposort timing, per-optimizer timings, nodes created,
        application counts).
        """
        # mapping: old variable -> its replacement in the new (GPU) graph.
        mapping = {}
        time_opts = {}
        node_created = {}
        process_count = {}
        t_topo = time.time()
        topo = fgraph.toposort()
        time_topo = time.time()
        toposort_timing = time_topo - t_topo
        # Building a new graph
        # Iterating through inputs of graph
        target = infer_context_name(*fgraph.inputs)
        for i in fgraph.inputs:
            if isinstance(i.type, tensor.TensorType) and move_to_gpu(i):
                mapping[i] = i.transfer(getattr(i.tag, 'target', target))
            else:
                mapping[i] = i
        for i in fgraph.variables:
            if isinstance(i, theano.Constant):
                mapping[i] = i
        # Pre-seed the profiling counters for every optimizer that could
        # be tried on some node.
        for node in topo:
            for lopt in (self.local_optimizers_map.get(node.op, []) +
                         self.local_optimizers_map.get(type(node.op), []) +
                         self.local_optimizers_all):
                process_count.setdefault(lopt, 0)
                time_opts.setdefault(lopt, 0)
                node_created.setdefault(lopt, 0)
        for node in topo:
            if isinstance(node.op, HostFromGpu):
                mapping[node.outputs[0]] = mapping[node.inputs[0]]
                continue
            # Move only if any of the inputs are on the GPU.
            move_to_GPU = False
            context_name = None
            for i in [mapping[i] for i in node.inputs]:
                if isinstance(i.type, GpuArrayType):
                    context_name = i.type.context_name
                    move_to_GPU = True
                    break
            if (not move_to_GPU and
                    isinstance(node.op, (theano.tensor.Alloc,
                                         theano.tensor.AllocEmpty,
                                         theano.tensor.basic.Eye))):
                # If the Alloc[Empty] have a client that will be moved
                # to the GPU, we should move the Alloc* on the GPU.
                # We approximate this by supposing that if we have an
                # optimization for one of the clients op, then we will
                # move the client to the GPU.
                for c, _ in node.outputs[0].clients:
                    if (c != 'output' and
                            (self.local_optimizers_map.get(c.op, []) +
                             self.local_optimizers_map.get(type(c.op), []))):
                        move_to_GPU = True
            new_ops = None
            # Complex dtypes are not supported on the GPU here.
            if move_to_GPU and any(["complex" in getattr(i, 'dtype', "")
                                    for i in node.inputs]):
                move_to_GPU = False
            # Apply the lifter
            if move_to_GPU:
                for lopt in (self.local_optimizers_map.get(node.op, []) +
                             self.local_optimizers_map.get(type(node.op), []) +
                             self.local_optimizers_all):
                    t_opt = time.time()
                    new_ops = lopt.transform(node.op, context_name,
                                             [mapping[i] for i in node.inputs],
                                             node.outputs)
                    t_opt2 = time.time()
                    time_opts[lopt] += t_opt2 - t_opt
                    if new_ops:
                        process_count[lopt] += 1
                        break
            outputs = []
            # A lifter may return an Op, a list/tuple of variables, a single
            # variable, or nothing (keep the node, with remapped inputs).
            if isinstance(new_ops, theano.Op):
                with inherit_stack_trace(node.outputs):
                    outputs = new_ops(*[mapping[i] for i in node.inputs], return_list=True)
            elif not new_ops:
                newnode = node.clone_with_new_inputs([mapping.get(i) for i in node.inputs])
                outputs = newnode.outputs
            elif isinstance(new_ops, (tuple, list)):
                outputs = new_ops
            elif isinstance(new_ops, theano.Variable):
                outputs = [new_ops]
            for old_output, new_output in zip(node.outputs, outputs):
                copy_stack_trace(old_output, new_output)
            if new_ops:
                node_created[lopt] += len(graph.ops([mapping[i] for i in node.inputs], outputs))
                # Reject replacements that changed an output dtype.
                if any([getattr(old_o, 'dtype', None) != getattr(new_o, 'dtype', None)
                        for old_o, new_o in zip(outputs, node.outputs)]):
                    _logger.warning(
                        "The optimization %s returned bad dtype. Skipping it."
                        " Write to theano-dev mailing list about this." %
                        str(lopt))
                    newnode = node.clone_with_new_inputs([mapping.get(i) for i in node.inputs])
                    outputs = newnode.outputs
            for new_o, old_o in zip(outputs, node.outputs):
                assert len(outputs) == len(node.outputs)
                mapping[old_o] = new_o
        new_nodes = []
        for o in fgraph.outputs:
            new_o = mapping[o]
            if new_o.type != o.type:
                assert isinstance(o.type, tensor.TensorType)
                assert isinstance(new_o.type, GpuArrayType)
                # This condition is needed in the case one input is an
                # output of the graph. Without this, it would
                # introduce cycle as we don't replace correctly that
                # case. It would also add extra transfer to/from the
                # gpu.
                if (new_o.owner and
                        isinstance(new_o.owner.op, GpuFromHost) and
                        new_o.owner.inputs[0].type == o.type):
                    new_o = new_o.owner.inputs[0]
                else:
                    new_o = copy_stack_trace(o, safe_to_cpu(new_o))
            new_nodes.append(new_o)
        fgraph.replace_all_validate(zip(fgraph.outputs, new_nodes),
                                    reason=self.__class__.__name__)
        return (self, toposort_timing, time_opts, node_created, process_count)
    @staticmethod
    def print_profile(stream, prof, level=0):
        """Print a human-readable summary of a GraphToGPU profile tuple.

        `prof` is the 5-tuple returned by apply(): (optimizer,
        toposort timing, per-opt timings, per-opt node-creation counts,
        per-opt application counts).  `level` controls indentation.
        """
        (opt, toposort_timing, time_opts, node_created, process_count) = prof
        blanc = (' ' * level)
        print(blanc, "GraphToGPUOptimizer", end=' ', file=stream)

        print(blanc, getattr(opt, "name",
                             getattr(opt, "__name__", "")), file=stream)

        print(blanc, " time io_toposort %.3fs" % toposort_timing, file=stream)

        s = sum(time_opts.values())
        print(blanc, "Total time taken by local optimizers %.3fs " % s, file=stream)

        count_opt = []
        not_used = []
        not_used_time = 0

        # Split optimizers into those that fired at least once and those
        # that never did, accumulating the time spent in the latter.
        for o, count in iteritems(process_count):
            if count > 0:
                count_opt.append((time_opts[o], count,
                                  node_created[o], o))
            else:
                not_used.append((time_opts[o], o))
                not_used_time += time_opts[o]

        if count_opt:
            print(blanc,
                  ' times - times applied - Node created - name:',
                  file=stream)
            count_opt.sort()
            # Most expensive optimizers first.
            for (t, count, n_created, o) in count_opt[::-1]:
                print(blanc, ' %.3fs - %d - %d - %s' % (
                    t, count, n_created, o), file=stream)
            print(blanc, ' %.3fs - in %d optimization that were not used (display only those with a runtime > 0)' % (
                not_used_time, len(not_used)), file=stream)
            not_used.sort(key=lambda nu: (nu[0], str(nu[1])))
            for (t, o) in not_used[::-1]:
                if t > 0:
                    # Skip opt that have 0 times, they probably wasn't even tried.
                    print(blanc + " ", ' %.3fs - %s' % (t, o), file=stream)
            print(file=stream)
    @staticmethod
    def merge_profile(prof1, prof2):
        """Merge two GraphToGPU profile tuples into a single one.

        Timings and counts are summed key-wise; the two optimizers' local
        optimizer sets/maps are unioned into a fresh GraphToGPU instance.
        """
        # (opt, toposort_timing, time_opts, node_created, process_count) = prof1
        local_optimizers = OrderedSet(prof1[0].local_optimizers_all).union(
            prof2[0].local_optimizers_all)

        def merge_dict(d1, d2):
            """
            Merge 2 dicts by adding the values.
            """
            d = d1.copy()
            for k, v in iteritems(d2):
                if k in d:
                    d[k] += v
                else:
                    d[k] = v
            return d

        local_optimizers_map = merge_dict(prof1[0].local_optimizers_map,
                                          prof2[0].local_optimizers_map)
        new_opt = GraphToGPU(local_optimizers, local_optimizers_map)
        toposort_timing = prof1[1] + prof2[1]
        time_opts = merge_dict(prof1[2], prof2[2])
        node_created = merge_dict(prof1[3], prof2[3])
        process_count = merge_dict(prof1[4], prof2[4])
        return (new_opt,
                toposort_timing,
                time_opts,
                node_created,
                process_count)
    def print_summary(self, stream=sys.stdout, level=0, depth=-1):
        """Print this optimizer and (recursively, up to `depth`) every
        local optimizer it holds, indented by `level`."""
        print("%s%s (%i)" % (
            (' ' * level), self.__class__.__name__, id(self)), file=stream)
        if depth != 0:
            map_values = []
            for opts in self.local_optimizers_map.values():
                map_values += opts
            for opt in self.local_optimizers_all + map_values:
                opt.print_summary(stream, level=(level + 2), depth=(depth - 1))
@local_optimizer([GpuFromHost, GpuToGpu, HostFromGpu])
def local_cut_gpu_transfers(node):
    """Remove or fuse redundant host<->GPU and GPU<->GPU transfers."""
    # gpu[ab] -> host -> gpub
    if (isinstance(node.op, GpuFromHost) and
            node.inputs[0].owner and
            isinstance(node.inputs[0].owner.op, HostFromGpu)):
        other = node.inputs[0].owner.inputs[0]
        if node.op.context_name == other.type.context_name:
            # Round trip within the same context: drop both transfers.
            return [other]
        else:
            # Different contexts: replace by a direct GPU-to-GPU copy.
            return [GpuToGpu(node.op.context_name)(other)]
    # ? -> gpua -> host
    elif (isinstance(node.op, HostFromGpu) and
          node.inputs[0].owner):
        n2 = node.inputs[0].owner
        # host ->
        if isinstance(n2.op, GpuFromHost):
            return [n2.inputs[0]]
        # gpub ->
        if isinstance(n2.op, GpuToGpu):
            return [n2.inputs[0].transfer('cpu')]
    # ? -> gpua -> gpub
    elif isinstance(node.op, GpuToGpu):
        # Transfer within same context
        if node.inputs[0].type.context_name == node.op.context_name:
            return [node.inputs[0]]
        if node.inputs[0].owner:
            n2 = node.inputs[0].owner
            # host ->
            if isinstance(n2.op, GpuFromHost):
                return [as_gpuarray_variable(n2.inputs[0],
                                             node.op.context_name)]
            # gpuc ->
            if isinstance(n2.op, GpuToGpu):
                if node.op.context_name == n2.inputs[0].type.context_name:
                    return [n2.inputs[0]]
                else:
                    return [node.op(n2.inputs[0])]
# Register the transfer-cutting opt in the GPU cut-copies database (plus
# constant folding of remaining transfers) and in canonicalization.
gpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_transfers,
                        'fast_compile', 'fast_run', 'gpuarray')
gpu_cut_copies.register('cut_gpua_constant_transfers',
                        tensor.opt.constant_folding,
                        'fast_compile', 'fast_run', 'gpuarray')
optdb['canonicalize'].register('local_cut_gpua_host_gpua',
                               local_cut_gpu_transfers,
                               'fast_compile', 'fast_run', 'gpuarray')
@register_opt('fast_compile')
@local_optimizer([tensor.Alloc])
def local_gpua_alloc2(node):
    """
    Join(axis, {Alloc or HostFromGPU}, ...) -> Join(axis, GpuAlloc, Alloc, ...)

    Moves an alloc that is an input to join to the gpu.

    """
    try:
        get_context(None)
    except ContextNotDefined:
        # If there is no default context then we do not perform the move here.
        return
    # Only move when every client of the alloc is a Join whose other
    # inputs already live on (or come from) the GPU.
    if (isinstance(node.op, tensor.Alloc) and
        all(c != 'output' and
            isinstance(c.op, tensor.Join) and
            all(i.owner and
                i.owner.op in [host_from_gpu, tensor.alloc]
                for i in c.inputs[1:])
            for c, idx in node.outputs[0].clients)):
        return [GpuAlloc(None)(*node.inputs).transfer('cpu')]
@register_opt('fast_compile')
@op_lifter([tensor.Alloc])
@register_opt2([tensor.Alloc], 'fast_compile')
def local_gpuaalloc(op, context_name, inputs, outputs):
    """Lift a CPU Alloc to GpuAlloc in the given context."""
    return GpuAlloc(context_name)(*inputs)
@register_opt('fast_compile')
@op_lifter([tensor.AllocEmpty])
@register_opt2([tensor.AllocEmpty], 'fast_compile')
def local_gpua_alloc_empty(op, context_name, inputs, outputs):
    """Lift AllocEmpty to GpuAllocEmpty."""
    # We use _props_dict() to make sure that the GPU op know all the
    # CPU op props.
    return GpuAllocEmpty(context_name=context_name, **op._props_dict())(*inputs)
@register_opt()
@local_optimizer([GpuAlloc])
def local_gpualloc_memset_0(node):
    """Replace GpuAlloc of a constant scalar 0 by a memset-based GpuAlloc."""
    if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
        inp = node.inputs[0]
        if (isinstance(inp, GpuArrayConstant) and
                inp.data.size == 1 and
                (np.asarray(inp.data) == 0).all()):
            new_op = GpuAlloc(node.op.context_name, memset_0=True)
            with inherit_stack_trace(node.outputs):
                return new_op(*node.inputs, return_list=True)
# Don't register by default.
@gof.local_optimizer([GpuAllocEmpty])
def local_gpua_alloc_empty_to_zeros(node):
    """Turn GpuAllocEmpty into a zero-filled GpuAlloc (same shape/dtype)."""
    if isinstance(node.op, GpuAllocEmpty):
        context_name = infer_context_name(*node.inputs)
        z = np.asarray(0, dtype=node.outputs[0].dtype)
        with inherit_stack_trace(node.outputs):
            return [GpuAlloc(context_name)(
                as_gpuarray_variable(z, context_name), *node.inputs)]
# Opt-in registration under its own tag (not enabled by default).
optdb.register('local_gpua_alloc_empty_to_zeros',
               theano.tensor.opt.in2out(local_gpua_alloc_empty_to_zeros),
               # After move to gpu and merge2, before inplace.
               49.3,
               'alloc_empty_to_zeros',)
@register_opt()
@local_optimizer([GpuContiguous])
def local_gpu_contiguous_gpu_contiguous(node):
    """
    gpu_contiguous(gpu_contiguous(x)) -> gpu_contiguous(x)

    """
    if isinstance(node.op, GpuContiguous):
        inp = node.inputs[0]
        if inp.owner and isinstance(inp.owner.op, GpuContiguous):
            return [inp]
@register_opt('fast_compile')
@op_lifter([tensor.extra_ops.CpuContiguous])
@register_opt2([tensor.extra_ops.CpuContiguous], 'fast_compile')
def local_gpua_contiguous(op, context_name, inputs, outputs):
    """Lift CpuContiguous to the gpu_contiguous op."""
    return gpu_contiguous
@register_opt('fast_compile')
@op_lifter([tensor.Reshape])
@register_opt2([tensor.Reshape], 'fast_compile')
def local_gpua_reshape(op, context_name, inputs, outputs):
    """Lift Reshape to GpuReshape, preserving the output rank."""
    res = GpuReshape(op.ndim)
    return res
@register_opt('fast_compile')
@op_lifter([tensor.Rebroadcast])
@register_opt2([tensor.Rebroadcast], 'fast_compile')
def local_gpua_rebroadcast(op, context_name, inputs, outputs):
    """Rebroadcast works on GPU variables; just transfer its input."""
    return op(as_gpuarray_variable(inputs[0], context_name))
@register_opt('fast_compile')
@op_lifter([tensor.Flatten])
@register_opt2([tensor.Flatten], 'fast_compile')
def local_gpua_flatten(op, context_name, inputs, outputs):
    """Express Flatten as a GpuReshape: keep the first outdim-1 dims,
    collapse the rest into a trailing -1 dimension."""
    shp = []
    if op.outdim != 1:
        shp = [inputs[0].shape[i] for i in range(op.outdim - 1)]
    shp += [-1]
    res = GpuReshape(op.outdim)
    o = res(inputs[0], theano.tensor.as_tensor_variable(shp))
    return o
@register_opt('fast_compile')
@op_lifter([tensor.Elemwise])
@register_opt2([tensor.Elemwise], 'fast_compile')
def local_gpua_elemwise(op, context_name, inputs, outputs):
    """Lift an Elemwise to GpuElemwise, handling special cases: erfinv /
    erfcinv (CUDA-only kernels), Pow (needs dtype casts), and add/mul
    (may need splitting when there are too many inputs for nvcc)."""
    scal_op = op.scalar_op
    name = op.name
    if name:
        name = 'Gpu' + name
    if len(outputs) > 1:
        # Multi-output elemwise is not lifted.
        return

    have_cuda = False
    have_opencl = False
    if inputs and isinstance(inputs[0].type, GpuArrayType):
        kind = inputs[0].type.context.kind
        if kind.startswith(b'opencl'):
            have_opencl = True
        elif kind.startswith(b'cuda'):
            have_cuda = True
    # Scalar ops that have a dedicated GPU implementation.
    convert = {Erfinv: gpu_erfinv,
               Erfcinv: gpu_erfcinv}

    if scal_op.__class__ in convert:
        scal_op = convert[scal_op.__class__]
        if have_opencl:
            _logger.warning(
                'Function "%s" is not supported with OpenCL. Use "device=cuda" instead.' %
                scal_op)
        if not have_cuda:
            return None
    if not scal_op.supports_c_code(inputs, outputs):
        return
    res = GpuElemwise(scal_op, name=name,
                      inplace_pattern=copy.copy(op.inplace_pattern),
                      nfunc_spec=op.nfunc_spec)

    # If the elemwise operation is a pow, casts might be required on the
    # inputs and or outputs because only the (float, float)->float and
    # (double, double)->double cases are implemented at the moment.
    if isinstance(op.scalar_op, Pow):
        # Only transfer the computation on the gpu if the output dtype is
        # floating point. Else, give up on the transfer to the gpu.
        out_dtype = outputs[0].dtype
        if out_dtype not in ['float16', 'float32', 'float64']:
            return

        # Transfer the inputs on the GPU and cast them to the right dtype.
        new_inputs = []
        for inp in inputs:
            if inp.dtype != out_dtype:
                gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
                new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp, context_name)))
            else:
                new_inputs.append(as_gpuarray_variable(inp, context_name))

        # Perform the exponent on the gpu and transfer the output back to the
        # cpu.
        gpu_output = res(*new_inputs)
        return [gpu_output]
    elif op.scalar_op in (scalar.add, scalar.mul):
        try:
            # May need to split the node if it has too many inputs.
            return [split_inputs(inputs, max_inputs_to_GpuElemwise(outputs), res)]
        except ValueError:
            return False
    else:
        return res
def split_inputs(inputs, max_nb_inputs, op):
    """
    Rebuild ``op(*inputs)`` as a tree of ``op`` applications so that no
    single node receives more than ``max_nb_inputs`` inputs.

    For some ops like add and mul, a large number of inputs can make nvcc
    fail to compile the generated code, and a node that cannot execute
    breaks DebugMode.  Other GpuElemwise ops are safe: only fusion can
    produce nodes with too many inputs, and it checks for that.

    Parameters
    ----------
    inputs : list of theano variables
        List of inputs to node.
    max_nb_inputs : int
        Maximum number of inputs the node can handle without
        compilation fail.
    op : Theano operator instance.
        Operator that should be used to rebuild the computation graph with
        smaller number of inputs per node.
    """
    if max_nb_inputs <= 1 and len(inputs) > 1:
        raise ValueError("Can not split nodes because inputs' dimensionality and/or"
                         " number of outputs is too large")

    # Repeatedly combine consecutive groups of at most max_nb_inputs
    # values until a single application of `op` can take them all.
    while len(inputs) > max_nb_inputs:
        inputs = [op(*inputs[start:start + max_nb_inputs])
                  for start in range(0, len(inputs), max_nb_inputs)]
    return op(*inputs)
# Build and register the GPU elemwise fusion pass and the GPU inplace
# elemwise pass; also re-register the CPU useless-elemwise removal so it
# fires on GpuElemwise graphs.
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
    GpuElemwise,
    max_inputs_to_GpuElemwise)
optdb.register('gpua_elemwise_fusion',
               # 48.5 move to gpu
               # 48.6 specialize
               # 49 cpu fusion
               # 49.5 add destroy handler
               tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 49,
               'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray')

inplace_gpu_elemwise_opt = tensor.opt.InplaceElemwiseOptimizer(
    GpuElemwise)
optdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75,
               'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray')

register_opt(tensor.opt.local_useless_elemwise)
@register_opt('fast_compile')
@op_lifter([tensor.DimShuffle])
@register_opt2([tensor.DimShuffle], 'fast_compile')
def local_gpua_dimshuffle(op, context_name, inputs, outputs):
    """Lift DimShuffle to GpuDimShuffle with the same pattern."""
    return GpuDimShuffle(op.input_broadcastable,
                         op.new_order)
@register_opt('fast_compile')
@op_lifter([tensor.SpecifyShape])
@register_opt2([tensor.SpecifyShape], 'fast_compile')
def local_gpua_specifyShape(op, context_name, inputs, outputs):
    """Lift SpecifyShape; skip if the input is already a GPU variable."""
    if isinstance(inputs[0].type, GpuArrayType):
        return
    return local_gpua_specifyShape_graph(op, context_name, inputs, outputs)
@register_opt2([tensor.SpecifyShape], 'fast_compile')
def local_gpua_specifyShape_graph(op, context_name, inputs, outputs):
    """GraphToGPU variant: transfer the data input, keep the shape inputs."""
    inp = [as_gpuarray_variable(inputs[0], context_name)]
    inp += inputs[1:]
    return tensor.specify_shape(*inp)
@register_opt('fast_compile')
@op_lifter([theano.compile.ops.Shape])
def local_gpua_shape(op, context_name, inputs, outputs):
    """Lift Shape; the result stays on the CPU."""
    # op_lifter will call this opt too frequently as the output is
    # always on the CPU.
    if isinstance(inputs[0].type, GpuArrayType):
        return
    return local_gpua_shape_graph(op, context_name, inputs, outputs)
# Use theano.compile.ops.Shape, consistent with local_gpua_shape above.
# The previous spelling (tensor.compile.ops.Shape) only resolved through
# an accidental star-import re-export of theano.compile in theano.tensor.
@register_opt2([theano.compile.ops.Shape], 'fast_compile')
def local_gpua_shape_graph(op, context_name, inputs, outputs):
    """GraphToGPU variant of local_gpua_shape: take the shape of the
    GPU-transferred input (the shape result itself stays on the CPU)."""
    return [as_gpuarray_variable(inputs[0], context_name).shape]
def gpu_print_wrapper(op, cnda):
    """Adapter used by local_gpua_print_op: forward the printed value to
    the original Print op's callback as a NumPy array."""
    original = op.old_op
    original.global_fn(original, np.asarray(cnda))
@register_opt('fast_compile')
@op_lifter([tensor.printing.Print])
@register_opt2([tensor.printing.Print], 'fast_compile')
def local_gpua_print_op(op, context_name, inputs, outputs):
    """Lift Print: wrap its callback so it still receives a NumPy array."""
    x, = inputs
    with inherit_stack_trace(outputs):
        gpu_x = as_gpuarray_variable(x, context_name=context_name)
        new_op = op.__class__(global_fn=gpu_print_wrapper)
        # Keep a reference to the original op so the wrapper can call
        # its user-provided callback.
        new_op.old_op = op
        return new_op(gpu_x)
@register_opt('fast_compile')
@local_optimizer([PdbBreakpoint])
def local_gpu_pdbbreakpoint_op(node):
    """Move the monitored inputs of a PdbBreakpoint to the GPU when their
    producers or consumers are already there, keeping CPU outputs for the
    clients that expect them."""
    if isinstance(node.op, PdbBreakpoint):

        old_inputs = node.inputs
        old_outputs = node.outputs

        # inputs[0] is the condition; keep it on the CPU.
        new_inputs = node.inputs[:1]
        input_transfered = []

        # Go through the monitored variables, only transferring on GPU those
        # for which the input comes from the GPU or the output will be
        # transferred on the GPU.
        nb_monitored_vars = len(node.outputs)
        for i in range(nb_monitored_vars):

            inp = old_inputs[i + 1]
            out = old_outputs[i]

            input_is_from_gpu = (inp.owner and
                                 isinstance(inp.owner.op, HostFromGpu))
            output_goes_to_gpu = False
            for c in out.clients:
                if c == 'output':
                    continue
                if isinstance(c[0].op, GpuFromHost):
                    output_goes_to_gpu = True
                    context_name = c[0].op.context_name
                    break

            if input_is_from_gpu:
                # The op should be applied on the GPU version of the input
                new_inputs.append(inp.owner.inputs[0])
                input_transfered.append(True)
            elif output_goes_to_gpu:
                # The input should be transferred to the gpu
                new_inputs.append(as_gpuarray_variable(inp, context_name))
                input_transfered.append(True)
            else:
                # No transfer is required.
                new_inputs.append(inp)
                input_transfered.append(False)

        # Only continue the optimization if at least one input has been
        # transferred to the gpu
        if not any(input_transfered):
            return False

        # Apply the op on the new inputs
        with inherit_stack_trace(node.outputs):
            new_op_outputs = node.op(*new_inputs, return_list=True)

        # Propagate the transfer to the gpu through the outputs that require
        # it
        new_outputs = []
        for i in range(len(new_op_outputs)):
            if input_transfered[i]:
                new_outputs.append(new_op_outputs[i].transfer('cpu'))
            else:
                new_outputs.append(new_op_outputs[i])

        return new_outputs

    return False
@register_opt('fast_compile')
@op_lifter([IfElse])
@register_opt2([IfElse], 'fast_compile')
def local_gpua_lazy_ifelse(op, context_name, inputs, outputs):
    """Lift IfElse to its GPU (lazy) variant, moving matching true/false
    input pairs to the GPU together."""
    if op.gpu:
        return
    c = inputs[0]
    inps = []
    falses = []
    # ifelse need corresponding true/false inputs variables to be of the same type.
    # But we can't rely on inputs to respect that, as GraphToGPU don't enforce that.
    # So we need to take care of this here.
    for v1, v2 in zip(inputs[1:1 + op.n_outs], inputs[1 + op.n_outs:]):
        if ((isinstance(v1.type, tensor.TensorType) and move_to_gpu(v1)) or
                isinstance(v1.type, GpuArrayType) or
                isinstance(v2.type, GpuArrayType)):
            inps.append(as_gpuarray_variable(v1, context_name))
            falses.append(as_gpuarray_variable(v2, context_name))
        else:
            inps.append(v1)
            falses.append(v2)
    inps.extend(falses)
    return IfElse(op.n_outs, gpu=True)(c, *inps, return_list=True)
@register_opt('fast_compile')
@op_lifter([tensor.Join])
@register_opt2([tensor.Join], 'fast_compile')
def local_gpua_join(op, context_name, inputs, outputs):
    """Lift Join to gpu_join."""
    return gpu_join
@register_opt('fast_compile')
@local_optimizer([GpuJoin])
def local_gpua_join_1(node):
    """GpuJoin(axis, x) with a single element -> x."""
    # join of a single element
    if (isinstance(node.op, GpuJoin) and
            len(node.inputs) == 2):
        return [node.inputs[1]]
@register_opt('fast_compile')
@op_lifter([tensor.Split])
@register_opt2([tensor.Split], 'fast_compile')
def local_gpua_split(op, context_name, inputs, outputs):
    """Lift Split to GpuSplit with the same number of splits."""
    # TODO use props
    return GpuSplit(op.len_splits)
@register_opt('fast_compile')
@op_lifter([tensor.Subtensor])
def local_gpua_subtensor(op, context_name, inputs, outputs):
    """Lift Subtensor to GpuSubtensor, except when it is cheaper to slice
    on the CPU before the transfer (input is host_from_gpu of a graph
    input/shared variable whose only client is this subtensor)."""
    x = inputs[0]
    if (x.owner and isinstance(x.owner.op, HostFromGpu)):
        gpu_x = x.owner.inputs[0]
        if (gpu_x.owner and
                isinstance(gpu_x.owner.op, GpuFromHost) and
                # And it is a shared var or an input of the graph.
                not gpu_x.owner.inputs[0].owner):
            if len(x.clients) == 1:
                # Only stay on the CPU when none of the subtensor's
                # clients needs the result on the GPU.
                if any([n == 'output' or any([isinstance(v.type, GpuArrayType)
                                              for v in n.inputs + n.outputs])
                        for n, _ in outputs[0].clients]):
                    return
                else:
                    return [gpu_x.owner.op(outputs[0]).transfer('cpu')]

    return GpuSubtensor(op.idx_list)
@register_opt2([tensor.Subtensor], 'fast_compile')
def local_gpua_subtensor_graph(op, context_name, inputs, outputs):
    """GraphToGPU variant of local_gpua_subtensor."""
    # We need different code as the condition is different as inputs
    # aren't the same.
    x = inputs[0]
    # We don't want to move the subtensor to the GPU if the inputs is
    # on the CPU and the only client of the CPU node is this
    # subtensor. This allow to have a smaller transfer.
    if (x.owner and isinstance(x.owner.op, GpuFromHost)):
        cpu_x = x.owner.inputs[0]
        # And it is a shared var or an input of the graph.
        # and is used by only 1 node.
        # x is in the new graph, so we can't tests its number of clients.
        if not cpu_x.owner and len(cpu_x.clients) == 1:
            c = outputs[0].clients
            # If the subtensor have only 1 client, do it on the CPU.
            # We let the other optimization to take care to move the
            # next node or not.
            if len(c) == 1:
                return
    return GpuSubtensor(op.idx_list)
@register_opt('fast_compile')
@op_lifter([tensor.IncSubtensor])
@register_opt2([tensor.IncSubtensor], 'fast_compile')
def local_gpua_inc_subtensor(op, context_name, inputs, outputs):
    """Lift IncSubtensor to GpuIncSubtensor, propagating the
    nan_guard_mode_check tag from the original output."""
    op = GpuIncSubtensor(op.idx_list, op.inplace,
                         op.set_instead_of_inc,
                         op.destroyhandler_tolerate_aliased)
    ret = op(*inputs)
    val = getattr(outputs[0].tag, 'nan_guard_mode_check', True)
    ret.tag.nan_guard_mode_check = val
    return ret
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor1])
@register_opt2([tensor.AdvancedSubtensor1], 'fast_compile')
def local_gpua_advanced_subtensor1(op, context_name, inputs, outputs):
    """Lift AdvancedSubtensor1 to its GPU counterpart."""
    return GpuAdvancedSubtensor1()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor])
@register_opt2([tensor.AdvancedSubtensor], 'fast_compile')
def local_gpua_advanced_subtensor(op, context_name, inputs, outputs):
    """Lift AdvancedSubtensor to its GPU counterpart."""
    return GpuAdvancedSubtensor()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedBooleanSubtensor])
@register_opt2([tensor.AdvancedBooleanSubtensor], 'fast_compile')
def local_gpua_advanced_boolean_subtensor(op, context_name, inputs, outputs):
    """Lift AdvancedBooleanSubtensor to its GPU counterpart."""
    return GpuAdvancedBooleanSubtensor()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedIncSubtensor1])
@register_opt2([tensor.AdvancedIncSubtensor1], 'fast_compile')
def local_gpua_advanced_incsubtensor1(op, context_name, inputs, outputs):
    """Lift AdvancedIncSubtensor1, choosing between the generic GPU op and
    the faster (but non-deterministic) dev20 variant depending on input
    rank and the `deterministic` config flag."""
    x, y, ilist = inputs

    set_instead_of_inc = op.set_instead_of_inc

    if (x.ndim == 1 and y.ndim == 0 and
            config.deterministic == 'default'):
        # Reshape the vector case to 2d so the dev20 kernel applies,
        # then drop the added dimension from the result.
        x = x.dimshuffle(0, 'x')
        y = y.dimshuffle('x', 'x')
        ret = GpuAdvancedIncSubtensor1_dev20(
            set_instead_of_inc=set_instead_of_inc)(x, y, ilist)
        ret = GpuDimShuffle(ret.type.broadcastable, [0])(ret)
        return ret
    elif (x.ndim != 2 or y.ndim != 2 or
            config.deterministic == 'more'):
        return GpuAdvancedIncSubtensor1(
            set_instead_of_inc=set_instead_of_inc)
    else:
        return GpuAdvancedIncSubtensor1_dev20(
            set_instead_of_inc=set_instead_of_inc)
# Do not register this optimization for now, as it slows down the
# execution by a lot in important cases.
# @register_opt('fast_compile')
# @op_lifter([tensor.AdvancedIncSubtensor])
# @register_opt2([tensor.AdvancedIncSubtensor], 'fast_compile')
def local_gpua_advanced_incsubtensor(op, context_name, inputs, outputs):
    """Lift AdvancedIncSubtensor (increment only; set is unsupported)."""
    if not op.set_instead_of_inc:
        return GpuAdvancedIncSubtensor()
    else:
        return False
# Do not register this optimization for now, as it slows down the
# execution by a lot in important cases.
# @register_opt('fast_compile')
# @op_lifter([tensor.AdvancedBooleanIncSubtensor])
# @register_opt2([tensor.AdvancedBooleanIncSubtensor], 'fast_compile')
def local_gpua_advanced_boolean_incsubtensor(op, context_name, inputs, outputs):
    # GpuAdvancedIncSubtensor only works with a single boolean mask,
    # but not with fancy combinations.
    if not op.set_instead_of_inc and len(inputs) == 3:
        return GpuAdvancedBooleanIncSubtensor()
    else:
        return False
@register_inplace()
@local_optimizer([GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20])
def local_advincsub1_gpua_inplace(node):
    """Make GpuAdvancedIncSubtensor1(_dev20) work inplace."""
    if isinstance(node.op, (GpuAdvancedIncSubtensor1,
                            GpuAdvancedIncSubtensor1_dev20)):
        if not node.op.inplace:
            return [node.op.clone_inplace()(*node.inputs)]
# AllocDiag
@register_opt('fast_compile')
@op_lifter([tensor.AllocDiag])
@register_opt2([theano.tensor.AllocDiag], 'fast_compile')
def local_gpu_alloc_diag(op, context_name, inputs, outputs):
    """Lift AllocDiag to GpuAllocDiag (2d output only)."""
    if outputs[0].ndim != 2:
        # AllocDiag only supports 2d output
        return False
    return GpuAllocDiag(offset=op.offset)
# ExtractDiag
@register_opt('fast_compile')
@op_lifter([tensor.ExtractDiag])
@register_opt2([theano.tensor.ExtractDiag], 'fast_compile')
def local_gpu_extract_diag(op, context_name, inputs, outputs):
    """Lift ExtractDiag to GpuExtractDiag with identical props."""
    return GpuExtractDiag(offset=op.offset, axis1=op.axis1, axis2=op.axis2, view=op.view)
@register_opt('fast_compile')
@op_lifter([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod])
@register_opt2([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod], 'fast_compile')
def local_gpua_careduce(op, context_name, inputs, outputs):
    """Lift CAReduce/Sum/Prod to a GPU reduction (CUDA or OpenCL backend).
    If the direct pattern has no C implementation, retry after collapsing
    adjacent dimensions with the same reduce flag via a reshape."""
    if isinstance(op.scalar_op, (scalar.Add, scalar.Mul,
                                 scalar.Maximum, scalar.Minimum)):

        ctx = get_context(context_name)
        if ctx.kind == b'opencl':
            op2 = GpuCAReduceCPY
            if op.scalar_op not in [scalar.add, scalar.mul]:
                # We don't support yet all reduction with cpy code.
                return
        elif ctx.kind == b'cuda':
            op2 = GpuCAReduceCuda
        else:
            return False
        x, = inputs
        idtype = x.dtype
        adtype = getattr(op, 'acc_dtype', None)
        odtype = getattr(op, 'dtype', outputs[0].dtype)

        # Force accumulator to float32 for float32 inputs since tree
        # reduction will not loose as much precision as linear
        # accumulation and float64 is much slower on GPU.
        if idtype == 'float32' and odtype == 'float32':
            adtype = 'float32'

        greduce = op2(
            op.scalar_op, axis=op.axis,
            dtype=odtype,
            acc_dtype=adtype)
        with inherit_stack_trace(outputs):
            gvar = greduce(x)
        # We need to have the make node called, otherwise the mask can
        # be None
        if (op2 is GpuCAReduceCPY or
                gvar.owner.op.supports_c_code([
                    as_gpuarray_variable(x, context_name)])):
            return greduce
        else:
            # Try to make a simpler pattern based on reshaping
            # The principle is that if two adjacent dimensions have
            # the same value in the reduce_mask, then we can reshape
            # to make them a single dimension, do the reduction, and
            # then reshape to get them back.

            if op.axis is None:
                reduce_mask = [1] * x.type.ndim
            else:
                reduce_mask = [0] * x.type.ndim
                for a in op.axis:
                    assert reduce_mask[a] == 0
                    reduce_mask[a] = 1

            # Collapse runs of dimensions sharing the same mask value.
            new_in_shp = [shape_i(x, 0)]
            new_mask = [reduce_mask[0]]
            for i in xrange(1, x.type.ndim):
                if reduce_mask[i] == reduce_mask[i - 1]:
                    new_in_shp[-1] *= shape_i(x, i)
                else:
                    new_mask.append(reduce_mask[i])
                    new_in_shp.append(shape_i(x, i))
            new_axis = []
            for idx, m in enumerate(new_mask):
                if m == 1:
                    new_axis.append(idx)
            greduce = op2(
                op.scalar_op,
                axis=new_axis, reduce_mask=new_mask,
                dtype=odtype,
                acc_dtype=adtype)
            with inherit_stack_trace(outputs):
                reshaped_x = x.reshape(tensor.stack(new_in_shp))
                gpu_reshaped_x = as_gpuarray_variable(reshaped_x, context_name)
                # We need to have the make node called, otherwise the mask can
                # be None
                gvar = greduce(gpu_reshaped_x)
                reshaped_gpu_inputs = [gpu_reshaped_x]
                if greduce.supports_c_code(reshaped_gpu_inputs):
                    reduce_reshaped_x = greduce(gpu_reshaped_x)

                    if reduce_reshaped_x.ndim != outputs[0].ndim:
                        # Restore the non-reduced dimensions that were
                        # collapsed by the reshape above.
                        out_shp = []
                        for i in range(x.ndim):
                            if i not in op.axis:
                                out_shp.append(shape_i(x, i))
                        unreshaped_reduce = GpuReshape(len(out_shp))(
                            reduce_reshaped_x,
                            tensor.stack(out_shp))
                    else:
                        unreshaped_reduce = reduce_reshaped_x
                    return [unreshaped_reduce]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemv, tensor.blas_c.CGemv])
@register_opt2([tensor.blas.Gemv], 'fast_compile')
def local_gpua_gemv(op, context_name, inputs, outputs):
    """Lift Gemv/CGemv to the GPU; float16 goes through gemm instead."""
    if inputs[0].dtype == 'float16':
        # Use gemm implementation as cublas gemv don't support float16
        return gpugemm_no_inplace(inputs[0][:, None],
                                  inputs[1],
                                  inputs[2],
                                  inputs[3][:, None],
                                  inputs[4]).dimshuffle(0)

    if inputs[0].dtype not in ['float32', 'float64']:
        return
    if op.inplace:
        return gpugemv_inplace
    else:
        return gpugemv_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemm])
@register_opt2([tensor.blas.Gemm], 'fast_compile')
def local_gpua_gemm(op, context_name, inputs, outputs):
    """Lift Gemm to GpuGemm (float16/32/64 only)."""
    if inputs[0].dtype not in ['float16', 'float32', 'float64']:
        return
    if op.inplace:
        return gpugemm_inplace
    else:
        return gpugemm_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.BatchedDot])
@register_opt2([tensor.blas.BatchedDot], 'fast_compile')
def local_gpua_gemmbatch(op, context_name, inputs, outputs):
    """Lift BatchedDot to GpuGemmBatch, padding 2d inputs to 3d and
    upcasting mismatched dtypes."""
    if inputs[0].dtype not in ['float16', 'float32', 'float64']:
        return
    with inherit_stack_trace(outputs):
        a, b = inputs
        # Since GpuGemmBatch only supports 3D inputs and output,
        # we need to add broadcastable dims to the inputs, and drop
        # them from outputs
        output_dims = [0, 1, 2]
        if a.ndim == 2:
            a = GpuDimShuffle(a.broadcastable, (0, 'x', 1))(a)
            del output_dims[1]
        if b.ndim == 2:
            b = GpuDimShuffle(b.broadcastable, (0, 1, 'x'))(b)
            del output_dims[-1]
        # In case of mismatched dtypes, we also have to upcast
        out_dtype = outputs[0].dtype
        if a.dtype != out_dtype or b.dtype != out_dtype:
            gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
            if a.dtype != out_dtype:
                a = gpu_cast_op(a)
            if b.dtype != out_dtype:
                b = gpu_cast_op(b)

        c = GpuAllocEmpty(out_dtype, context_name)(
            a.shape[0], a.shape[1], b.shape[2])
        out = gpugemmbatch_no_inplace(c, np.asarray(1.0, dtype=out_dtype),
                                      a, b, np.asarray(0.0, dtype=out_dtype))
        if len(output_dims) != 3:
            out = GpuDimShuffle(out.broadcastable, output_dims)(out)
        return out
@register_opt()
@alpha_merge(GpuGemm, alpha_in=1, beta_in=4)
def local_gpua_gemm_alpha_merge(node, *inputs):
    """Fold scalar multiplications into GpuGemm's alpha/beta."""
    return [gpugemm_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0)
def local_gpua_gemm_output_merge(node, *inputs):
    """Fold an addition with the output into GpuGemm's accumulation."""
    return [gpugemm_no_inplace(*inputs)]
@register_opt()
@alpha_merge(GpuGemmBatch, alpha_in=1, beta_in=4)
def local_gpua_gemmbatch_alpha_merge(node, *inputs):
    """Fold scalar multiplications into GpuGemmBatch's alpha/beta."""
    return [gpugemmbatch_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemmBatch, alpha_in=1, beta_in=4, out_in=0)
def local_gpua_gemmbatch_output_merge(node, *inputs):
    """Fold an addition with the output into GpuGemmBatch's accumulation."""
    return [gpugemmbatch_no_inplace(*inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer])
@register_opt2([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer], 'fast_compile')
def local_gpua_ger(op, context_name, inputs, outputs):
    """Lift Ger variants to GpuGer (float32/64 only)."""
    if inputs[0].dtype not in ['float32', 'float64']:
        return
    return GpuGer(inplace=op.destructive)
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22])
@register_opt2([tensor.blas.Dot22], 'fast_compile')
def local_gpua_dot22(op, context_name, inputs, outputs):
    """Lift Dot22 to gpu_dot22."""
    return gpu_dot22
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22Scalar])
@register_opt2([tensor.blas.Dot22Scalar], 'fast_compile')
def local_gpua_dot22scalar(op, context_name, inputs, outputs):
    """Express Dot22Scalar as a GpuGemm with beta=0 into a fresh buffer."""
    with inherit_stack_trace(outputs):
        x, y, a = inputs
        x = as_gpuarray_variable(x, context_name)
        y = as_gpuarray_variable(y, context_name)
        z = GpuAllocEmpty(x.dtype, context_name)(x.shape[0], y.shape[1])
        return [gpugemm_no_inplace(z, a, x, y, 0)]
@register_opt('fast_compile')
@op_lifter([tensor.basic.Eye])
@register_opt2([tensor.basic.Eye], 'fast_compile')
def local_gpua_eye(op, context_name, inputs, outputs):
    """Lift Eye to GpuEye."""
    return GpuEye(dtype=op.dtype, context_name=context_name)
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias])
@register_opt2([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], 'fast_compile')
def local_gpua_crossentropysoftmaxargmax1hotwithbias(op, context_name, inputs, outputs):
    """Lift the fused crossentropy+softmax+argmax op to its GPU version."""
    return gpu_crossentropy_softmax_argmax_1hot_with_bias
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx])
@register_opt2([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], 'fast_compile')
def local_gpua_crossentropysoftmax1hotwithbiasdx(op, context_name, inputs, outputs):
    """Lift the crossentropy+softmax gradient op to its GPU version."""
    return gpu_crossentropy_softmax_1hot_with_bias_dx
@register_opt('fast_compile')
@op_lifter([tensor.nnet.Softmax])
@register_opt2([tensor.nnet.Softmax], 'fast_compile')
def local_gpua_softmax(op, context_name, inputs, outputs):
    """Lift Softmax to gpu_softmax."""
    return gpu_softmax
@register_opt('fast_compile')
@op_lifter([tensor.nnet.SoftmaxWithBias])
@register_opt2([tensor.nnet.SoftmaxWithBias], 'fast_compile')
def local_gpua_softmaxwithbias(op, context_name, inputs, outputs):
    """Lift SoftmaxWithBias to gpu_softmax_with_bias."""
    return gpu_softmax_with_bias
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropyCategorical1Hot])
@register_opt2([tensor.nnet.CrossentropyCategorical1Hot], 'fast_compile')
def local_gpu_crossentropycategorical1hot(op, context_name, inputs, outputs):
    """Rewrite CrossentropyCategorical1Hot with GPU elemwise ops."""
    # There is no corresponding GPU Op, but we can express it as:
    #   coding, one_of_n = inputs
    #   -log(coding[arange(coding.shape[0]), one_of_n])
    coding, one_of_n = inputs
    idx0 = theano.tensor.arange(shape_i(coding, 0))
    return [gpu_neg(gpu_log(coding[idx0, one_of_n]))]
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropyCategorical1HotGrad])
@register_opt2([tensor.nnet.CrossentropyCategorical1HotGrad], 'fast_compile')
def local_gpu_crossentropycategorical1hotgrad(op, context_name, inputs, outputs):
    """Rewrite CrossentropyCategorical1HotGrad with GPU ops."""
    # There is no corresponding GPU Op, but we can express it as:
    #   gy, coding, one_of_n = inputs
    #   gcoding = zeros_like(coding)
    #   gcoding[arange(coding.shape[0]), one_of_n] = -g / (
    #       coding[arange(coding.shape[0]), one_of_n])
    gy, coding, one_of_n = inputs
    idx0 = theano.tensor.arange(shape_i(coding, 0))
    # Zero-filled GPU buffer with coding's shape and dtype.
    z = GpuAlloc(context_name, memset_0=True)(
        as_gpuarray_variable(np.zeros((), dtype=coding.dtype), context_name),
        *[shape_i(coding, i) for i in xrange(coding.ndim)])
    gcoding = tensor.set_subtensor(
        z[idx0, one_of_n],
        gpu_neg(gpu_true_div(gy, coding[idx0, one_of_n])))
    return [gcoding.transfer(context_name)]
@register_opt('fast_compile')
@op_lifter([theano.tensor.opt.Assert])
def local_gpua_assert(op, context_name, inputs, outputs):
    """Lift Assert; skip if the value is already a GPU variable."""
    if isinstance(inputs[0].type, GpuArrayType):
        return
    return local_gpua_assert_graph(op, context_name, inputs, outputs)
@register_opt2([theano.tensor.opt.Assert], 'fast_compile')
def local_gpua_assert_graph(op, context_name, inputs, outputs):
    """GraphToGPU variant: transfer the asserted value, keep conditions."""
    return [op(as_gpuarray_variable(inputs[0], context_name),
               *inputs[1:])]
@register_opt('fast_compile')
@op_lifter([ConvOp])
@register_opt2([ConvOp], 'fast_compile')
def local_gpua_error_convop(op, context_name, inputs, outputs):
    # Deliberately abort: the legacy ConvOp has no gpuarray implementation.
    assert False, """
ConvOp does not work with the gpuarray backend.

Use the new convolution interface to have GPU convolution working:
theano.tensor.nnet.conv2d()
"""
@register_opt('fast_compile')
@op_lifter([SparseBlockGemv])
@register_opt2([SparseBlockGemv], 'fast_compile')
def local_gpua_sparseblockgemv(op, context_name, inputs, outputs):
    """Lift SparseBlockGemv to the GPU (no float16 support)."""
    if inputs[0].dtype == 'float16':
        return
    if op.inplace:
        return gpu_sparse_block_gemv_inplace
    else:
        return gpu_sparse_block_gemv
@register_opt('fast_compile')
@op_lifter([SparseBlockOuter])
@register_opt2([SparseBlockOuter], 'fast_compile')
def local_gpua_sparseblockouter(op, context_name, inputs, outputs):
    """Lift SparseBlockOuter to the GPU (no float16 support)."""
    if inputs[0].dtype == 'float16':
        return
    if op.inplace:
        return gpu_sparse_block_outer_inplace
    else:
        return gpu_sparse_block_outer
@register_inplace()
@local_optimizer([GpuSparseBlockGemv], inplace=True)
def local_inplace_sparseblockgemv(node):
    """Make GpuSparseBlockGemv work inplace."""
    if isinstance(node.op, GpuSparseBlockGemv) and not node.op.inplace:
        return [gpu_sparse_block_gemv_inplace(*node.inputs)]
@register_inplace()
@local_optimizer([GpuSparseBlockOuter], inplace=True)
def local_inplace_sparseblockouter(node):
    """Make GpuSparseBlockOuter work inplace."""
    if isinstance(node.op, GpuSparseBlockOuter) and not node.op.inplace:
        # Use the pre-built inplace instance, consistent with
        # local_inplace_sparseblockgemv and with the lifter above, instead
        # of constructing a fresh GpuSparseBlockOuter(inplace=True).
        return [gpu_sparse_block_outer_inplace(*node.inputs)]
# Move to Gpu optimization
@local_optimizer([GpuFromHost,
                  AbstractConv2d,
                  AbstractConv2d_gradWeights,
                  AbstractConv2d_gradInputs,
                  AbstractConv3d,
                  AbstractConv3d_gradWeights,
                  AbstractConv3d_gradInputs])
def local_conv_gpu_conv(node):
    """
    gpu_from_host(AbstractConv) -> AbstractConv(gpu_from_host)

    AbstractConv(host_from_gpu) -> host_from_gpu(AbstractConv)

    """
    if isinstance(node.op, GpuFromHost):
        host_input = node.inputs[0]
        if host_input.owner and isinstance(host_input.owner.op,
                                           BaseAbstractConv):

            conv = host_input.owner.op
            inps = list(host_input.owner.inputs)
            ctx = infer_context_name(*inps)
            # Transfer image and kernel; keep any shape inputs as-is.
            inps[0] = as_gpuarray_variable(inps[0], context_name=ctx)
            inps[1] = as_gpuarray_variable(inps[1], context_name=ctx)
            out = conv(*inps)
            # out is on the GPU because both inputs are.
            out = theano.tensor.patternbroadcast(out,
                                                 node.outputs[0].broadcastable)
            return [out]

    if isinstance(node.op, BaseAbstractConv):
        # conv(host_from_gpu) -> host_from_gpu(gpu_conv)
        inp1 = node.inputs[0]
        inp2 = node.inputs[1]
        if ((isinstance(inp1.type, GpuArrayType) and
             isinstance(inp2.type, GpuArrayType))):
            # Both inputs are already directly on the GPU, nothing to do
            return

        inp1_on_gpu = (isinstance(inp1.type, GpuArrayType) or
                       (inp1.owner and isinstance(inp1.owner.op, HostFromGpu)))
        inp2_on_gpu = (isinstance(inp2.type, GpuArrayType) or
                       (inp2.owner and isinstance(inp2.owner.op, HostFromGpu)))

        if inp1_on_gpu or inp2_on_gpu:
            conv = node.op
            inps = list(node.inputs)
            ctx = infer_context_name(*inps)
            inps[0] = as_gpuarray_variable(inps[0], context_name=ctx)
            inps[1] = as_gpuarray_variable(inps[1], context_name=ctx)
            out = conv(*inps)
            # out is on the GPU because both inputs are.
            out = theano.tensor.patternbroadcast(
                out,
                node.outputs[0].broadcastable)
            # If the original output was on CPU, we have to transfer it
            if isinstance(node.outputs[0].type, tensor.TensorType):
                return [tensor.as_tensor_variable(out)]
            else:
                return [out]


register_opt()(local_conv_gpu_conv)
# CorrMM opt
@local_optimizer([AbstractConv2d])
def local_abstractconv_gemm(node):
    """Lower AbstractConv2d on GPU inputs to a GpuCorrMM variant.

    Full-mode, unit-subsample convolutions are expressed as the
    gradInputs of a valid correlation (cheaper than padding); everything
    else goes through plain GpuCorrMM, possibly replaced afterwards by
    GpuCorrMM_gradWeights when a shape heuristic says it is faster.
    """
    if not isinstance(node.op, AbstractConv2d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    ctx = infer_context_name(img, kern)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    unshared = node.op.unshared
    # Slicing tuple that reverses the last two (spatial) axes, whatever
    # the kernel rank is (unshared kernels have more leading axes).
    flip = (slice(None),) * (kern.ndim - 2) + \
        (slice(None, None, -1),) * 2
    kern_axes = (1, 0) + tuple(i for i in range(2, kern.ndim))
    if ((border_mode == 'full') and (subsample == (1, 1)) and num_groups == 1 and not unshared):
        if not node.op.filter_flip:
            kern = kern[flip]
        # need to dimshuffle the kernel for full convolution
        kern = kern.dimshuffle(kern_axes)
        # call GpuCorrMM_gradInputs
        rval = GpuCorrMM_gradInputs('valid',
                                    subsample,
                                    filter_dilation)(
            gpu_contiguous(kern), gpu_contiguous(img))
    else:
        # need to flip the kernel if necessary
        if node.op.filter_flip:
            kern = kern[flip]
        # By default use GpuCorrMM
        rval = GpuCorrMM(border_mode,
                         subsample,
                         filter_dilation,
                         num_groups,
                         unshared)(gpu_contiguous(img),
                                   gpu_contiguous(kern))
        # call GpuCorrMM_gradWeights if good
        # (the latter is faster if batchsize * kernelHeight * kernelWidth
        # is larger than inputChannels * outputHeight * outputWidth.
        # GpuConv does not always store information on the batchsize and
        # channels, though, so we only use what information we have.)
        if ((subsample == (1, 1)) and (filter_dilation == (1, 1)) and
                (node.op.imshp is not None) and
                (None not in node.op.imshp[-2:]) and
                (node.op.kshp is not None) and
                (None not in node.op.kshp) and
                border_mode != "half" and
                num_groups == 1 and
                not unshared):
            # we know the kernel and output size
            # NOTE(review): the kshp indices used here ([0] and [-3]) are
            # presumably spatial kernel dims given the comment above —
            # confirm against the AbstractConv2d kshp layout.
            prod1 = node.op.kshp[0] * node.op.kshp[-3]
            prod2 = ((node.op.imshp[-2] - node.op.kshp[0] + 1) *
                     (node.op.imshp[-1] - node.op.kshp[-3] + 1))
            if (None not in node.op.imshp[:1]):
                # we also know batchsize and input channels
                prod1 *= node.op.imshp[0]
                prod2 *= node.op.imshp[1]
            # compare to decide
            if prod1 > prod2:
                rval = GpuCorrMM_gradWeights(border_mode,
                                             subsample,
                                             filter_dilation)(
                    gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),
                    gpu_contiguous(kern.dimshuffle(1, 0, 2, 3)))
                # (we need to wrap the result in as_gpuarray_variable,
                # because we are not allowed to replace a GpuArray with
                # a DimShuffle instance in a graph optimization)
                rval = as_gpuarray_variable(
                    rval.dimshuffle(1, 0, 2, 3),
                    context_name=ctx)
    return [rval]
# CorrMM opt used for Meta-optimizer
@local_optimizer([AbstractConv2d])
def local_abstractconv_gemm_def(node):
    """Default CorrMM lowering of AbstractConv2d for the meta-optimizer.

    Always emits a plain GpuCorrMM, flipping the two trailing (spatial)
    kernel axes first when the op requests true convolution semantics.
    """
    op = node.op
    if not isinstance(op, AbstractConv2d):
        return None
    img, kern = node.inputs
    both_on_gpu = (isinstance(img.type, GpuArrayType) and
                   isinstance(kern.type, GpuArrayType))
    if not both_on_gpu:
        return None
    if op.filter_flip:
        # Reverse the last two axes: correlation -> convolution.
        reverse_spatial = ((slice(None),) * (kern.ndim - 2) +
                           (slice(None, None, -1),) * 2)
        kern = kern[reverse_spatial]
    corr = GpuCorrMM(op.border_mode, op.subsample, op.filter_dilation,
                     op.num_groups, op.unshared)
    return [corr(gpu_contiguous(img), gpu_contiguous(kern))]
@local_optimizer([AbstractConv2d])
def local_abstractconv_gemm_alt(node):
    """Alternative CorrMM lowering of AbstractConv2d (meta-optimizer).

    Expresses full-mode convs as GpuCorrMM_gradInputs and valid-mode
    convs as GpuCorrMM_gradWeights; returns None for any other case so
    the default lowering applies.
    """
    if not isinstance(node.op, AbstractConv2d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    ctx = infer_context_name(img, kern)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    unshared = node.op.unshared
    if border_mode == 'full' and subsample == (1, 1) and num_groups == 1 and not unshared:
        # full conv == gradInputs of a valid correlation with the kernel
        # channel axes swapped (and spatially flipped for correlation).
        if not node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1]
        kern = kern.dimshuffle(1, 0, 2, 3)
        rval = GpuCorrMM_gradInputs('valid',
                                    subsample,
                                    filter_dilation)(
            gpu_contiguous(kern), gpu_contiguous(img))
    elif (border_mode == 'valid' and subsample == (1, 1) and filter_dilation == (1, 1) and
          num_groups == 1 and not unshared):
        # valid conv == gradWeights with batch/channel axes swapped on
        # both operands; swap back on the result.
        if node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1]
        rval = GpuCorrMM_gradWeights(border_mode,
                                     subsample,
                                     filter_dilation)(
            gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),
            gpu_contiguous(kern.dimshuffle(1, 0, 2, 3)))
        rval = as_gpuarray_variable(rval.dimshuffle(1, 0, 2, 3),
                                    context_name=ctx)
    else:
        return None
    return [rval]
@local_optimizer([AbstractConv3d])
def local_abstractconv3d_gemm(node):
    """Lower AbstractConv3d on GPU inputs to a GpuCorr3dMM variant.

    Mirrors the 2D version: full-mode unit-subsample convs become
    GpuCorr3dMM_gradInputs of a valid correlation; otherwise a plain
    GpuCorr3dMM is used, optionally replaced by GpuCorr3dMM_gradWeights
    when a shape heuristic predicts it to be faster.
    """
    if not isinstance(node.op, AbstractConv3d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    ctx = infer_context_name(img, kern)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    if ((border_mode == 'full') and (subsample == (1, 1, 1)) and num_groups == 1):
        if not node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1, ::-1]
        # need to dimshuffle the kernel for full convolution
        kern = kern.dimshuffle(1, 0, 2, 3, 4)
        # call GpuCorr3dMM_gradInputs
        rval = GpuCorr3dMM_gradInputs('valid',
                                      subsample,
                                      filter_dilation)(
            gpu_contiguous(kern), gpu_contiguous(img))
    else:
        # need to flip the kernel if necessary
        if node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1, ::-1]
        # By default use GpuCorr3dMM
        rval = GpuCorr3dMM(border_mode,
                           subsample,
                           filter_dilation,
                           num_groups)(gpu_contiguous(img),
                                       gpu_contiguous(kern))
        # call GpuCorr3dMM_gradWeights if good
        # (the latter is faster if batchsize * kernelHeight * kernelWidth * kernelDepth
        # is larger than inputChannels * outputHeight * outputWidth * outputDepth.
        # GpuConv does not always store information on the batchsize and
        # channels, though, so we only use what information we have.)
        if ((subsample == (1, 1, 1)) and (filter_dilation == (1, 1, 1)) and
                (node.op.imshp is not None) and
                (None not in node.op.imshp[-3:]) and
                (node.op.kshp is not None) and
                (None not in node.op.kshp) and
                border_mode != "half" and
                num_groups == 1):
            # we know the kernel and output size
            # NOTE(review): kshp[0:3] are used as the three spatial dims
            # here — confirm against the AbstractConv3d kshp layout.
            prod1 = node.op.kshp[0] * node.op.kshp[1] * node.op.kshp[2]
            prod2 = ((node.op.imshp[-3] - node.op.kshp[0] + 1) *
                     (node.op.imshp[-2] - node.op.kshp[1] + 1) *
                     (node.op.imshp[-1] - node.op.kshp[2] + 1))
            if (None not in node.op.imshp[:1]):
                # we also know batchsize and input channels
                prod1 *= node.op.imshp[0]
                prod2 *= node.op.imshp[1]
            # compare to decide
            if prod1 > prod2:
                rval = GpuCorr3dMM_gradWeights(border_mode,
                                               subsample,
                                               filter_dilation)(
                    gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4)),
                    gpu_contiguous(kern.dimshuffle(1, 0, 2, 3, 4)))
                # (we need to wrap the result in as_gpuarray_variable,
                # because we are not allowed to replace a GpuArray with
                # a DimShuffle instance in a graph optimization)
                rval = as_gpuarray_variable(
                    rval.dimshuffle(1, 0, 2, 3, 4),
                    context_name=ctx)
    return [rval]
# Corr3dMM opt used for Meta-optimizer
@local_optimizer([AbstractConv3d])
def local_abstractconv3d_gemm_def(node):
    """Default Corr3dMM lowering of AbstractConv3d for the meta-optimizer.

    Always emits a plain GpuCorr3dMM, flipping the three spatial kernel
    axes first when the op requests true convolution semantics.
    """
    op = node.op
    if not isinstance(op, AbstractConv3d):
        return None
    img, kern = node.inputs
    if not (isinstance(img.type, GpuArrayType) and
            isinstance(kern.type, GpuArrayType)):
        return None
    if op.filter_flip:
        # correlation -> convolution
        kern = kern[:, :, ::-1, ::-1, ::-1]
    corr = GpuCorr3dMM(op.border_mode, op.subsample,
                       op.filter_dilation, op.num_groups)
    return [corr(gpu_contiguous(img), gpu_contiguous(kern))]
@local_optimizer([AbstractConv3d])
def local_abstractconv3d_alt(node):
    """Alternative Corr3dMM lowering of AbstractConv3d (meta-optimizer).

    Full-mode convs become GpuCorr3dMM_gradInputs, valid-mode convs
    become GpuCorr3dMM_gradWeights; otherwise returns None.
    """
    if not isinstance(node.op, AbstractConv3d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    ctx = infer_context_name(img, kern)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    if((border_mode == 'full') and (subsample == (1, 1, 1)) and
       (num_groups == 1)):
        # full conv == gradInputs of a valid correlation with channel
        # axes swapped on the kernel.
        if not node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1, ::-1]
        kern = kern.dimshuffle(1, 0, 2, 3, 4)
        rval = GpuCorr3dMM_gradInputs('valid',
                                      subsample,
                                      filter_dilation)(
            gpu_contiguous(kern), gpu_contiguous(img))
    elif(subsample == (1, 1, 1) and filter_dilation == (1, 1, 1) and
         border_mode == 'valid' and num_groups == 1):
        # valid conv == gradWeights with batch/channel axes swapped on
        # both operands; swap back on the result.
        if node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1, ::-1]
        rval = GpuCorr3dMM_gradWeights(border_mode,
                                       subsample,
                                       filter_dilation)(
            gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4)),
            gpu_contiguous(kern.dimshuffle(1, 0, 2, 3, 4)))
        rval = as_gpuarray_variable(rval.dimshuffle(1, 0, 2, 3, 4),
                                    context_name=ctx)
    else:
        return None
    return [rval]
@local_optimizer([AbstractConv3d])
def local_abstractconv3d2d(node):
    """Express an AbstractConv3d via the conv3d2d implementation.

    Only applies when subsample, dilation, and groups are trivial;
    conv3d2d expects a different axis order, so both operands, the
    shape hints, and the result are permuted with ``reorder_array``.
    """
    if not isinstance(node.op, AbstractConv3d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    ctx = infer_context_name(img, kern)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    if(subsample == (1, 1, 1) and filter_dilation == (1, 1, 1) and
       num_groups == 1):
        # Swap axes 1 and 2; the permutation is its own inverse, so it is
        # also used to restore the output layout below.
        reorder_array = [0, 2, 1, 3, 4]
        rval = conv3d2d.conv3d(gpu_contiguous(img.dimshuffle(*reorder_array)),
                               gpu_contiguous(kern.dimshuffle(*reorder_array)),
                               [node.op.imshp[i] for i in reorder_array],
                               [node.op.kshp[i] for i in reorder_array],
                               border_mode=border_mode)
        rval = as_gpuarray_variable(rval.dimshuffle(*reorder_array),
                                    context_name=ctx)
        return [rval]
    else:
        return None
@local_optimizer([AbstractConv2d_gradWeights])
def local_abstractconv_gradweights_gemm(node):
    """Lower AbstractConv2d_gradWeights to GpuCorrMM_gradWeights.

    The GPU op computes a correlation gradient; the result's last two
    (spatial) axes are flipped when the abstract op used convolution
    semantics (filter_flip).
    """
    if not isinstance(node.op, AbstractConv2d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    ctx = infer_context_name(img, topgrad)
    rval = GpuCorrMM_gradWeights(border_mode=node.op.border_mode,
                                 subsample=node.op.subsample,
                                 filter_dilation=node.op.filter_dilation,
                                 num_groups=node.op.num_groups,
                                 unshared=node.op.unshared)(
        gpu_contiguous(img), gpu_contiguous(topgrad), shape)
    # Slicing tuple that reverses the last two axes for any kernel rank.
    flip = (slice(None),) * (rval.ndim - 2) + \
        (slice(None, None, -1),) * 2
    if node.op.filter_flip:
        rval = rval[flip]
    rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
    rval = as_gpuarray_variable(rval, context_name=ctx)
    return [rval]
@local_optimizer([AbstractConv2d_gradWeights])
def local_abstractconv_gemm_gradweights_alt(node):
    """Alternative gradWeights lowering: a forward GpuCorrMM with
    batch/channel axes swapped on both operands. Valid mode, unit
    subsample/dilation, single group, shared kernels only."""
    if not isinstance(node.op, AbstractConv2d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    ctx = infer_context_name(img, topgrad)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    unshared = node.op.unshared
    if(border_mode == 'valid' and subsample == (1, 1) and filter_dilation == (1, 1) and
       num_groups == 1 and not unshared):
        rval = GpuCorrMM(border_mode,
                         subsample,
                         filter_dilation)(
            gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),
            gpu_contiguous(topgrad.dimshuffle(1, 0, 2, 3)))
        # Spatial flip for convolution semantics, then undo axis swap.
        if node.op.filter_flip:
            rval = rval[:, :, ::-1, ::-1]
        rval = rval.dimshuffle(1, 0, 2, 3)
        rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
        rval = as_gpuarray_variable(rval, context_name=ctx)
        return [rval]
    else:
        return None
@local_optimizer([AbstractConv3d_gradWeights])
def local_abstractconv3d_gemm_gradweights_alt(node):
    """3D variant of the alternative gradWeights lowering: a forward
    GpuCorr3dMM with batch/channel axes swapped on both operands."""
    if not isinstance(node.op, AbstractConv3d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    ctx = infer_context_name(img, topgrad)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    if(border_mode == 'valid' and subsample == (1, 1, 1) and
       filter_dilation == (1, 1, 1) and num_groups == 1):
        rval = GpuCorr3dMM(border_mode,
                           subsample,
                           filter_dilation)(
            gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4)),
            gpu_contiguous(topgrad.dimshuffle(1, 0, 2, 3, 4)))
        # Spatial flip for convolution semantics, then undo axis swap.
        if node.op.filter_flip:
            rval = rval[:, :, ::-1, ::-1, ::-1]
        rval = rval.dimshuffle(1, 0, 2, 3, 4)
        rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
        rval = as_gpuarray_variable(rval, context_name=ctx)
        return [rval]
    else:
        return None
@local_optimizer([AbstractConv3d_gradWeights])
def local_abstractconv3d_gradweights_gemm(node):
    """Lower AbstractConv3d_gradWeights to GpuCorr3dMM_gradWeights,
    flipping the three spatial axes of the result when the abstract op
    used convolution semantics (filter_flip)."""
    if not isinstance(node.op, AbstractConv3d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    ctx = infer_context_name(img, topgrad)
    rval = GpuCorr3dMM_gradWeights(border_mode=node.op.border_mode,
                                   subsample=node.op.subsample,
                                   filter_dilation=node.op.filter_dilation,
                                   num_groups=node.op.num_groups)(
        gpu_contiguous(img), gpu_contiguous(topgrad), shape)
    if node.op.filter_flip:
        rval = rval[:, :, ::-1, ::-1, ::-1]
    rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
    rval = as_gpuarray_variable(rval, context_name=ctx)
    return [rval]
@local_optimizer([AbstractConv2d_gradInputs])
def local_abstractconv_gradinputs_gemm(node):
    """Lower AbstractConv2d_gradInputs to GpuCorrMM_gradInputs, flipping
    the kernel's last two (spatial) axes first when the abstract op used
    convolution semantics (filter_flip)."""
    if not isinstance(node.op, AbstractConv2d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    if node.op.filter_flip:
        # Rank-independent reversal of the last two axes (unshared
        # kernels carry extra leading axes).
        flip = (slice(None),) * (kern.ndim - 2) + \
            (slice(None, None, -1),) * 2
        kern = kern[flip]
    rval = GpuCorrMM_gradInputs(border_mode=node.op.border_mode,
                                subsample=node.op.subsample,
                                filter_dilation=node.op.filter_dilation,
                                num_groups=node.op.num_groups,
                                unshared=node.op.unshared)(
        gpu_contiguous(kern), gpu_contiguous(topgrad), shape)
    return [rval]
@local_optimizer([AbstractConv2d_gradInputs])
def local_abstractconv_gradinputs_gemm_alt(node):
    """Alternative gradInputs lowering: a full-mode forward GpuCorrMM of
    the top gradient against the channel-swapped kernel. Valid mode,
    unit subsample, single group, shared kernels only."""
    if not isinstance(node.op, AbstractConv2d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    unshared = node.op.unshared
    if border_mode == 'valid' and subsample == (1, 1) and num_groups == 1 and not unshared:
        # Correlation semantics require an explicit spatial flip here.
        if not node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1]
        rval = GpuCorrMM(border_mode='full',
                         subsample=subsample,
                         filter_dilation=filter_dilation)(
            gpu_contiguous(topgrad),
            gpu_contiguous(kern.dimshuffle(1, 0, 2, 3)))
        return [rval]
    else:
        return None
@local_optimizer([AbstractConv3d_gradInputs])
def local_abstractconv3d_gradinputs_gemm(node):
    """Lower AbstractConv3d_gradInputs to GpuCorr3dMM_gradInputs,
    flipping the three spatial kernel axes first for convolution
    semantics (filter_flip)."""
    if not isinstance(node.op, AbstractConv3d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1, ::-1]
    rval = GpuCorr3dMM_gradInputs(border_mode=node.op.border_mode,
                                  subsample=node.op.subsample,
                                  filter_dilation=node.op.filter_dilation,
                                  num_groups=node.op.num_groups)(
        gpu_contiguous(kern), gpu_contiguous(topgrad), shape)
    return [rval]
@local_optimizer([AbstractConv3d_gradInputs])
def local_abstractconv3d_gradinputs_gemm_alt(node):
    """3D variant of the alternative gradInputs lowering: a full-mode
    forward GpuCorr3dMM of the top gradient against the channel-swapped
    kernel. Valid mode, unit subsample, single group only."""
    if not isinstance(node.op, AbstractConv3d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    if(border_mode == 'valid' and subsample == (1, 1, 1) and
       num_groups == 1):
        # Correlation semantics require an explicit spatial flip here.
        if not node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1, ::-1]
        rval = GpuCorr3dMM(border_mode='full',
                           subsample=subsample,
                           filter_dilation=filter_dilation)(
            gpu_contiguous(topgrad),
            gpu_contiguous(kern.dimshuffle(1, 0, 2, 3, 4)))
        return [rval]
    else:
        return None
class ConvMetaOptimizer(LocalMetaOptimizer):
    """Meta-optimizer that benchmarks candidate conv lowerings and keeps
    the fastest one for each abstract convolution node."""
    def __init__(self):
        super(ConvMetaOptimizer, self).__init__()
    def time_call(self, fn):
        # Time a single call; .sync() forces the async GPU work to finish
        # so the wall-clock measurement is meaningful.
        start = time.time()
        fn()[0].sync()
        return time.time() - start
    def provide_inputs(self, node, inputs):
        """Build random shared inputs for benchmarking *node*.

        Returns an empty dict (i.e. skip benchmarking) unless both imshp
        and kshp are fully known; shape hints for the gradWeights /
        gradInputs shape inputs are provided as constant tensors.
        """
        result = {}
        shapes = (node.op.imshp, node.op.kshp)
        if(node.op.imshp is None or node.op.kshp is None or
           any([s is None for shape in shapes for s in shape])):
            return result
        if type(node.op) in [AbstractConv2d, AbstractConv3d]:
            img, kern = node.inputs
            for(var, shape) in zip((img, kern), shapes):
                result[var] = theano.shared(np.random.random(shape).astype(var.dtype),
                                            var.name,
                                            broadcastable=var.broadcastable,
                                            borrow=True)
        if type(node.op) in [AbstractConv2d_gradWeights, AbstractConv3d_gradWeights]:
            img, top, kshape = node.inputs
            tshp = get_conv_output_shape(node.op.imshp,
                                         node.op.kshp,
                                         node.op.border_mode,
                                         node.op.subsample,
                                         node.op.filter_dilation)
            convdim = img.ndim - 2
            result[kshape] = theano.tensor.as_tensor_variable(node.op.kshp[-convdim:])
            for(var, shape) in zip((img, top), (node.op.imshp, tshp)):
                result[var] = theano.shared(np.random.random(shape).astype(var.dtype),
                                            var.name,
                                            broadcastable=var.broadcastable,
                                            borrow=True)
        if type(node.op) in [AbstractConv2d_gradInputs, AbstractConv3d_gradInputs]:
            kern, top, ishape = node.inputs
            tshp = get_conv_output_shape(node.op.imshp,
                                         node.op.kshp,
                                         node.op.border_mode,
                                         node.op.subsample,
                                         node.op.filter_dilation)
            result[ishape] = theano.tensor.as_tensor_variable(node.op.imshp[2:])
            for(var, shape) in zip((kern, top), (node.op.kshp, tshp)):
                result[var] = theano.shared(np.random.random(shape).astype(var.dtype),
                                            var.name,
                                            broadcastable=var.broadcastable,
                                            borrow=True)
        return result
    def get_opts(self, node):
        """Collect the candidate optimizers for *node*'s op type,
        honouring the metaopt including/excluding config tags."""
        opts = Counter([opt for opt in self.track_dict[type(node.op)]
                        if opt in self.tag_dict['default']])
        include_tags = config.metaopt.optimizer_including.split(':')
        exclude_tags = config.metaopt.optimizer_excluding.split(':')
        for in_opt in include_tags:
            opts.update([opt for opt in self.track_dict[type(node.op)]
                         if opt in self.tag_dict[in_opt]])
        for ex_opt in exclude_tags:
            opts.subtract([opt for opt in self.track_dict[type(node.op)]
                           if opt in self.tag_dict[ex_opt]])
        # `+ Counter()` drops entries whose count fell to zero or below
        # (i.e. excluded optimizers).
        opts = list(opts + Counter())
        return opts
# This deals with any abstract convs that have a transfer somewhere
@register_opt('fast_compile', 'conv_dnn', 'cudnn')
@op_lifter([AbstractConv2d,
            AbstractConv2d_gradWeights,
            AbstractConv2d_gradInputs,
            AbstractConv3d,
            AbstractConv3d_gradWeights,
            AbstractConv3d_gradInputs])
def local_gpua_abstractconv(op, context_name, inputs, outputs):
    """Lift an abstract convolution whose output still lives on the host."""
    already_on_gpu = isinstance(outputs[0].type, GpuArrayType)
    if already_on_gpu:
        # Handled elsewhere; nothing to lift.
        return
    return local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs)
@register_opt2([AbstractConv2d,
                AbstractConv2d_gradWeights,
                AbstractConv2d_gradInputs,
                AbstractConv3d,
                AbstractConv3d_gradWeights,
                AbstractConv3d_gradInputs], 'fast_compile')
def local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs):
    """Rebuild the abstract conv with its first two inputs moved to the GPU."""
    img = as_gpuarray_variable(inputs[0], context_name=context_name)
    kern = as_gpuarray_variable(inputs[1], context_name=context_name)
    # Remaining inputs (e.g. a shape hint) are passed through untouched.
    return [op(img, kern, *inputs[2:])]
def local_gpu_pool(op, ctx_name, inputs, outputs):
    """Lift a CPU Pool to GpuPool (2D/3D pooling only).

    Inputs with more than the expected nd + 2 dimensions are reshaped
    so the extra leading dimensions are merged, pooled, then restored.
    """
    assert op.__props__ == ('ignore_border', 'mode', 'ndim')
    inp, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
    op = GpuPool(op.ignore_border, op.mode, op.ndim)
    if inp.ndim == nd + 2:
        return op(inp, ws, stride, pad)
    else:
        # reshape to 4D or 5D with 2 non-pooling dimensions
        inp_padded = pad_dims(inp, 2, nd)
        ret_padded = op(inp_padded, ws, stride, pad)
        return unpad_dims(ret_padded, inp, 2, nd)
# Group databases so all pooling lifters can be enabled/disabled as one
# unit; the *2 variants target the graph-to-GPU optimization phase.
pool_db = LocalGroupDB()
pool_db2 = LocalGroupDB(local_opt=theano.gof.opt.GraphToGPULocalOptGroup)
pool_db2.__name__ = "pool_db2"
lifter = op_lifter([pool.Pool])(local_gpu_pool)
pool_db.register("local_gpu_pool", lifter,
                 'gpuarray', 'fast_compile', 'fast_run',
                 position=1)
pool_db2.register("local_gpu_pool",
                  local_optimizer([pool.Pool])(local_gpu_pool),
                  'gpuarray', 'fast_compile', 'fast_run',
                  position=1)
register_opt('fast_compile', name='pool_db')(pool_db)
register_opt2([pool.Pool], 'fast_compile', name='pool_db2')(pool_db2)
def local_gpu_max_pool_grad(op, ctx_name, inputs, outputs):
    """Lift a CPU MaxPoolGrad to GpuMaxPoolGrad (2D/3D only), reshaping
    extra leading dimensions as in ``local_gpu_pool``."""
    assert op.__props__ == ('ignore_border', 'mode', 'ndim')
    inp, out, out_grad, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
    out = gpu_contiguous(as_gpuarray_variable(out, ctx_name))
    out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
    op = GpuMaxPoolGrad(op.ignore_border, op.mode, op.ndim)
    if inp.ndim == nd + 2:
        return op(inp, out, out_grad, ws, stride, pad)
    else:
        # reshape to 4D or 5D with 2 non-pooling dimensions
        inp_padded = pad_dims(inp, 2, nd)
        out_padded = pad_dims(out, 2, nd)
        out_grad_padded = pad_dims(out_grad, 2, nd)
        ret_padded = op(inp_padded, out_padded, out_grad_padded,
                        ws, stride, pad)
        return unpad_dims(ret_padded, inp, 2, nd)
# Register in both pooling databases (lifter phase and graph-to-GPU phase).
lifter = op_lifter([pool.MaxPoolGrad])(local_gpu_max_pool_grad)
pool_db.register("local_gpu_max_pool_grad", lifter,
                 'gpuarray', 'fast_compile', 'fast_run',
                 position=1)
pool_db2.register("local_gpu_max_pool_grad",
                  local_optimizer([pool.MaxPoolGrad])(local_gpu_max_pool_grad),
                  'gpuarray', 'fast_compile', 'fast_run',
                  position=1)
def local_gpu_average_pool_grad(op, ctx_name, inputs, outputs):
    """Lift a CPU AveragePoolGrad to GpuAveragePoolGrad (2D/3D only),
    reshaping extra leading dimensions as in ``local_gpu_pool``."""
    assert op.__props__ == ('ignore_border', 'mode', 'ndim')
    inp, out_grad, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
    out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
    op = GpuAveragePoolGrad(op.ignore_border, op.mode, op.ndim)
    if inp.ndim == nd + 2:
        return op(inp, out_grad, ws, stride, pad)
    else:
        # reshape to 4D or 5D with 2 non-pooling dimensions
        inp_padded = pad_dims(inp, 2, nd)
        out_grad_padded = pad_dims(out_grad, 2, nd)
        ret_padded = op(inp_padded, out_grad_padded,
                        ws, stride, pad)
        return unpad_dims(ret_padded, inp, 2, nd)
# Register in both pooling databases (lifter phase and graph-to-GPU phase).
lifter = op_lifter([pool.AveragePoolGrad])(local_gpu_average_pool_grad)
pool_db.register("local_gpu_average_pool_grad", lifter,
                 'gpuarray', 'fast_compile', 'fast_run',
                 position=1)
pool_db2.register("local_gpu_average_pool_grad",
                  local_optimizer([pool.AveragePoolGrad])(local_gpu_average_pool_grad),
                  'gpuarray', 'fast_compile', 'fast_run',
                  position=1)
@register_opt()
@op_lifter([pool.DownsampleFactorMaxGradGrad])
@register_opt2([pool.DownsampleFactorMaxGradGrad])
def local_gpu_downsample_factor_max_grad_grad(op, ctx_name, inputs, outputs):
    """Lift DownsampleFactorMaxGradGrad to its GPU op (2D/3D only),
    reshaping extra leading dimensions as in ``local_gpu_pool``."""
    assert op.__props__ == ('ignore_border', 'mode', 'ndim')
    inp, out, out_grad, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
    out = gpu_contiguous(as_gpuarray_variable(out, ctx_name))
    out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
    op = GpuDownsampleFactorMaxGradGrad(op.ignore_border, op.mode, op.ndim)
    if inp.ndim == nd + 2:
        return op(inp, out, out_grad, ws, stride, pad)
    else:
        # reshape to 4D or 5D with 2 non-pooling dimensions
        inp_padded = pad_dims(inp, 2, nd)
        out_padded = pad_dims(out, 2, nd)
        out_grad_padded = pad_dims(out_grad, 2, nd)
        ret_padded = op(inp_padded, out_padded, out_grad_padded,
                        ws, stride, pad)
        return unpad_dims(ret_padded, inp, 2, nd)
@register_opt()
@op_lifter([pool.MaxPoolRop])
@register_opt2([pool.MaxPoolRop])
def local_gpu_max_pool_rop(op, ctx_name, inputs, outputs):
    """Lift MaxPoolRop (R-op of max pooling) to GpuMaxPoolRop (2D/3D
    only), reshaping extra leading dimensions as in ``local_gpu_pool``."""
    assert op.__props__ == ('ignore_border', 'mode', 'ndim')
    inp, eval_inp, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
    eval_inp = gpu_contiguous(as_gpuarray_variable(eval_inp, ctx_name))
    op = GpuMaxPoolRop(op.ignore_border, op.mode, op.ndim)
    if inp.ndim == nd + 2:
        return op(inp, eval_inp, ws, stride, pad)
    else:
        # reshape to 4D or 5D with 2 non-pooling dimensions
        inp_padded = pad_dims(inp, 2, nd)
        eval_inp_padded = pad_dims(eval_inp, 2, nd)
        ret_padded = op(inp_padded, eval_inp_padded, ws, stride, pad)
        return unpad_dims(ret_padded, inp, 2, nd)
@register_opt("low_memory")
@local_optimizer([GpuCAReduceCuda])
def local_gpu_elemwise_careduce(node):
    """
    Merge some GpuCAReduceCuda and GPUElemwise.

    Currently merged:
    - SUM(X^2)
    - SUM(ABS(X))

    Folds the elemwise scalar op into the reduction's ``pre_scalar_op``
    so the intermediate elemwise result is never materialized.
    """
    if (isinstance(node.op, GpuCAReduceCuda) and
            node.op.pre_scalar_op is None and
            node.inputs[0].owner and
            isinstance(node.inputs[0].owner.op, GpuElemwise) and
            # The reduction supports any one-input scalar op. We don't
            # automatically add more cases, as some (e.g. trigonometric
            # ops combined with certain reduction patterns) would likely
            # result in a slowdown.
            isinstance(node.inputs[0].owner.op.scalar_op, (scalar.basic.Sqr,
                                                           scalar.basic.Abs))):
        inp = node.inputs[0].owner.inputs[0]
        props = node.op._props_dict()
        props["pre_scalar_op"] = node.inputs[0].owner.op.scalar_op
        with inherit_stack_trace(node.outputs):
            out = GpuCAReduceCuda(**props)(inp)
            return [out]
@local_optimizer(None)
def local_assert_no_cpu_op(node):
    """Warn, raise, or drop into pdb (per ``config.assert_no_cpu_op``)
    when a CPU-only op is sandwiched between GPU transfers.

    Triggers when every input of *node* comes from a HostFromGpu and at
    least one output feeds a GpuFromHost, i.e. the computation
    round-trips through the CPU just for this node.
    """
    cpu_island = (
        all(var.owner and isinstance(var.owner.op, HostFromGpu)
            for var in node.inputs) and
        any(any(isinstance(c[0].op, GpuFromHost) for c in var.clients)
            for var in node.outputs))
    if cpu_island:
        if config.assert_no_cpu_op == "warn":
            # Lazy %-style args: the node is only stringified when the
            # warning is actually emitted.
            _logger.warning("CPU Op %s is detected in the computation "
                            "graph", node)
        elif config.assert_no_cpu_op == "raise":
            raise AssertionError("The Op %s is on CPU." % node)
        elif config.assert_no_cpu_op == "pdb":
            pdb.set_trace()
# Register the local_assert_no_cpu_op:
# Wrap the local optimizer so it runs over the whole graph output-to-input.
assert_no_cpu_op = theano.tensor.opt.in2out(local_assert_no_cpu_op,
                                            name='assert_no_cpu_op')
# 49.2 is after device specialization & fusion optimizations for last transfers
optdb.register('gpua_assert_no_cpu_op', assert_no_cpu_op, 49.2,
               'assert_no_cpu_op')
def tensor_to_gpu(x, context_name):
    """Return a fresh GPU variable mirroring TensorType *x*; any other
    variable is returned unchanged."""
    if not isinstance(x.type, tensor.TensorType):
        return x
    gpu_var = GpuArrayType(broadcastable=x.type.broadcastable,
                           context_name=context_name,
                           dtype=x.type.dtype)()
    if x.name:
        gpu_var.name = x.name + '[Gpua]'
    return gpu_var
def gpu_safe_new(x, tag=''):
    """
    Internal function that constructs a new variable from x with the same
    type, but with a different name (old name + tag). This function is used
    by gradient, or the R-op to construct new variables for the inputs of
    the inner graph such that there is no interference between the original
    graph and the newly constructed graph.
    """
    old_name = getattr(x, 'name', None)
    new_name = None if old_name is None else old_name + tag
    if isinstance(x, theano.Constant):
        # Constants carry their value; a clone is enough.
        return x.clone()
    fresh = x.type()
    fresh.name = new_name
    return fresh
def gpu_reconstruct_graph(inputs, outputs, tag=None):
    """
    Different interface to clone, that allows you to pass inputs.
    Compared to clone, this method always replaces the inputs with
    new variables of the same type, and returns those (in the same
    order as the original inputs).
    """
    if tag is None:
        tag = ''
    fresh_inputs = [gpu_safe_new(inp, tag) for inp in inputs]
    replacements = dict(zip(inputs, fresh_inputs))
    fresh_outputs = scan_utils.clone(outputs, replace=replacements)
    return (fresh_inputs, fresh_outputs)
@register_opt('scan', 'fast_compile')
@op_lifter([scan_op.Scan])
@register_opt2([scan_op.Scan], 'fast_compile')
def local_gpua_scan_to_gpua(op, context_name, inputs, outputs):
    """Move a Scan op (outer and inner graphs) to the GPU.

    Tags the op info with 'gpua' to avoid re-lifting, transfers the
    outer inputs (except the nit_sot length slots, which stay on host),
    rebuilds the inner graph on GPU variables, and recomputes the C
    module hash since Scan.__init__ cannot do so for GPU graphs.
    """
    info = copy.deepcopy(op.info)
    if info.get('gpua', False):
        # Already lifted; avoid infinite re-application.
        return
    info['gpua'] = True
    nw_ins = [inputs[0]]
    # Slots [1:e] cover seqs, mit_mot, mit_sot, sit_sot and shared outs:
    # these carry data and must be transferred.
    e = (1 +
         op.n_seqs +
         op.n_mit_mot +
         op.n_mit_sot +
         op.n_sit_sot +
         op.n_shared_outs)
    nw_ins += [safe_to_gpu(x, context_name) for x in inputs[1:e]]
    b = e
    e = e + op.n_nit_sot
    # nit_sot inputs are output lengths (integers); keep them on host.
    nw_ins += inputs[b:e]
    nw_ins += [safe_to_gpu(x, context_name) for x in inputs[e:]]
    scan_ins = [tensor_to_gpu(x, context_name) for x in op.inputs]
    # The inner output corresponding to the looping condition should not be
    # moved to the gpu
    if op.info['as_while']:
        scan_outs = [safe_to_gpu(x, context_name) for x in op.outputs[:-1]]
        scan_outs += [op.outputs[-1]]
    else:
        scan_outs = [safe_to_gpu(x, context_name) for x in op.outputs]
    scan_outs = scan_utils.clone(
        scan_outs,
        replace=list(zip(op.inputs,
                         (safe_to_cpu(x) for x in scan_ins))))
    # We need to construct the hash here, because scan
    # __init__ does not know about the gpu and can not
    # handle graphs with inputs being on the gpu
    tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
    local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)
    _cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
    info['gpu_hash'] = hash(_cmodule_key)
    def typebuild(dtype, broadcastable, context_name=context_name):
        return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
                            context_name=context_name)
    nw_op = scan_op.Scan(scan_ins, scan_outs, info,
                         typeConstructor=typebuild).make_node(*nw_ins)
    return nw_op.outputs
def _scan_type_infer(node):
    """Return a GpuArrayType factory bound to the context of *node*'s inputs."""
    ctx = infer_context_name(*node.inputs)
    def typebuild(dtype, broadcastable, context_name=ctx):
        return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
                            context_name=context_name)
    return typebuild
# Add optimization : maxandargmax (CPU -> GPU)
@register_opt('fast_compile')
@op_lifter([tensor.MaxAndArgmax])
@register_opt2([tensor.MaxAndArgmax], 'fast_compile')
def local_gpu_maxandargmax(op, context_name, inputs, outputs):
    """Lift MaxAndArgmax to the GPU; float16 runs through a float32 cast."""
    gpu_op = GpuMaxAndArgmax(op.get_params(None))
    if inputs[0].dtype != "float16":
        return gpu_op
    # For now it is better to copy/cast on the GPU then transfer to the CPU
    max_out, argmax_out = gpu_op(inputs[0].astype('float32'))
    return [max_out.astype('float16'), argmax_out]
@register_opt('fast_compile')
@op_lifter([Images2Neibs])
@register_opt2([Images2Neibs], 'fast_compile')
def local_gpua_images2neibs(op, context_name, inputs, outputs):
    """Lift Images2Neibs to the GPU for the modes the GPU op supports."""
    supported_modes = ('valid', 'half', 'full', 'ignore_borders',
                       'wrap_centered')
    if op.mode in supported_modes:
        return GpuImages2Neibs(op.mode)
# solve
@register_opt('fast_compile')
@op_lifter([slinalg.Solve])
@register_opt2([theano.tensor.slinalg.Solve], 'fast_compile')
def local_gpu_solve(op, context_name, inputs, outputs):
    """Lift slinalg.Solve to a GPU solver.

    Triangular systems go to GpuCublasTriangularSolve (needs cuBLAS);
    other supported structures go to GpuCusolverSolve (needs cuSOLVER).
    float16 inputs are solved in float32 and cast back.
    """
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    if op.A_structure not in MATRIX_STRUCTURES_SOLVE:
        return
    if op.A_structure in ['lower_triangular', 'upper_triangular']:
        if not cublas_available:
            return
        lower = op.A_structure == 'lower_triangular'
        op = GpuCublasTriangularSolve(lower)
    else:
        if not cusolver_available:
            return
        op = GpuCusolverSolve(A_structure=op.A_structure)
    if inputs[0].dtype == 'float16':
        return op(inputs[0].astype('float32'),
                  inputs[1].astype('float32')).astype('float16')
    return op
@register_inplace()
@local_optimizer([GpuCusolverSolve], inplace=True)
def local_inplace_gpu_solve(node):
    """Replace a non-inplace GpuCusolverSolve with its inplace variant."""
    if not isinstance(node.op, GpuCusolverSolve) or node.op.inplace:
        return
    inplace_op = GpuCusolverSolve(A_structure=node.op.A_structure,
                                  trans=node.op.trans,
                                  inplace=True)
    with inherit_stack_trace(node.outputs):
        return [inplace_op(*node.inputs)]
# Cholesky decomposition
def local_gpu_cholesky(op, context_name, inputs, outputs):
    """Lift slinalg.Cholesky to the cuSOLVER-backed GpuCholesky.

    float16 inputs are factorized in float32 and cast back.  Returns
    None when cuSOLVER is unavailable or the dtype is unsupported.
    """
    if not (cusolver_available and
            inputs[0].dtype in ('float16', 'float32')):
        return
    gpu_op = GpuCholesky(lower=op.lower, inplace=op.destructive)
    if inputs[0].dtype == 'float16':
        return gpu_op(inputs[0].astype('float32')).astype('float16')
    return gpu_op
# Group databases for the dense linear-algebra lifters; `position` controls
# which backend wins when several are available.
matrix_ops_db = LocalGroupDB()
matrix_ops_db2 = LocalGroupDB(local_opt=theano.gof.opt.GraphToGPULocalOptGroup)
matrix_ops_db2.__name__ = "matrix_ops_db2"
# For Cholesky decomposition, magma 2.2 is slower than cusolver 8 (tested for
# matrices of size 1000). Thus, cusolver is prioritized during graph
# optimizations. To explicitly use magma, you should disable cusolver using
# `optimizer_excluding=cusolver` in Theano config.
lifter = op_lifter([slinalg.Cholesky])(local_gpu_cholesky)
matrix_ops_db.register("local_gpu_cholesky", lifter,
                       'gpuarray', 'fast_compile', 'fast_run', 'cusolver',
                       position=0)
matrix_ops_db2.register("local_gpu_cholesky",
                        local_optimizer([slinalg.Cholesky])(local_gpu_cholesky),
                        'gpuarray', 'fast_compile', 'fast_run', 'cusolver',
                        position=0)
register_opt('fast_compile', name='matrix_ops_db')(matrix_ops_db)
register_opt2([slinalg.Solve], 'fast_compile', name='matrix_ops_db2')(matrix_ops_db2)
@register_inplace()
@local_optimizer([GpuCholesky], inplace=True)
def local_inplace_gpu_cholesky(node):
    """Replace a non-inplace GpuCholesky with its inplace clone."""
    if not isinstance(node.op, GpuCholesky) or node.op.inplace:
        return
    with inherit_stack_trace(node.outputs):
        return [node.op.clone_inplace()(*node.inputs)]
def local_gpu_magma_cholesky(op, context_name, inputs, outputs):
    """Lift slinalg.Cholesky to the MAGMA-backed GpuMagmaCholesky.

    Same float16-via-float32 handling as the cuSOLVER variant; only
    applies when MAGMA is enabled in the Theano config.
    """
    if not (config.magma.enabled and
            inputs[0].dtype in ('float16', 'float32')):
        return
    gpu_op = GpuMagmaCholesky(lower=op.lower, inplace=op.destructive)
    if inputs[0].dtype == 'float16':
        return gpu_op(inputs[0].astype('float32')).astype('float16')
    return gpu_op
# MAGMA Cholesky is registered after the cuSOLVER one (position=1 vs 0),
# so cuSOLVER takes precedence unless excluded.
lifter = op_lifter([slinalg.Cholesky])(local_gpu_magma_cholesky)
matrix_ops_db.register("local_gpu_magma_cholesky", lifter,
                       'gpuarray', 'fast_compile', 'fast_run', 'magma',
                       position=1)
matrix_ops_db2.register("local_gpu_magma_cholesky",
                        local_optimizer([slinalg.Cholesky])(local_gpu_magma_cholesky),
                        'gpuarray', 'fast_compile', 'fast_run', 'magma',
                        position=1)
@register_inplace()
@local_optimizer([GpuMagmaCholesky], inplace=True)
def local_inplace_gpu_magma_cholesky(node):
    """Replace a non-inplace GpuMagmaCholesky with its inplace clone.

    Consistency fix: wrap the replacement in ``inherit_stack_trace`` like
    the sibling inplace optimizers (``local_inplace_gpu_cholesky``,
    ``local_inplace_gpu_solve``) so the new node keeps the original
    outputs' stack traces for error reporting.
    """
    if isinstance(node.op, GpuMagmaCholesky) and not node.op.inplace:
        with inherit_stack_trace(node.outputs):
            return [node.op.clone_inplace()(*node.inputs)]
# QR decomposition
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.QRFull])
@register_opt2([theano.tensor.nlinalg.QRFull], 'magma', 'fast_compile')
def local_gpu_magma_qr(op, context_name, inputs, outputs):
    """Lift QRFull (mode='reduced' only) to the MAGMA GPU QR.

    ``gpu_qr(..., complete=True)`` returns a list of outputs (Q and R);
    float16 input is decomposed in float32 and each output cast back.
    """
    if not config.magma.enabled or op.mode != 'reduced':
        return
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    x = inputs[0]
    if inputs[0].dtype == 'float16':
        x = inputs[0].astype('float32')
    out = gpu_qr(x, complete=True)
    if inputs[0].dtype == 'float16':
        return [o.astype('float16') for o in out]
    return out
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.QRIncomplete])
@register_opt2([theano.tensor.nlinalg.QRIncomplete], 'magma', 'fast_compile')
def local_gpu_magma_qr_incomplete(op, context_name, inputs, outputs):
    """Lift QRIncomplete to the MAGMA GPU QR.

    ``gpu_qr(..., complete=False)`` yields a single output here (it is
    wrapped in a one-element list on the float16 path); float16 input is
    decomposed in float32 and cast back.
    """
    if not config.magma.enabled:
        return
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    x = inputs[0]
    if inputs[0].dtype == 'float16':
        x = inputs[0].astype('float32')
    out = gpu_qr(x, complete=False)
    if inputs[0].dtype == 'float16':
        return [out.astype('float16')]
    return out
# Matrix inverse
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.MatrixInverse])
@register_opt2([theano.tensor.nlinalg.MatrixInverse], 'magma', 'fast_compile')
def local_gpu_magma_matrix_inverse(op, context_name, inputs, outputs):
    """Lift MatrixInverse to the MAGMA-backed GpuMagmaMatrixInverse."""
    if not (config.magma.enabled and
            inputs[0].dtype in ('float16', 'float32')):
        return
    gpu_op = GpuMagmaMatrixInverse()
    if inputs[0].dtype == 'float16':
        # Invert in float32, cast the result back.
        return gpu_op(inputs[0].astype('float32')).astype('float16')
    return gpu_op
@register_inplace()
@local_optimizer([GpuMagmaMatrixInverse])
def local_inplace_gpu_magma_matrix_inverse(node):
    """Swap a non-inplace GpuMagmaMatrixInverse for its inplace clone."""
    if not isinstance(node.op, GpuMagmaMatrixInverse) or node.op.inplace:
        return
    with inherit_stack_trace(node.outputs):
        return [node.op.clone_inplace()(*node.inputs)]
# Eigen decomposition of a symmetric matrix
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.Eigh])
@register_opt2([theano.tensor.nlinalg.Eigh], 'magma', 'fast_compile')
def local_gpu_magma_eigh(op, context_name, inputs, outputs):
    """Lift Eigh to the MAGMA-backed GpuMagmaEigh.

    Bug fix: ``GpuMagmaEigh(compute_v=True)`` has two outputs (w, v), so
    the previous float16 path called ``.astype`` on the output *list*,
    which would raise AttributeError at optimization time.  Cast each
    output back to float16 individually instead, matching what the QR
    and SVD lifters above/below do.
    """
    if not config.magma.enabled:
        return
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    gpu_op = GpuMagmaEigh(UPLO=op.UPLO, compute_v=True)
    if inputs[0].dtype == 'float16':
        # Decompose in float32, cast both outputs back to float16 so the
        # replacement dtypes match the original node's outputs.
        w, v = gpu_op(inputs[0].astype('float32'))
        return [w.astype('float16'), v.astype('float16')]
    return gpu_op
# Singular Value Decomposition
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.SVD])
@register_opt2([theano.tensor.nlinalg.SVD], 'magma', 'fast_compile')
def local_gpu_magma_svd(op, context_name, inputs, outputs):
    """Lift SVD to the MAGMA GPU SVD.

    With ``compute_uv`` the result is a list (U, s, V) and each piece is
    cast back on the float16 path; otherwise a single output (s) is
    wrapped in a list.  float16 input is decomposed in float32.
    """
    if not config.magma.enabled:
        return
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    x = inputs[0]
    if inputs[0].dtype == 'float16':
        x = inputs[0].astype('float32')
    out = gpu_svd(x, compute_uv=op.compute_uv, full_matrices=op.full_matrices)
    if inputs[0].dtype == 'float16':
        if op.compute_uv:
            out = [o.astype('float16') for o in out]
        else:
            out = [out.astype('float16')]
    return out
@register_opt('ctc', 'fast_compile')
@op_lifter([theano.tensor.nnet.ctc.ConnectionistTemporalClassification])
@register_opt2([ConnectionistTemporalClassification], 'ctc', 'fast_compile')
def local_gpu_ctc(op, context_name, inputs, outputs):
    """Lift the CTC loss op to its GPU implementation."""
    gpu_ctc = GpuConnectionistTemporalClassification(
        compute_grad=op.compute_grad)
    return gpu_ctc.make_node(*inputs).outputs
# Do not register in fast_run or fast_compile.
# It will be added to fast_run if the GPU is enabled.
# Position 75 runs after the main gpuarray transfer passes.
optdb.register('gpua_scanOp_make_inplace',
               scan_opt.ScanInplaceOptimizer(typeInfer=_scan_type_infer,
                                             gpua_flag=True),
               75,
               'gpuarray',
               'inplace',
               'scan')
# Register GPU convolution implementation
# They are tried in a specific order so we can control
# which ones take precedence over others.
abstractconv_groupopt = theano.gof.optdb.LocalGroupDB()
abstractconv_groupopt.__name__ = "gpuarray_abstractconv_opts"
register_opt('fast_compile')(abstractconv_groupopt)
# We import these opts here instead of at the top of this file
# to avoid a circular dependency problem with dnn
from .dnn import (local_abstractconv_cudnn,
                  local_abstractconv_gw_cudnn,
                  local_abstractconv_gi_cudnn,  # noqa: 402
                  local_abstractconv_cudnn_alt,
                  local_abstractconv3d_cudnn_alt)
# cuDNN implementations first (position 20): forward, grad-weights,
# grad-inputs.
abstractconv_groupopt.register('local_abstractconv_dnn',
                               local_abstractconv_cudnn, 20,
                               'conv_dnn',
                               'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
abstractconv_groupopt.register('local_abstractconv_gw_dnn',
                               local_abstractconv_gw_cudnn, 20,
                               'conv_dnn',
                               'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
abstractconv_groupopt.register('local_abstractconv_gi_dnn',
                               local_abstractconv_gi_cudnn, 20,
                               'conv_dnn',
                               'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
# The GEMM-based convolution comes last to catch all remaining cases.
# It can be disabled by excluding 'conv_gemm'.
abstractconv_groupopt.register('local_abstractconv_gemm', local_abstractconv_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gemm', local_abstractconv3d_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv_gradweights_gemm',
                               local_abstractconv_gradweights_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gradweights_gemm',
                               local_abstractconv3d_gradweights_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv_gradinputs',
                               local_abstractconv_gradinputs_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gradinputs',
                               local_abstractconv3d_gradinputs_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
# Meta-optimizer that times the candidate convolution implementations and
# picks the fastest; registered at position 0 so it runs before the fixed
# per-implementation lifters above when the 'conv_meta' tag is enabled.
conv_metaopt = ConvMetaOptimizer()
conv_metaopt.register(local_abstractconv_cudnn,
                      ['default', 'cudnn', 'conv_dnn'])
conv_metaopt.register(local_abstractconv_gw_cudnn,
                      ['default', 'cudnn', 'conv_dnn'])
conv_metaopt.register(local_abstractconv_gi_cudnn,
                      ['default', 'cudnn', 'conv_dnn'])
conv_metaopt.register(local_abstractconv_gemm_def,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d_gemm_def,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_gradweights_gemm,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d_gradweights_gemm,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_gradinputs_gemm,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d_gradinputs_gemm,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_gemm_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_gemm_gradweights_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_gradinputs_gemm_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_cudnn_alt,
                      ['default', 'alternative', 'cudnn', 'conv_dnn'])
conv_metaopt.register(local_abstractconv3d_cudnn_alt,
                      ['default', 'alternative', 'cudnn', 'conv_dnn'])
conv_metaopt.register(local_abstractconv3d_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d_gemm_gradweights_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d_gradinputs_gemm_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d2d,
                      ['alternative', 'conv3d2d'])
abstractconv_groupopt.register('conv_metaopt', conv_metaopt, 'conv_meta', position=0)
# Register cuDNN batch normalization implementation
# We import these opts here instead of at the top of this file
# to avoid a circular dependency problem with dnn
from .dnn import (local_abstract_batch_norm_train_cudnn,
                  local_abstract_batch_norm_train_grad_cudnn,
                  local_abstract_batch_norm_inference_cudnn)  # noqa: 402
abstract_batch_norm_groupopt = theano.gof.optdb.LocalGroupDB()
abstract_batch_norm_groupopt.__name__ = "gpuarray_batchnorm_opts"
register_opt('fast_compile')(abstract_batch_norm_groupopt)
abstract_batch_norm_db = LocalGroupDB()
abstract_batch_norm_db2 = LocalGroupDB(
    local_opt=theano.gof.opt.GraphToGPULocalOptGroup)
abstract_batch_norm_db2.__name__ = "abstract_batch_norm_db2"
register_opt('fast_compile', name='abstract_batch_norm_db')(
    abstract_batch_norm_db)
register_opt2([bn.AbstractBatchNormTrain,
               bn.AbstractBatchNormTrainGrad,
               bn.AbstractBatchNormInference],
              'fast_compile', name='abstract_batch_norm_db2')(
    abstract_batch_norm_db2)
# For each abstract batch-norm op, register the cuDNN lifter first
# (position=1) and the CPU fallback last.
for op, fct, cpu in [(bn.AbstractBatchNormTrain,
                      local_abstract_batch_norm_train_cudnn,
                      bn.local_abstract_batch_norm_train),
                     (bn.AbstractBatchNormTrainGrad,
                      local_abstract_batch_norm_train_grad_cudnn,
                      bn.local_abstract_batch_norm_train_grad),
                     (bn.AbstractBatchNormInference,
                      local_abstract_batch_norm_inference_cudnn,
                      bn.local_abstract_batch_norm_inference)]:
    lifter = op_lifter([op])(fct)
    abstract_batch_norm_db.register(fct.__name__,
                                    lifter,
                                    'gpuarray', 'fast_compile', 'fast_run',
                                    'cudnn', 'batchnorm_dnn',
                                    position=1)
    abstract_batch_norm_db2.register(fct.__name__,
                                     local_optimizer([op])(fct),
                                     'gpuarray', 'fast_compile', 'fast_run',
                                     'cudnn', 'batchnorm_dnn',
                                     position=1)
    # cpu is a normal optimization. We can't register it in
    # GraphToGPU. So for now, only add it to the slower EQ phase. If
    # there is no cuDNN, we still want to move it to the GPU now with
    # a Theano graph so to have this graph on the GPU.
    abstract_batch_norm_db.register(cpu.__name__, cpu,
                                    'gpuarray', 'fast_compile', 'fast_run',
                                    position='last')
from __future__ import absolute_import, print_function, division
import copy
import numpy as np
import logging
import pdb
import time
from six import iteritems
from six.moves import xrange
import sys
import theano
from theano import tensor, scalar, gof, config
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.gof import (local_optimizer, EquilibriumDB, TopoOptimizer,
LocalGroupDB,
SequenceDB, Optimizer, DB, toolbox, graph)
from theano.gof.opt import (LocalMetaOptimizer, copy_stack_trace,
inherit_stack_trace)
from theano.ifelse import IfElse
from theano.misc.ordered_set import OrderedSet
from theano.scalar.basic import Scalar, Pow, Cast
from theano.scalar.basic import log, neg, true_div
from theano.scalar.basic_scipy import Erfinv, Erfcinv
from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.nnet import bn, conv3d2d
from theano.tensor.nnet.conv import ConvOp
from theano.tensor.nnet.blocksparse import SparseBlockGemv, SparseBlockOuter
from theano.tensor.nnet.abstract_conv import (BaseAbstractConv,
AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs,
get_conv_output_shape)
from theano.tensor.nnet.neighbours import Images2Neibs
from theano.tensor.nnet.ctc import ConnectionistTemporalClassification
import theano.tensor.nlinalg as nlinalg
import theano.tensor.signal.pool as pool
import theano.tensor.slinalg as slinalg
from collections import Counter
from theano.tests.breakpoint import PdbBreakpoint
from .type import (GpuArrayType, GpuArrayConstant, get_context,
ContextNotDefined, move_to_gpu)
from .basic_ops import (as_gpuarray_variable, infer_context_name,
host_from_gpu, GpuToGpu,
HostFromGpu, GpuFromHost,
GpuSplit, GpuContiguous, gpu_contiguous,
GpuAlloc, GpuAllocEmpty, GpuReshape,
GpuEye, gpu_join, GpuJoin)
from .blas import (gpu_dot22, GpuGemm, GpuGer, GpuGemmBatch,
gpugemm_no_inplace, gpugemm_inplace,
gpugemmbatch_no_inplace,
gpugemv_no_inplace, gpugemv_inplace,
GpuCorrMM, GpuCorrMM_gradInputs, GpuCorrMM_gradWeights,
GpuCorr3dMM, GpuCorr3dMM_gradInputs, GpuCorr3dMM_gradWeights)
from .pool import (GpuPool, GpuMaxPoolGrad, GpuAveragePoolGrad, GpuMaxPoolRop,
GpuDownsampleFactorMaxGradGrad)
from .blocksparse import (GpuSparseBlockGemv, GpuSparseBlockOuter,
gpu_sparse_block_outer,
gpu_sparse_block_outer_inplace,
gpu_sparse_block_gemv, gpu_sparse_block_gemv_inplace)
from .nnet import (gpu_crossentropy_softmax_1hot_with_bias_dx,
gpu_crossentropy_softmax_argmax_1hot_with_bias,
gpu_softmax_with_bias, gpu_softmax)
from .elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda,
GpuCAReduceCPY, gpu_erfinv, gpu_erfcinv,
max_inputs_to_GpuElemwise)
from .subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedSubtensor,
GpuAdvancedSubtensor1,
GpuAdvancedBooleanSubtensor,
GpuAdvancedIncSubtensor,
GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20,
GpuAdvancedBooleanIncSubtensor,
GpuAllocDiag, GpuExtractDiag)
from .opt_util import alpha_merge, output_merge, pad_dims, unpad_dims
from .reduction import GpuMaxAndArgmax
from .linalg import (GpuCusolverSolve, MATRIX_STRUCTURES_SOLVE, GpuCholesky,
cusolver_available, GpuMagmaMatrixInverse, gpu_svd,
GpuMagmaCholesky, gpu_qr, GpuMagmaEigh,
GpuCublasTriangularSolve, cublas_available)
from .neighbours import GpuImages2Neibs
from .ctc import GpuConnectionistTemporalClassification
# Module logger and the three equilibrium databases driving the GPU passes:
# gpu_optimizer holds the per-node lifters, gpu_cut_copies removes redundant
# transfers, and gpu_optimizer2 feeds the whole-graph GraphToGPU pass.
_logger = logging.getLogger("theano.gpuarray.opt")
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
gpu_optimizer2 = EquilibriumDB()
class GraphToGPUDB(DB):
    """DB wrapper that, on query, collects the local optimizers registered
    in ``gpu_optimizer2`` and packages them into a GraphToGPU optimizer."""

    def query(self, *tags, **kwtags):
        """Return a GraphToGPU built from the optimizers matching the tags."""
        opt = gpu_optimizer2.query(*tags, **kwtags)
        return GraphToGPU(opt.local_optimizers_all, opt.local_optimizers_map)
# Sequence of GPU passes: whole-graph transfer first (-0.5), then the
# per-node lifters (1), then transfer-cleanup (2).  The whole sequence is
# inserted into the global optdb just before add_destroy_handler.
gpu_seqopt = SequenceDB()
gpu_seqopt.register('gpuarray_graph_optimization', GraphToGPUDB(), -0.5,
                    'fast_compile', 'fast_run', 'gpuarray')
gpu_seqopt.register('gpuarray_local_optimizations', gpu_optimizer, 1,
                    'fast_compile', 'fast_run', 'gpuarray', 'gpuarray_local_optimiziations')
gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,
                    'fast_compile', 'fast_run', 'gpuarray')
optdb.register('gpuarray_opt', gpu_seqopt,
               optdb.__position__.get('add_destroy_handler', 49.5) - 1,
               'gpuarray')
def register_opt(*tags, **kwargs):
    """Decorator: register a local optimizer in ``gpu_optimizer``.

    Parameters
    ----------
    *tags
        Extra tags for the registration ('fast_run' and 'gpuarray' are
        always added).
    **kwargs
        ``name`` overrides the optimizer's ``__name__`` as the
        registration key.  Remaining keyword arguments are forwarded to
        ``EquilibriumDB.register``.

    Bug fix: keyword arguments other than ``name`` used to be silently
    discarded; this file calls ``register_opt(final_opt=True, ...)`` for
    constant folding, so ``final_opt`` must reach
    ``EquilibriumDB.register``.
    """
    def f(local_opt):
        name = (kwargs and kwargs.pop('name')) or local_opt.__name__
        gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray',
                               *tags, **kwargs)
        return local_opt
    return f
def register_opt2(tracks, *tags, **kwargs):
    """Decorator: register a local optimizer in ``gpu_optimizer2``
    (the GraphToGPU database), wrapping plain functions with
    ``local_optimizer(tracks)``."""
    def decorator(local_opt):
        key = kwargs.pop('name') if kwargs else None
        key = key or local_opt.__name__
        if isinstance(local_opt, theano.gof.DB):
            wrapped = local_opt
        else:
            wrapped = theano.gof.local_optimizer(tracks)(local_opt)
        gpu_optimizer2.register(key, wrapped, 'fast_run', 'gpuarray', *tags)
        return local_opt
    return decorator
def register_inplace(*tags, **kwargs):
    """Decorator: register an inplace local optimizer in the global optdb
    (position 60) wrapped in a TopoOptimizer that warns on failure."""
    def decorator(local_opt):
        opt_name = (kwargs and kwargs.pop('name')) or local_opt.__name__
        topo_opt = TopoOptimizer(
            local_opt, failure_callback=TopoOptimizer.warn_inplace)
        optdb.register(opt_name, topo_opt,
                       60, 'fast_run', 'inplace', 'gpuarray', *tags)
        return local_opt
    return decorator
# Reuse a few CPU-side optimizations on the GPU database: shape tracking,
# constant folding (as a final opt), and assert removal (unsafe tag only).
register_opt('fast_compile')(theano.tensor.opt.local_track_shape_i)
register_opt(final_opt=True, name='gpua_constant_folding')(
    tensor.opt.constant_folding)
gpu_optimizer.register('local_remove_all_assert',
                       theano.tensor.opt.local_remove_all_assert,
                       'unsafe')
def safe_to_gpu(x, ctx_name):
    """Transfer *x* to GPU context *ctx_name* if it is a CPU tensor;
    anything else is returned unchanged."""
    if not isinstance(x.type, tensor.TensorType):
        return x
    return GpuFromHost(ctx_name)(x)
def safe_to_cpu(x):
    """Move *x* back to host memory if it lives on the GPU;
    anything else is returned unchanged."""
    if not isinstance(x.type, GpuArrayType):
        return x
    return x.transfer('cpu')
# Pre-built GPU elemwise instances for a few common scalar ops.
gpu_log = GpuElemwise(log)
gpu_neg = GpuElemwise(neg)
gpu_true_div = GpuElemwise(true_div)
def op_lifter(OP, cuda_only=False):
    """Decorator factory turning a maker function into a local optimizer
    that lifts CPU ops in *OP* to the GPU.

    The wrapped maker is called as ``maker(op, context_name, inputs,
    outputs)`` and may return a new Op, a list of output variables, or a
    single variable.  The lift triggers when an input comes from
    ``host_from_gpu`` (and is eligible per ``move_to_gpu``) or when every
    client of the node is a ``GpuFromHost``; complex dtypes are never
    lifted, and ``cuda_only`` restricts the lift to CUDA contexts.
    The replacement outputs are transferred back to the CPU so the
    surrounding graph keeps its types.
    """
    def f(maker):
        def local_opt(node):
            if type(node.op) in OP:
                # Either one of our inputs is on the gpu or
                # all of our clients are on the gpu.
                replace = False
                context_name = None
                # We replace if any input is a host_from_gpu
                for i in node.inputs:
                    if (i.owner and i.owner.op == host_from_gpu and
                            move_to_gpu(i)):
                        context_name = i.owner.inputs[0].type.context_name
                        replace = True
                        break
                if not replace:
                    # We replace if *all* clients are on the GPU
                    clients = [c for o in node.outputs for c in o.clients]
                    replace = len(clients) != 0
                    for c, idx in clients:
                        if (c == 'output' or
                                not isinstance(c.op, GpuFromHost)):
                            replace = False
                    # TODO: check that the clients want the same context?
                    if replace:
                        # All clients are GpuFromHost and we have at least one
                        context_name = clients[0][0].op.context_name
                # Check if we should try to move the computation to the gpu
                if (not replace or
                        (cuda_only and
                         get_context(context_name).kind != b'cuda') or
                        any(["complex" in getattr(i, 'dtype', "")
                             for i in node.inputs])):
                    return False
                # Tag the inputs so the maker can know the target context.
                for i in node.inputs:
                    i.tag.context_name = context_name
                new_op = maker(node.op, context_name, node.inputs, node.outputs)
                # This is needed as sometimes new_op inherits from OP.
                if new_op and new_op != node.op:
                    if isinstance(new_op, theano.Op):
                        new_outputs = new_op(*node.inputs, return_list=True)
                        to_cpu_fn = safe_to_cpu
                    elif isinstance(new_op, (tuple, list)):
                        new_outputs = new_op
                        to_cpu_fn = safe_to_cpu
                    else:  # suppose it is a variable on the GPU
                        new_outputs = [new_op]

                        def to_cpu_fn(x):
                            return x.transfer('cpu')
                    # Copy stack traces onto the GPU outputs and the
                    # transfers back to the CPU.
                    on_cpu = []
                    for old_output, new_output in zip(node.outputs,
                                                      new_outputs):
                        copy_stack_trace(old_output, new_output)
                        cpu = to_cpu_fn(new_output)
                        on_cpu.append(cpu)
                        copy_stack_trace(old_output, cpu)
                    return on_cpu
            return False
        local_opt.__name__ = maker.__name__
        return local_optimizer(OP)(local_opt)
    return f
class InputToGpuOptimizer(Optimizer):
    """Transfer the graph's free inputs to the GPU.

    Each eligible CPU input is wrapped as ``GpuFromHost(target)(input)``
    followed by a transfer back to the CPU, so later passes can cut the
    round trip where profitable.
    """

    def add_requirements(self, fgraph):
        fgraph.attach_feature(toolbox.ReplaceValidate())

    def apply(self, fgraph):
        """Wrap every movable input; skip inputs already on the GPU,
        inputs all of whose clients already transfer to the GPU, inputs
        tagged ``target='cpu'``, and tensors rejected by move_to_gpu."""
        for input in fgraph.inputs:
            if isinstance(input.type, GpuArrayType):
                continue
            # If all clients are outputs or transfers don't do anything.
            if (all(cl[0] == 'output' or isinstance(cl[0].op, GpuFromHost)
                    for cl in input.clients)):
                continue
            target = getattr(input.tag, 'target', None)
            if target == 'cpu':
                continue
            if (isinstance(input.type, tensor.TensorType) and
                    not move_to_gpu(input)):
                continue
            try:
                new_input = GpuFromHost(target)(input).transfer('cpu')
                fgraph.replace_validate(input, new_input,
                                        "InputToGpuOptimizer")
            except TypeError:
                # This could fail if the inputs are not TensorTypes
                pass
            except ContextNotDefined:
                if hasattr(input.tag, 'target'):
                    raise
                # If there is no context tag and no default context
                # then it stays on the CPU
                pass
# Run the input-transfer pass first in the GPU sequence (position 0).
gpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(),
                    0, 'fast_run', 'fast_compile', 'merge')
class GraphToGPU(Optimizer):
    """Whole-graph GPU transfer pass.

    Instead of applying local optimizers node by node on the existing
    fgraph, this rebuilds the entire graph in one topological sweep,
    lifting each node to the GPU with the registered local optimizers
    and replacing the fgraph outputs at the end.  ``apply`` returns a
    profiling tuple consumed by ``print_profile``/``merge_profile``.
    """

    def __init__(self, local_optimizers_all, local_optimizers_map):
        # local_optimizers_all: optimizers tried on every node.
        # local_optimizers_map: op (instance or class) -> optimizers.
        self.local_optimizers_all = local_optimizers_all
        self.local_optimizers_map = local_optimizers_map

    def add_requirements(self, fgraph):
        fgraph.attach_feature(toolbox.ReplaceValidate())

    def apply(self, fgraph):
        """Rebuild *fgraph* on the GPU and swap in the new outputs.

        Returns ``(self, toposort_timing, time_opts, node_created,
        process_count)`` for profiling.
        """
        # mapping maps old graph variables to their rebuilt counterparts.
        mapping = {}
        time_opts = {}
        node_created = {}
        process_count = {}
        t_topo = time.time()
        topo = fgraph.toposort()
        time_topo = time.time()
        toposort_timing = time_topo - t_topo
        # Building a new graph
        # Iterating through inputs of graph
        target = infer_context_name(*fgraph.inputs)
        for i in fgraph.inputs:
            if isinstance(i.type, tensor.TensorType) and move_to_gpu(i):
                mapping[i] = i.transfer(getattr(i.tag, 'target', target))
            else:
                mapping[i] = i
        for i in fgraph.variables:
            if isinstance(i, theano.Constant):
                mapping[i] = i
        # Pre-seed the profiling counters for every optimizer that could
        # apply to some node of the graph.
        for node in topo:
            for lopt in (self.local_optimizers_map.get(node.op, []) +
                         self.local_optimizers_map.get(type(node.op), []) +
                         self.local_optimizers_all):
                process_count.setdefault(lopt, 0)
                time_opts.setdefault(lopt, 0)
                node_created.setdefault(lopt, 0)
        for node in topo:
            if isinstance(node.op, HostFromGpu):
                mapping[node.outputs[0]] = mapping[node.inputs[0]]
                continue
            # Move only if any of the inputs are on the GPU.
            move_to_GPU = False
            context_name = None
            for i in [mapping[i] for i in node.inputs]:
                if isinstance(i.type, GpuArrayType):
                    context_name = i.type.context_name
                    move_to_GPU = True
                    break
            if (not move_to_GPU and
                    isinstance(node.op, (theano.tensor.Alloc,
                                         theano.tensor.AllocEmpty,
                                         theano.tensor.basic.Eye))):
                # If the Alloc[Empty] have a client that will be moved
                # to the GPU, we should move the Alloc* on the GPU.
                # We approximate this by supposing that if we have an
                # optimization for one of the clients op, then we will
                # move the client to the GPU.
                for c, _ in node.outputs[0].clients:
                    if (c != 'output' and
                            (self.local_optimizers_map.get(c.op, []) +
                             self.local_optimizers_map.get(type(c.op), []))):
                        move_to_GPU = True
            new_ops = None
            # Complex dtypes are not supported on the GPU.
            if move_to_GPU and any(["complex" in getattr(i, 'dtype', "")
                                    for i in node.inputs]):
                move_to_GPU = False
            # Apply the lifter
            if move_to_GPU:
                for lopt in (self.local_optimizers_map.get(node.op, []) +
                             self.local_optimizers_map.get(type(node.op), []) +
                             self.local_optimizers_all):
                    t_opt = time.time()
                    new_ops = lopt.transform(node.op, context_name,
                                             [mapping[i] for i in node.inputs],
                                             node.outputs)
                    t_opt2 = time.time()
                    time_opts[lopt] += t_opt2 - t_opt
                    if new_ops:
                        process_count[lopt] += 1
                        break
            # A lifter may return an Op, a list/tuple of variables, a
            # single variable, or nothing (node is cloned unchanged).
            outputs = []
            if isinstance(new_ops, theano.Op):
                with inherit_stack_trace(node.outputs):
                    outputs = new_ops(*[mapping[i] for i in node.inputs], return_list=True)
            elif not new_ops:
                newnode = node.clone_with_new_inputs([mapping.get(i) for i in node.inputs])
                outputs = newnode.outputs
            elif isinstance(new_ops, (tuple, list)):
                outputs = new_ops
            elif isinstance(new_ops, theano.Variable):
                outputs = [new_ops]
            for old_output, new_output in zip(node.outputs, outputs):
                copy_stack_trace(old_output, new_output)
            if new_ops:
                node_created[lopt] += len(graph.ops([mapping[i] for i in node.inputs], outputs))
                # Reject lifts that changed the output dtypes; fall back
                # to a plain clone of the original node.
                if any([getattr(old_o, 'dtype', None) != getattr(new_o, 'dtype', None)
                        for old_o, new_o in zip(outputs, node.outputs)]):
                    _logger.warning(
                        "The optimization %s returned bad dtype. Skipping it."
                        " Write to theano-dev mailing list about this." %
                        str(lopt))
                    newnode = node.clone_with_new_inputs([mapping.get(i) for i in node.inputs])
                    outputs = newnode.outputs
            for new_o, old_o in zip(outputs, node.outputs):
                assert len(outputs) == len(node.outputs)
                mapping[old_o] = new_o
        new_nodes = []
        for o in fgraph.outputs:
            new_o = mapping[o]
            if new_o.type != o.type:
                assert isinstance(o.type, tensor.TensorType)
                assert isinstance(new_o.type, GpuArrayType)
                # This condition is needed in the case one input is an
                # output of the graph. Without this, it would
                # introduce cycle as we don't replace correctly that
                if (new_o.owner and
                        isinstance(new_o.owner.op, GpuFromHost) and
                        new_o.owner.inputs[0].type == o.type):
                    new_o = new_o.owner.inputs[0]
                else:
                    new_o = copy_stack_trace(o, safe_to_cpu(new_o))
            new_nodes.append(new_o)
        fgraph.replace_all_validate(zip(fgraph.outputs, new_nodes),
                                    reason=self.__class__.__name__)
        return (self, toposort_timing, time_opts, node_created, process_count)

    @staticmethod
    def print_profile(stream, prof, level=0):
        """Print the profiling information produced by ``apply``."""
        (opt, toposort_timing, time_opts, node_created, process_count) = prof
        blanc = (' ' * level)
        print(blanc, "GraphToGPUOptimizer", end=' ', file=stream)
        print(blanc, getattr(opt, "name",
                             getattr(opt, "__name__", "")), file=stream)
        print(blanc, " time io_toposort %.3fs" % toposort_timing, file=stream)
        s = sum(time_opts.values())
        print(blanc, "Total time taken by local optimizers %.3fs " % s, file=stream)
        count_opt = []
        not_used = []
        not_used_time = 0
        for o, count in iteritems(process_count):
            if count > 0:
                count_opt.append((time_opts[o], count,
                                  node_created[o], o))
            else:
                not_used.append((time_opts[o], o))
                not_used_time += time_opts[o]
        if count_opt:
            print(blanc,
                  ' times - times applied - Node created - name:',
                  file=stream)
            count_opt.sort()
            for (t, count, n_created, o) in count_opt[::-1]:
                print(blanc, ' %.3fs - %d - %d - %s' % (
                    t, count, n_created, o), file=stream)
            print(blanc, ' %.3fs - in %d optimization that were not used (display only those with a runtime > 0)' % (
                not_used_time, len(not_used)), file=stream)
            not_used.sort(key=lambda nu: (nu[0], str(nu[1])))
            for (t, o) in not_used[::-1]:
                if t > 0:
                    print(blanc + " ", ' %.3fs - %s' % (t, o), file=stream)
            print(file=stream)

    @staticmethod
    def merge_profile(prof1, prof2):
        """Merge two profiling tuples (as returned by ``apply``)."""
        # (opt, toposort_timing, time_opts, node_created, process_count) = prof1
        local_optimizers = OrderedSet(prof1[0].local_optimizers_all).union(
            prof2[0].local_optimizers_all)

        def merge_dict(d1, d2):
            # Sum values of d2 into a copy of d1, key by key.
            d = d1.copy()
            for k, v in iteritems(d2):
                if k in d:
                    d[k] += v
                else:
                    d[k] = v
            return d
        local_optimizers_map = merge_dict(prof1[0].local_optimizers_map,
                                          prof2[0].local_optimizers_map)
        new_opt = GraphToGPU(local_optimizers, local_optimizers_map)
        toposort_timing = prof1[1] + prof2[1]
        time_opts = merge_dict(prof1[2], prof2[2])
        node_created = merge_dict(prof1[3], prof2[3])
        process_count = merge_dict(prof1[4], prof2[4])
        return (new_opt,
                toposort_timing,
                time_opts,
                node_created,
                process_count)

    def print_summary(self, stream=sys.stdout, level=0, depth=-1):
        """Recursively print this optimizer and its local optimizers."""
        print("%s%s (%i)" % (
            (' ' * level), self.__class__.__name__, id(self)), file=stream)
        if depth != 0:
            map_values = []
            for opts in self.local_optimizers_map.values():
                map_values += opts
            for opt in self.local_optimizers_all + map_values:
                opt.print_summary(stream, level=(level + 2), depth=(depth - 1))
@local_optimizer([GpuFromHost, GpuToGpu, HostFromGpu])
def local_cut_gpu_transfers(node):
    """Collapse chains of redundant device transfers.

    Handled patterns:
      gpu[ab] -> host -> gpub : identity or a direct GpuToGpu
      ?       -> gpua -> host : drop the round trip through the GPU
      ?       -> gpua -> gpub : merge consecutive GPU-to-GPU transfers
    """
    # gpu[ab] -> host -> gpub
    if (isinstance(node.op, GpuFromHost) and
            node.inputs[0].owner and
            isinstance(node.inputs[0].owner.op, HostFromGpu)):
        other = node.inputs[0].owner.inputs[0]
        if node.op.context_name == other.type.context_name:
            return [other]
        else:
            return [GpuToGpu(node.op.context_name)(other)]
    # ? -> gpua -> host
    elif (isinstance(node.op, HostFromGpu) and
          node.inputs[0].owner):
        n2 = node.inputs[0].owner
        # host ->
        if isinstance(n2.op, GpuFromHost):
            return [n2.inputs[0]]
        # gpub ->
        if isinstance(n2.op, GpuToGpu):
            return [n2.inputs[0].transfer('cpu')]
    # ? -> gpua -> gpub
    elif isinstance(node.op, GpuToGpu):
        # Transfer within same context
        if node.inputs[0].type.context_name == node.op.context_name:
            return [node.inputs[0]]
        if node.inputs[0].owner:
            n2 = node.inputs[0].owner
            # host ->
            if isinstance(n2.op, GpuFromHost):
                return [as_gpuarray_variable(n2.inputs[0],
                                             node.op.context_name)]
            # gpuc ->
            if isinstance(n2.op, GpuToGpu):
                if node.op.context_name == n2.inputs[0].type.context_name:
                    return [n2.inputs[0]]
                else:
                    return [node.op(n2.inputs[0])]
# Register the transfer-cutting pass both in the GPU cleanup database and
# in the global canonicalization phase, plus constant folding for the
# transfer ops.
gpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_transfers,
                        'fast_compile', 'fast_run', 'gpuarray')
gpu_cut_copies.register('cut_gpua_constant_transfers',
                        tensor.opt.constant_folding,
                        'fast_compile', 'fast_run', 'gpuarray')
optdb['canonicalize'].register('local_cut_gpua_host_gpua',
                               local_cut_gpu_transfers,
                               'fast_compile', 'fast_run', 'gpuarray')
@register_opt('fast_compile')
@local_optimizer([tensor.Alloc])
def local_gpua_alloc2(node):
    """Move an Alloc to the GPU (default context) when every client is a
    Join whose other inputs already come from the GPU or from allocs.

    The GpuAlloc result is transferred back to the CPU so the graph
    types stay valid; later passes cut the round trip.
    """
    try:
        get_context(None)
    except ContextNotDefined:
        # If there is no default context, do not move anything here.
        return
    if (isinstance(node.op, tensor.Alloc) and
            all(c != 'output' and
                isinstance(c.op, tensor.Join) and
                all(i.owner and
                    i.owner.op in [host_from_gpu, tensor.alloc]
                    for i in c.inputs[1:])
                for c, idx in node.outputs[0].clients)):
        return [GpuAlloc(None)(*node.inputs).transfer('cpu')]
@register_opt('fast_compile')
@op_lifter([tensor.Alloc])
@register_opt2([tensor.Alloc], 'fast_compile')
def local_gpuaalloc(op, context_name, inputs, outputs):
    """Lift Alloc to GpuAlloc in the inferred context."""
    return GpuAlloc(context_name)(*inputs)
@register_opt('fast_compile')
@op_lifter([tensor.AllocEmpty])
@register_opt2([tensor.AllocEmpty], 'fast_compile')
def local_gpua_alloc_empty(op, context_name, inputs, outputs):
    """Lift AllocEmpty to GpuAllocEmpty in the inferred context."""
    # We use _props_dict() to make sure that the GPU op know all the
    # CPU op props.
    return GpuAllocEmpty(context_name=context_name, **op._props_dict())(*inputs)
@register_opt()
@local_optimizer([GpuAlloc])
def local_gpualloc_memset_0(node):
    """Use the memset(0) fast path of GpuAlloc when the fill value is a
    scalar constant equal to zero."""
    if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
        inp = node.inputs[0]
        if (isinstance(inp, GpuArrayConstant) and
                inp.data.size == 1 and
                (np.asarray(inp.data) == 0).all()):
            new_op = GpuAlloc(node.op.context_name, memset_0=True)
            with inherit_stack_trace(node.outputs):
                return new_op(*node.inputs, return_list=True)
# Don't register by default.
@gof.local_optimizer([GpuAllocEmpty])
def local_gpua_alloc_empty_to_zeros(node):
    """Replace GpuAllocEmpty with a zero-filled GpuAlloc (debug aid:
    makes uninitialized-memory reads deterministic)."""
    if isinstance(node.op, GpuAllocEmpty):
        context_name = infer_context_name(*node.inputs)
        z = np.asarray(0, dtype=node.outputs[0].dtype)
        with inherit_stack_trace(node.outputs):
            return [GpuAlloc(context_name)(
                as_gpuarray_variable(z, context_name), *node.inputs)]
# Opt-in via the 'alloc_empty_to_zeros' tag only (not in fast_run).
optdb.register('local_gpua_alloc_empty_to_zeros',
               theano.tensor.opt.in2out(local_gpua_alloc_empty_to_zeros),
               49.3,
               'alloc_empty_to_zeros',)
@register_opt()
@local_optimizer([GpuContiguous])
def local_gpu_contiguous_gpu_contiguous(node):
    """Collapse GpuContiguous(GpuContiguous(x)) into the inner node."""
    if not isinstance(node.op, GpuContiguous):
        return
    parent = node.inputs[0]
    if parent.owner is not None and isinstance(parent.owner.op,
                                               GpuContiguous):
        return [parent]
@register_opt('fast_compile')
@op_lifter([tensor.extra_ops.CpuContiguous])
@register_opt2([tensor.extra_ops.CpuContiguous], 'fast_compile')
def local_gpua_contiguous(op, context_name, inputs, outputs):
    """Lift CpuContiguous to the gpu_contiguous op."""
    return gpu_contiguous
@register_opt('fast_compile')
@op_lifter([tensor.Reshape])
@register_opt2([tensor.Reshape], 'fast_compile')
def local_gpua_reshape(op, context_name, inputs, outputs):
    """Lift Reshape to GpuReshape with the same output rank."""
    return GpuReshape(op.ndim)
@register_opt('fast_compile')
@op_lifter([tensor.Rebroadcast])
@register_opt2([tensor.Rebroadcast], 'fast_compile')
def local_gpua_rebroadcast(op, context_name, inputs, outputs):
    """Apply the same Rebroadcast op to the GPU-transferred input."""
    return op(as_gpuarray_variable(inputs[0], context_name))
@register_opt('fast_compile')
@op_lifter([tensor.Flatten])
@register_opt2([tensor.Flatten], 'fast_compile')
def local_gpua_flatten(op, context_name, inputs, outputs):
    """Lift Flatten to GpuReshape: keep the first outdim-1 dimensions
    and collapse the rest into a trailing -1."""
    x = inputs[0]
    if op.outdim != 1:
        new_shape = [x.shape[d] for d in range(op.outdim - 1)]
    else:
        new_shape = []
    new_shape.append(-1)
    reshape = GpuReshape(op.outdim)
    return reshape(x, theano.tensor.as_tensor_variable(new_shape))
@register_opt('fast_compile')
@op_lifter([tensor.Elemwise])
@register_opt2([tensor.Elemwise], 'fast_compile')
def local_gpua_elemwise(op, context_name, inputs, outputs):
    """Lift Elemwise to GpuElemwise.

    Special cases: Erfinv/Erfcinv are swapped for CUDA-only GPU scalar
    ops (rejected on OpenCL); Pow casts all inputs to the output float
    dtype first; add/mul inputs are split into a tree of calls when they
    exceed the GPU kernel argument limit.  Multi-output elemwise is not
    lifted.
    """
    scal_op = op.scalar_op
    name = op.name
    if name:
        name = 'Gpu' + name
    if len(outputs) > 1:
        return
    have_cuda = False
    have_opencl = False
    if inputs and isinstance(inputs[0].type, GpuArrayType):
        kind = inputs[0].type.context.kind
        if kind.startswith(b'opencl'):
            have_opencl = True
        elif kind.startswith(b'cuda'):
            have_cuda = True
    # Scalar ops with a dedicated CUDA implementation.
    convert = {Erfinv: gpu_erfinv,
               Erfcinv: gpu_erfcinv}
    if scal_op.__class__ in convert:
        scal_op = convert[scal_op.__class__]
        if have_opencl:
            _logger.warning(
                'Function "%s" is not supported with OpenCL. Use "device=cuda" instead.' %
                scal_op)
        if not have_cuda:
            return None
    if not scal_op.supports_c_code(inputs, outputs):
        return
    res = GpuElemwise(scal_op, name=name,
                      inplace_pattern=copy.copy(op.inplace_pattern),
                      nfunc_spec=op.nfunc_spec)
    if isinstance(op.scalar_op, Pow):
        # Pow: cast every input to the (float) output dtype on the GPU
        # before applying the elemwise.
        out_dtype = outputs[0].dtype
        if out_dtype not in ['float16', 'float32', 'float64']:
            return
        new_inputs = []
        for inp in inputs:
            if inp.dtype != out_dtype:
                gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
                new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp, context_name)))
            else:
                new_inputs.append(as_gpuarray_variable(inp, context_name))
        gpu_output = res(*new_inputs)
        return [gpu_output]
    elif op.scalar_op in (scalar.add, scalar.mul):
        # add/mul may exceed the GPU kernel's max argument count: split
        # the inputs into a tree of smaller GpuElemwise calls.
        try:
            return [split_inputs(inputs, max_inputs_to_GpuElemwise(outputs), res)]
        except ValueError:
            return False
    else:
        return res
def split_inputs(inputs, max_nb_inputs, op):
    """Apply *op* over *inputs* as a balanced tree of applications.

    No single application of ``op`` receives more than ``max_nb_inputs``
    arguments: inputs are grouped into chunks, each chunk reduced by
    ``op``, and the chunk results reduced again until one application
    covers everything.

    Raises
    ------
    ValueError
        If ``max_nb_inputs <= 1`` while more than one input was given,
        since no finite tree could then reduce the inputs.
    """
    if max_nb_inputs <= 1 and len(inputs) > 1:
        raise ValueError("Can not split nodes because inputs' dimensionality and/or"
                         " number of outputs is too large")
    remaining = list(inputs)
    while len(remaining) > max_nb_inputs:
        # Reduce one tree level: one application of op per chunk.
        remaining = [op(*remaining[start:start + max_nb_inputs])
                     for start in range(0, len(remaining), max_nb_inputs)]
    return op(*remaining)
# Fuse chains of GpuElemwise nodes into single kernels, reusing the
# generic elemwise fusion machinery parameterized for the GPU op and its
# argument limit.
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
    GpuElemwise,
    max_inputs_to_GpuElemwise)
optdb.register('gpua_elemwise_fusion',
               # 48.5 move to gpu
               # 48.6 specialize
               # 49 cpu fusion
               # 49.5 add destroy handler
               tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 49,
               'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray')
# Make GpuElemwise nodes work in-place where the destroy handler allows it.
inplace_gpu_elemwise_opt = tensor.opt.InplaceElemwiseOptimizer(
    GpuElemwise)
optdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75,
               'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray')
# The CPU useless-elemwise cleanup applies unchanged to GPU graphs.
register_opt(tensor.opt.local_useless_elemwise)
@register_opt('fast_compile')
@op_lifter([tensor.DimShuffle])
@register_opt2([tensor.DimShuffle], 'fast_compile')
def local_gpua_dimshuffle(op, context_name, inputs, outputs):
    """Lift DimShuffle to GpuDimShuffle with the same order."""
    return GpuDimShuffle(op.input_broadcastable,
                         op.new_order)
@register_opt('fast_compile')
@op_lifter([tensor.SpecifyShape])
@register_opt2([tensor.SpecifyShape], 'fast_compile')
def local_gpua_specifyShape(op, context_name, inputs, outputs):
    """Lift SpecifyShape; skipped when the input is already a GPU variable."""
    if isinstance(inputs[0].type, GpuArrayType):
        return
    return local_gpua_specifyShape_graph(op, context_name, inputs, outputs)
@register_opt2([tensor.SpecifyShape], 'fast_compile')
def local_gpua_specifyShape_graph(op, context_name, inputs, outputs):
    """GraphToGPU path: re-apply specify_shape on the GPU-moved input,
    keeping the shape arguments unchanged."""
    inp = [as_gpuarray_variable(inputs[0], context_name)]
    inp += inputs[1:]
    return tensor.specify_shape(*inp)
@register_opt('fast_compile')
@op_lifter([theano.compile.ops.Shape])
def local_gpua_shape(op, context_name, inputs, outputs):
    """Lift Shape: move the input to the GPU and take its .shape there."""
    # op_lifter will call this opt too frequently as the output is
    # always on the CPU.
    if isinstance(inputs[0].type, GpuArrayType):
        return
    return local_gpua_shape_graph(op, context_name, inputs, outputs)
# Consistency/correctness fix: the lifter above (local_gpua_shape) refers to
# this op as theano.compile.ops.Shape, and theano.tensor has no `compile`
# submodule, so register with the same spelling here.
@register_opt2([theano.compile.ops.Shape], 'fast_compile')
def local_gpua_shape_graph(op, context_name, inputs, outputs):
    """GraphToGPU counterpart of local_gpua_shape: return the .shape of the
    GPU-transferred input (the shape itself stays a host-side value)."""
    return [as_gpuarray_variable(inputs[0], context_name).shape]
def gpu_print_wrapper(op, cnda):
    """Forward the printed GPU data to the wrapped CPU Print op.

    Copies ``cnda`` to a host ndarray and invokes the original op's
    ``global_fn`` with the original op (stored on ``op.old_op``), so the
    user-visible printing behaves exactly as it did on the CPU.
    """
    host_data = np.asarray(cnda)
    op.old_op.global_fn(op.old_op, host_data)
@register_opt('fast_compile')
@op_lifter([tensor.printing.Print])
@register_opt2([tensor.printing.Print], 'fast_compile')
def local_gpua_print_op(op, context_name, inputs, outputs):
    """Lift a Print op to run on a GPU variable.

    A clone of the Print op is built whose global_fn is gpu_print_wrapper,
    which copies the data back to the host before printing; the original
    op is kept on ``old_op`` so the wrapper can delegate to it.
    """
    (x,) = inputs
    with inherit_stack_trace(outputs):
        x_gpu = as_gpuarray_variable(x, context_name=context_name)
        lifted = op.__class__(global_fn=gpu_print_wrapper)
        lifted.old_op = op
        return lifted(x_gpu)
@register_opt('fast_compile')
@local_optimizer([PdbBreakpoint])
def local_gpu_pdbbreakpoint_op(node):
    """Move a PdbBreakpoint's monitored variables to the GPU where useful.

    A monitored input is transferred when it already comes from the GPU or
    when its corresponding output feeds a GpuFromHost; outputs whose input
    was transferred are brought back to the CPU so the node's external
    interface is unchanged.  Returns False when no input needed a transfer.
    """
    if isinstance(node.op, PdbBreakpoint):
        old_inputs = node.inputs
        old_outputs = node.outputs
        # inputs[0] is the breakpoint condition; it always stays on the CPU.
        new_inputs = node.inputs[:1]
        input_transfered = []
        # Go through the monitored variables, only transferring on GPU those
        # for which the input comes from the GPU or the output will be
        # transferred on the GPU.
        nb_monitored_vars = len(node.outputs)
        for i in range(nb_monitored_vars):
            inp = old_inputs[i + 1]
            out = old_outputs[i]
            input_is_from_gpu = (inp.owner and
                                 isinstance(inp.owner.op, HostFromGpu))
            output_goes_to_gpu = False
            for c in out.clients:
                if c == 'output':
                    continue
                if isinstance(c[0].op, GpuFromHost):
                    output_goes_to_gpu = True
                    context_name = c[0].op.context_name
                    break
            if input_is_from_gpu:
                # The op should be applied on the GPU version of the input
                new_inputs.append(inp.owner.inputs[0])
                input_transfered.append(True)
            elif output_goes_to_gpu:
                # The input should be transferred to the gpu
                new_inputs.append(as_gpuarray_variable(inp, context_name))
                input_transfered.append(True)
            else:
                # No transfer is required.
                new_inputs.append(inp)
                input_transfered.append(False)
        # Only continue the optimization if at least one input has been
        # transferred to the gpu
        if not any(input_transfered):
            return False
        # Apply the op on the new inputs
        with inherit_stack_trace(node.outputs):
            new_op_outputs = node.op(*new_inputs, return_list=True)
        # Propagate the transfer to the gpu through the outputs that require
        # it
        new_outputs = []
        for i in range(len(new_op_outputs)):
            if input_transfered[i]:
                new_outputs.append(new_op_outputs[i].transfer('cpu'))
            else:
                new_outputs.append(new_op_outputs[i])
        return new_outputs
    return False
@register_opt('fast_compile')
@op_lifter([IfElse])
@register_opt2([IfElse], 'fast_compile')
def local_gpua_lazy_ifelse(op, context_name, inputs, outputs):
    """Lift IfElse to its gpu=True variant, moving branch pairs to the GPU.

    Each true/false input pair is transferred together so both branches of
    an output keep the same variable type (see the comment below).
    """
    if op.gpu:
        return
    c = inputs[0]
    inps = []
    falses = []
    # ifelse need corresponding true/false inputs variables to be of the same type.
    # But we can't rely on inputs to respect that, as GraphToGPU don't enforce that.
    # So we need to take care of this here.
    for v1, v2 in zip(inputs[1:1 + op.n_outs], inputs[1 + op.n_outs:]):
        if ((isinstance(v1.type, tensor.TensorType) and move_to_gpu(v1)) or
                isinstance(v1.type, GpuArrayType) or
                isinstance(v2.type, GpuArrayType)):
            inps.append(as_gpuarray_variable(v1, context_name))
            falses.append(as_gpuarray_variable(v2, context_name))
        else:
            inps.append(v1)
            falses.append(v2)
    inps.extend(falses)
    return IfElse(op.n_outs, gpu=True)(c, *inps, return_list=True)
@register_opt('fast_compile')
@op_lifter([tensor.Join])
@register_opt2([tensor.Join], 'fast_compile')
def local_gpua_join(op, context_name, inputs, outputs):
    """Lift Join to the GPU implementation gpu_join."""
    return gpu_join
@register_opt('fast_compile')
@local_optimizer([GpuJoin])
def local_gpua_join_1(node):
    """Replace a GpuJoin of a single tensor with that tensor itself.

    Two node inputs means only the axis plus one joined tensor, so the
    join is the identity on that tensor.
    """
    if not isinstance(node.op, GpuJoin):
        return None
    if len(node.inputs) == 2:
        return [node.inputs[1]]
    return None
@register_opt('fast_compile')
@op_lifter([tensor.Split])
@register_opt2([tensor.Split], 'fast_compile')
def local_gpua_split(op, context_name, inputs, outputs):
    """Lift Split to GpuSplit with the same number of splits."""
    # TODO use props
    return GpuSplit(op.len_splits)
@register_opt('fast_compile')
@op_lifter([tensor.Subtensor])
def local_gpua_subtensor(op, context_name, inputs, outputs):
    """Lift Subtensor to GpuSubtensor.

    Special case: when the input is a host copy of a fresh GPU transfer of
    a graph input/shared variable with a single client, the slice is kept
    on the GPU and only the (smaller) result is moved — unless the sliced
    output itself is consumed on the GPU, in which case lifting is skipped
    so the transfer-removal opts can handle it instead.
    """
    x = inputs[0]
    if (x.owner and isinstance(x.owner.op, HostFromGpu)):
        gpu_x = x.owner.inputs[0]
        if (gpu_x.owner and
                isinstance(gpu_x.owner.op, GpuFromHost) and
                # And it is a shared var or an input of the graph.
                not gpu_x.owner.inputs[0].owner):
            if len(x.clients) == 1:
                if any([n == 'output' or any([isinstance(v.type, GpuArrayType)
                                              for v in n.inputs + n.outputs])
                        for n, _ in outputs[0].clients]):
                    return
                else:
                    return [gpu_x.owner.op(outputs[0]).transfer('cpu')]
    return GpuSubtensor(op.idx_list)
@register_opt2([tensor.Subtensor], 'fast_compile')
def local_gpua_subtensor_graph(op, context_name, inputs, outputs):
    """GraphToGPU variant of local_gpua_subtensor.

    Skips lifting when the input is a GpuFromHost of an unowned variable
    whose only client would be this subtensor — slicing on the CPU first
    gives a smaller transfer in that case.
    """
    # We need different code as the condition is different as inputs
    # aren't the same.
    x = inputs[0]
    # on the CPU and the only client of the CPU node is this
    # subtensor. This allow to have a smaller transfer.
    if (x.owner and isinstance(x.owner.op, GpuFromHost)):
        cpu_x = x.owner.inputs[0]
        # And it is a shared var or an input of the graph.
        # and is used by only 1 node.
        # x is in the new graph, so we can't tests its number of clients.
        if not cpu_x.owner and len(cpu_x.clients) == 1:
            c = outputs[0].clients
            if len(c) == 1:
                return
    return GpuSubtensor(op.idx_list)
@register_opt('fast_compile')
@op_lifter([tensor.IncSubtensor])
@register_opt2([tensor.IncSubtensor], 'fast_compile')
def local_gpua_inc_subtensor(op, context_name, inputs, outputs):
    """Lift IncSubtensor to GpuIncSubtensor, preserving the op's flags and
    the NanGuardMode opt-out tag carried by the original output."""
    gpu_op = GpuIncSubtensor(op.idx_list, op.inplace,
                             op.set_instead_of_inc,
                             op.destroyhandler_tolerate_aliased)
    result = gpu_op(*inputs)
    # Carry over the flag NanGuardMode checks on outputs; defaults to True
    # when the original output never set it.
    result.tag.nan_guard_mode_check = getattr(
        outputs[0].tag, 'nan_guard_mode_check', True)
    return result
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor1])
@register_opt2([tensor.AdvancedSubtensor1], 'fast_compile')
def local_gpua_advanced_subtensor1(op, context_name, inputs, outputs):
    """Lift AdvancedSubtensor1 (integer-vector indexing) to the GPU."""
    return GpuAdvancedSubtensor1()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor])
@register_opt2([tensor.AdvancedSubtensor], 'fast_compile')
def local_gpua_advanced_subtensor(op, context_name, inputs, outputs):
    """Lift general AdvancedSubtensor indexing to the GPU."""
    return GpuAdvancedSubtensor()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedBooleanSubtensor])
@register_opt2([tensor.AdvancedBooleanSubtensor], 'fast_compile')
def local_gpua_advanced_boolean_subtensor(op, context_name, inputs, outputs):
    """Lift boolean-mask AdvancedBooleanSubtensor to the GPU."""
    return GpuAdvancedBooleanSubtensor()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedIncSubtensor1])
@register_opt2([tensor.AdvancedIncSubtensor1], 'fast_compile')
def local_gpua_advanced_incsubtensor1(op, context_name, inputs, outputs):
    """Lift AdvancedIncSubtensor1 to the GPU.

    Picks between the generic GpuAdvancedIncSubtensor1 and the faster
    _dev20 kernel depending on the operand ndim and config.deterministic
    (the _dev20 kernel is only used when determinism is not requested as
    'more').
    """
    x, y, ilist = inputs
    set_instead_of_inc = op.set_instead_of_inc
    if (x.ndim == 1 and y.ndim == 0 and
            config.deterministic == 'default'):
        # Promote to 2-D so the _dev20 kernel applies, then drop the
        # broadcast column again with a GpuDimShuffle.
        x = x.dimshuffle(0, 'x')
        y = y.dimshuffle('x', 'x')
        ret = GpuAdvancedIncSubtensor1_dev20(
            set_instead_of_inc=set_instead_of_inc)(x, y, ilist)
        ret = GpuDimShuffle(ret.type.broadcastable, [0])(ret)
        return ret
    elif (x.ndim != 2 or y.ndim != 2 or
            config.deterministic == 'more'):
        return GpuAdvancedIncSubtensor1(
            set_instead_of_inc=set_instead_of_inc)
    else:
        return GpuAdvancedIncSubtensor1_dev20(
            set_instead_of_inc=set_instead_of_inc)
def local_gpua_advanced_incsubtensor(op, context_name, inputs, outputs):
    """Lift AdvancedIncSubtensor to the GPU.

    Only the increment form is supported; the set_instead_of_inc form is
    refused (returns False).
    """
    if op.set_instead_of_inc:
        return False
    return GpuAdvancedIncSubtensor()
def local_gpua_advanced_boolean_incsubtensor(op, context_name, inputs, outputs):
    """Lift AdvancedBooleanIncSubtensor to the GPU.

    Refused (returns False) for the set_instead_of_inc form and for any
    input arity other than the 3-input (x, y, mask) case.
    """
    if op.set_instead_of_inc or len(inputs) != 3:
        return False
    return GpuAdvancedBooleanIncSubtensor()
@register_inplace()
@local_optimizer([GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20])
def local_advincsub1_gpua_inplace(node):
    """Swap a non-inplace GpuAdvancedIncSubtensor1(_dev20) for its
    inplace clone (registered under the inplace pass)."""
    targets = (GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20)
    if not isinstance(node.op, targets):
        return None
    if node.op.inplace:
        return None
    return [node.op.clone_inplace()(*node.inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.AllocDiag])
@register_opt2([theano.tensor.AllocDiag], 'fast_compile')
def local_gpu_alloc_diag(op, context_name, inputs, outputs):
    """Lift AllocDiag to GpuAllocDiag; only the 2-D (matrix) output case
    is handled, others are refused."""
    if outputs[0].ndim == 2:
        return GpuAllocDiag(offset=op.offset)
    return False
@register_opt('fast_compile')
@op_lifter([tensor.ExtractDiag])
@register_opt2([theano.tensor.ExtractDiag], 'fast_compile')
def local_gpu_extract_diag(op, context_name, inputs, outputs):
    """Lift ExtractDiag to GpuExtractDiag with identical parameters."""
    return GpuExtractDiag(offset=op.offset, axis1=op.axis1, axis2=op.axis2, view=op.view)
@register_opt('fast_compile')
@op_lifter([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod])
@register_opt2([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod], 'fast_compile')
def local_gpua_careduce(op, context_name, inputs, outputs):
    """Lift CAReduce/Sum/Prod to a GPU reduction.

    Only add/mul/max/min scalar ops are handled.  OpenCL contexts use
    GpuCAReduceCPY (add/mul only); CUDA uses GpuCAReduceCuda.  When the
    chosen kernel cannot handle the reduction pattern, adjacent dimensions
    sharing the same reduce flag are collapsed via reshape, the reduction
    runs on the collapsed tensor, and the result is reshaped back to the
    expected output ndim.
    """
    if isinstance(op.scalar_op, (scalar.Add, scalar.Mul,
                                 scalar.Maximum, scalar.Minimum)):
        ctx = get_context(context_name)
        if ctx.kind == b'opencl':
            op2 = GpuCAReduceCPY
            if op.scalar_op not in [scalar.add, scalar.mul]:
                return
        elif ctx.kind == b'cuda':
            op2 = GpuCAReduceCuda
        else:
            return False
        x, = inputs
        idtype = x.dtype
        adtype = getattr(op, 'acc_dtype', None)
        odtype = getattr(op, 'dtype', outputs[0].dtype)
        # Force accumulator to float32 for float32 inputs since tree
        # reduction will not loose as much precision as linear
        # accumulation and float64 is much slower on GPU.
        if idtype == 'float32' and odtype == 'float32':
            adtype = 'float32'
        greduce = op2(
            op.scalar_op, axis=op.axis,
            dtype=odtype,
            acc_dtype=adtype)
        with inherit_stack_trace(outputs):
            gvar = greduce(x)
        # We need to have the make node called, otherwise the mask can
        # be None
        if (op2 is GpuCAReduceCPY or
                gvar.owner.op.supports_c_code([
                    as_gpuarray_variable(x, context_name)])):
            return greduce
        else:
            # Try to make a simpler pattern based on reshaping
            # The principle is that if two adjacent dimensions have
            # the same value in the reduce_mask, then we can reshape
            # to make them a single dimension, do the reduction, and
            # then reshape to get them back.
            if op.axis is None:
                reduce_mask = [1] * x.type.ndim
            else:
                reduce_mask = [0] * x.type.ndim
                for a in op.axis:
                    assert reduce_mask[a] == 0
                    reduce_mask[a] = 1
            new_in_shp = [shape_i(x, 0)]
            new_mask = [reduce_mask[0]]
            # NOTE(review): xrange — presumably from theano's Python-2
            # compat imports at the top of this file; confirm.
            for i in xrange(1, x.type.ndim):
                if reduce_mask[i] == reduce_mask[i - 1]:
                    new_in_shp[-1] *= shape_i(x, i)
                else:
                    new_mask.append(reduce_mask[i])
                    new_in_shp.append(shape_i(x, i))
            new_axis = []
            for idx, m in enumerate(new_mask):
                if m == 1:
                    new_axis.append(idx)
            greduce = op2(
                op.scalar_op,
                axis=new_axis, reduce_mask=new_mask,
                dtype=odtype,
                acc_dtype=adtype)
            with inherit_stack_trace(outputs):
                reshaped_x = x.reshape(tensor.stack(new_in_shp))
                gpu_reshaped_x = as_gpuarray_variable(reshaped_x, context_name)
                # We need to have the make node called, otherwise the mask can
                # be None
                gvar = greduce(gpu_reshaped_x)
                reshaped_gpu_inputs = [gpu_reshaped_x]
                if greduce.supports_c_code(reshaped_gpu_inputs):
                    reduce_reshaped_x = greduce(gpu_reshaped_x)
                    if reduce_reshaped_x.ndim != outputs[0].ndim:
                        out_shp = []
                        for i in range(x.ndim):
                            if i not in op.axis:
                                out_shp.append(shape_i(x, i))
                        unreshaped_reduce = GpuReshape(len(out_shp))(
                            reduce_reshaped_x,
                            tensor.stack(out_shp))
                    else:
                        unreshaped_reduce = reduce_reshaped_x
                    return [unreshaped_reduce]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemv, tensor.blas_c.CGemv])
@register_opt2([tensor.blas.Gemv], 'fast_compile')
def local_gpua_gemv(op, context_name, inputs, outputs):
    """Lift Gemv/CGemv to the GPU.

    float16 is rewritten as a GEMM on column vectors (see comment below);
    dtypes other than float32/float64 are left on the CPU.
    """
    if inputs[0].dtype == 'float16':
        # Use gemm implementation as cublas gemv don't support float16
        return gpugemm_no_inplace(inputs[0][:, None],
                                  inputs[1],
                                  inputs[2],
                                  inputs[3][:, None],
                                  inputs[4]).dimshuffle(0)
    if inputs[0].dtype not in ['float32', 'float64']:
        return
    if op.inplace:
        return gpugemv_inplace
    else:
        return gpugemv_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemm])
@register_opt2([tensor.blas.Gemm], 'fast_compile')
def local_gpua_gemm(op, context_name, inputs, outputs):
    """Lift Gemm to the GPU for float16/float32/float64 operands, keeping
    the inplace flag of the CPU op."""
    if inputs[0].dtype not in ('float16', 'float32', 'float64'):
        return None
    return gpugemm_inplace if op.inplace else gpugemm_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.BatchedDot])
@register_opt2([tensor.blas.BatchedDot], 'fast_compile')
def local_gpua_gemmbatch(op, context_name, inputs, outputs):
    """Lift BatchedDot to gpugemmbatch.

    2-D operands are promoted to 3-D with a broadcast axis (and the
    matching output dimension dropped afterwards), operands are cast to
    the output dtype when needed, and the result buffer is allocated with
    GpuAllocEmpty before calling gemmbatch with alpha=1, beta=0.
    """
    if inputs[0].dtype not in ['float16', 'float32', 'float64']:
        return
    with inherit_stack_trace(outputs):
        a, b = inputs
        output_dims = [0, 1, 2]
        if a.ndim == 2:
            a = GpuDimShuffle(a.broadcastable, (0, 'x', 1))(a)
            del output_dims[1]
        if b.ndim == 2:
            b = GpuDimShuffle(b.broadcastable, (0, 1, 'x'))(b)
            del output_dims[-1]
        out_dtype = outputs[0].dtype
        if a.dtype != out_dtype or b.dtype != out_dtype:
            gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
            if a.dtype != out_dtype:
                a = gpu_cast_op(a)
            if b.dtype != out_dtype:
                b = gpu_cast_op(b)
        c = GpuAllocEmpty(out_dtype, context_name)(
            a.shape[0], a.shape[1], b.shape[2])
        out = gpugemmbatch_no_inplace(c, np.asarray(1.0, dtype=out_dtype),
                                      a, b, np.asarray(0.0, dtype=out_dtype))
        if len(output_dims) != 3:
            out = GpuDimShuffle(out.broadcastable, output_dims)(out)
        return out
@register_opt()
@alpha_merge(GpuGemm, alpha_in=1, beta_in=4)
def local_gpua_gemm_alpha_merge(node, *inputs):
    """Fold surrounding scalar multiplications into GpuGemm's alpha/beta."""
    return [gpugemm_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0)
def local_gpua_gemm_output_merge(node, *inputs):
    """Fold an addition with the output buffer into GpuGemm's beta term."""
    return [gpugemm_no_inplace(*inputs)]
@register_opt()
@alpha_merge(GpuGemmBatch, alpha_in=1, beta_in=4)
def local_gpua_gemmbatch_alpha_merge(node, *inputs):
    """Fold scalar multiplications into GpuGemmBatch's alpha/beta."""
    return [gpugemmbatch_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemmBatch, alpha_in=1, beta_in=4, out_in=0)
def local_gpua_gemmbatch_output_merge(node, *inputs):
    """Fold an output-buffer addition into GpuGemmBatch's beta term."""
    return [gpugemmbatch_no_inplace(*inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer])
@register_opt2([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer], 'fast_compile')
def local_gpua_ger(op, context_name, inputs, outputs):
    """Lift the Ger rank-1 update to GpuGer for float32/float64 operands,
    forwarding the destructive flag as the inplace flag."""
    if inputs[0].dtype in ('float32', 'float64'):
        return GpuGer(inplace=op.destructive)
    return None
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22])
@register_opt2([tensor.blas.Dot22], 'fast_compile')
def local_gpua_dot22(op, context_name, inputs, outputs):
    """Lift Dot22 (matrix-matrix dot) to gpu_dot22."""
    return gpu_dot22
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22Scalar])
@register_opt2([tensor.blas.Dot22Scalar], 'fast_compile')
def local_gpua_dot22scalar(op, context_name, inputs, outputs):
    """Lift Dot22Scalar as a GPU gemm with alpha=a and beta=0 into a fresh
    GpuAllocEmpty output buffer."""
    with inherit_stack_trace(outputs):
        x, y, a = inputs
        x = as_gpuarray_variable(x, context_name)
        y = as_gpuarray_variable(y, context_name)
        z = GpuAllocEmpty(x.dtype, context_name)(x.shape[0], y.shape[1])
        return [gpugemm_no_inplace(z, a, x, y, 0)]
@register_opt('fast_compile')
@op_lifter([tensor.basic.Eye])
@register_opt2([tensor.basic.Eye], 'fast_compile')
def local_gpua_eye(op, context_name, inputs, outputs):
    """Lift Eye to GpuEye with the same dtype in the target context."""
    return GpuEye(dtype=op.dtype, context_name=context_name)
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias])
@register_opt2([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], 'fast_compile')
def local_gpua_crossentropysoftmaxargmax1hotwithbias(op, context_name, inputs, outputs):
    """Lift the fused crossentropy+softmax+argmax op to its GPU version."""
    return gpu_crossentropy_softmax_argmax_1hot_with_bias
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx])
@register_opt2([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], 'fast_compile')
def local_gpua_crossentropysoftmax1hotwithbiasdx(op, context_name, inputs, outputs):
    """Lift the crossentropy-softmax gradient op to its GPU version."""
    return gpu_crossentropy_softmax_1hot_with_bias_dx
@register_opt('fast_compile')
@op_lifter([tensor.nnet.Softmax])
@register_opt2([tensor.nnet.Softmax], 'fast_compile')
def local_gpua_softmax(op, context_name, inputs, outputs):
    """Lift Softmax to gpu_softmax."""
    return gpu_softmax
@register_opt('fast_compile')
@op_lifter([tensor.nnet.SoftmaxWithBias])
@register_opt2([tensor.nnet.SoftmaxWithBias], 'fast_compile')
def local_gpua_softmaxwithbias(op, context_name, inputs, outputs):
    """Lift SoftmaxWithBias to gpu_softmax_with_bias."""
    return gpu_softmax_with_bias
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropyCategorical1Hot])
@register_opt2([tensor.nnet.CrossentropyCategorical1Hot], 'fast_compile')
def local_gpu_crossentropycategorical1hot(op, context_name, inputs, outputs):
    """Rewrite CrossentropyCategorical1Hot as -log(coding[i, one_of_n[i]])
    using GPU elemwise ops on the indexed coding matrix."""
    coding, one_of_n = inputs
    idx0 = theano.tensor.arange(shape_i(coding, 0))
    return [gpu_neg(gpu_log(coding[idx0, one_of_n]))]
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropyCategorical1HotGrad])
@register_opt2([tensor.nnet.CrossentropyCategorical1HotGrad], 'fast_compile')
def local_gpu_crossentropycategorical1hotgrad(op, context_name, inputs, outputs):
    """Rewrite the CrossentropyCategorical1Hot gradient on the GPU.

    Builds a zero tensor shaped like ``coding`` and sets the selected
    entries to -gy / coding[i, one_of_n[i]].
    """
    gy, coding, one_of_n = inputs
    idx0 = theano.tensor.arange(shape_i(coding, 0))
    # Zero-filled buffer with coding's shape (memset_0 allocation).
    z = GpuAlloc(context_name, memset_0=True)(
        as_gpuarray_variable(np.zeros((), dtype=coding.dtype), context_name),
        *[shape_i(coding, i) for i in xrange(coding.ndim)])
    gcoding = tensor.set_subtensor(
        z[idx0, one_of_n],
        gpu_neg(gpu_true_div(gy, coding[idx0, one_of_n])))
    return [gcoding.transfer(context_name)]
@register_opt('fast_compile')
@op_lifter([theano.tensor.opt.Assert])
def local_gpua_assert(op, context_name, inputs, outputs):
    """Lift Assert; skipped when the checked value already lives on the GPU."""
    if isinstance(inputs[0].type, GpuArrayType):
        return
    return local_gpua_assert_graph(op, context_name, inputs, outputs)
@register_opt2([theano.tensor.opt.Assert], 'fast_compile')
def local_gpua_assert_graph(op, context_name, inputs, outputs):
    """Re-apply the same Assert on the GPU-moved value, keeping the
    condition inputs unchanged."""
    return [op(as_gpuarray_variable(inputs[0], context_name),
               *inputs[1:])]
@register_opt('fast_compile')
@op_lifter([ConvOp])
@register_opt2([ConvOp], 'fast_compile')
def local_gpua_error_convop(op, context_name, inputs, outputs):
    """Refuse to lift the legacy ConvOp: it has no gpuarray implementation.

    Fails loudly so users migrate to the conv2d interface named below.
    """
    assert False, """
ConvOp does not work with the gpuarray backend.
Use the new convolution interface to have GPU convolution working:
theano.tensor.nnet.conv2d()
"""
@register_opt('fast_compile')
@op_lifter([SparseBlockGemv])
@register_opt2([SparseBlockGemv], 'fast_compile')
def local_gpua_sparseblockgemv(op, context_name, inputs, outputs):
    """Lift SparseBlockGemv to the GPU (float16 stays on the CPU),
    preserving the inplace flag via the matching prebuilt op instance."""
    if inputs[0].dtype == 'float16':
        return None
    return (gpu_sparse_block_gemv_inplace if op.inplace
            else gpu_sparse_block_gemv)
@register_opt('fast_compile')
@op_lifter([SparseBlockOuter])
@register_opt2([SparseBlockOuter], 'fast_compile')
def local_gpua_sparseblockouter(op, context_name, inputs, outputs):
    """Lift SparseBlockOuter to the GPU (float16 stays on the CPU),
    preserving the inplace flag via the matching prebuilt op instance."""
    if inputs[0].dtype == 'float16':
        return None
    return (gpu_sparse_block_outer_inplace if op.inplace
            else gpu_sparse_block_outer)
@register_inplace()
@local_optimizer([GpuSparseBlockGemv], inplace=True)
def local_inplace_sparseblockgemv(node):
    """Swap a non-inplace GpuSparseBlockGemv for the prebuilt inplace
    variant (registered under the inplace pass)."""
    if not isinstance(node.op, GpuSparseBlockGemv):
        return None
    if node.op.inplace:
        return None
    return [gpu_sparse_block_gemv_inplace(*node.inputs)]
@register_inplace()
@local_optimizer([GpuSparseBlockOuter], inplace=True)
def local_inplace_sparseblockouter(node):
    """Swap a non-inplace GpuSparseBlockOuter for its inplace variant.

    Consistency fix: use the prebuilt gpu_sparse_block_outer_inplace
    instance (as local_gpua_sparseblockouter and the gemv sibling above
    do) instead of constructing a fresh GpuSparseBlockOuter(inplace=True)
    on every replacement.
    """
    if isinstance(node.op, GpuSparseBlockOuter) and not node.op.inplace:
        return [gpu_sparse_block_outer_inplace(*node.inputs)]
@local_optimizer([GpuFromHost,
                  AbstractConv2d,
                  AbstractConv2d_gradWeights,
                  AbstractConv2d_gradInputs,
                  AbstractConv3d,
                  AbstractConv3d_gradWeights,
                  AbstractConv3d_gradInputs])
def local_conv_gpu_conv(node):
    """Move abstract convolutions across host/GPU transfer boundaries.

    Two patterns: GpuFromHost(AbstractConv(...)) pushes the conv onto GPU
    inputs directly, and an AbstractConv with at least one GPU-originating
    input is rebuilt with both image and kernel as GPU variables.  The
    broadcast pattern of the original output is re-imposed on the result.
    """
    if isinstance(node.op, GpuFromHost):
        host_input = node.inputs[0]
        if host_input.owner and isinstance(host_input.owner.op,
                                           BaseAbstractConv):
            conv = host_input.owner.op
            inps = list(host_input.owner.inputs)
            ctx = infer_context_name(*inps)
            inps[0] = as_gpuarray_variable(inps[0], context_name=ctx)
            inps[1] = as_gpuarray_variable(inps[1], context_name=ctx)
            out = conv(*inps)
            out = theano.tensor.patternbroadcast(out,
                                                 node.outputs[0].broadcastable)
            return [out]
    if isinstance(node.op, BaseAbstractConv):
        inp1 = node.inputs[0]
        inp2 = node.inputs[1]
        # Already fully on the GPU: nothing to do.
        if ((isinstance(inp1.type, GpuArrayType) and
             isinstance(inp2.type, GpuArrayType))):
            return
        inp1_on_gpu = (isinstance(inp1.type, GpuArrayType) or
                       (inp1.owner and isinstance(inp1.owner.op, HostFromGpu)))
        inp2_on_gpu = (isinstance(inp2.type, GpuArrayType) or
                       (inp2.owner and isinstance(inp2.owner.op, HostFromGpu)))
        if inp1_on_gpu or inp2_on_gpu:
            conv = node.op
            inps = list(node.inputs)
            ctx = infer_context_name(*inps)
            inps[0] = as_gpuarray_variable(inps[0], context_name=ctx)
            inps[1] = as_gpuarray_variable(inps[1], context_name=ctx)
            out = conv(*inps)
            out = theano.tensor.patternbroadcast(
                out,
                node.outputs[0].broadcastable)
            # Keep the replacement's type compatible with the original output.
            if isinstance(node.outputs[0].type, tensor.TensorType):
                return [tensor.as_tensor_variable(out)]
            else:
                return [out]
register_opt()(local_conv_gpu_conv)
@local_optimizer([AbstractConv2d])
def local_abstractconv_gemm(node):
    """Replace AbstractConv2d on GPU inputs with GpuCorrMM-based ops.

    Full-mode unit-stride convolution is expressed as a gradInputs
    correlation on the transposed (and possibly flipped) kernel; otherwise
    a plain GpuCorrMM is used.  For small valid-mode cases a shape-based
    heuristic may prefer the gradWeights formulation instead.
    """
    if not isinstance(node.op, AbstractConv2d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    ctx = infer_context_name(img, kern)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    unshared = node.op.unshared
    # Slices that reverse the last two (spatial) axes of the kernel.
    flip = (slice(None),) * (kern.ndim - 2) + \
        (slice(None, None, -1),) * 2
    kern_axes = (1, 0) + tuple(i for i in range(2, kern.ndim))
    if ((border_mode == 'full') and (subsample == (1, 1)) and num_groups == 1 and not unshared):
        if not node.op.filter_flip:
            kern = kern[flip]
        kern = kern.dimshuffle(kern_axes)
        rval = GpuCorrMM_gradInputs('valid',
                                    subsample,
                                    filter_dilation)(
            gpu_contiguous(kern), gpu_contiguous(img))
    else:
        if node.op.filter_flip:
            kern = kern[flip]
        rval = GpuCorrMM(border_mode,
                         subsample,
                         filter_dilation,
                         num_groups,
                         unshared)(gpu_contiguous(img),
                                   gpu_contiguous(kern))
        if ((subsample == (1, 1)) and (filter_dilation == (1, 1)) and
                (node.op.imshp is not None) and
                (None not in node.op.imshp[-2:]) and
                (node.op.kshp is not None) and
                (None not in node.op.kshp) and
                border_mode != "half" and
                num_groups == 1 and
                not unshared):
            # NOTE(review): static shape heuristic comparing two work-size
            # proxies; chooses the gradWeights form when prod1 > prod2 —
            # exact cost model to confirm against GpuCorrMM docs.
            prod1 = node.op.kshp[0] * node.op.kshp[-3]
            prod2 = ((node.op.imshp[-2] - node.op.kshp[0] + 1) *
                     (node.op.imshp[-1] - node.op.kshp[-3] + 1))
            if (None not in node.op.imshp[:1]):
                prod1 *= node.op.imshp[0]
                prod2 *= node.op.imshp[1]
            if prod1 > prod2:
                rval = GpuCorrMM_gradWeights(border_mode,
                                             subsample,
                                             filter_dilation)(
                    gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),
                    gpu_contiguous(kern.dimshuffle(1, 0, 2, 3)))
                rval = as_gpuarray_variable(
                    rval.dimshuffle(1, 0, 2, 3),
                    context_name=ctx)
    return [rval]
@local_optimizer([AbstractConv2d])
def local_abstractconv_gemm_def(node):
    """Default GEMM lowering: AbstractConv2d -> GpuCorrMM.

    Flips the kernel's spatial axes first when the op requests filter
    flipping (correlation vs convolution).
    """
    if not isinstance(node.op, AbstractConv2d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    unshared = node.op.unshared
    if node.op.filter_flip:
        flip = (slice(None),) * (kern.ndim - 2) + \
            (slice(None, None, -1),) * 2
        kern = kern[flip]
    rval = GpuCorrMM(border_mode,
                     subsample,
                     filter_dilation,
                     num_groups,
                     unshared)(gpu_contiguous(img),
                               gpu_contiguous(kern))
    return [rval]
@local_optimizer([AbstractConv2d])
def local_abstractconv_gemm_alt(node):
    """Alternate GEMM lowerings for AbstractConv2d.

    full-mode unit-stride convs become a GpuCorrMM_gradInputs on the
    transposed kernel; valid-mode unit-stride/unit-dilation convs become a
    GpuCorrMM_gradWeights on transposed operands.  Other configurations
    are left for the default lowering.
    """
    if not isinstance(node.op, AbstractConv2d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    ctx = infer_context_name(img, kern)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    unshared = node.op.unshared
    if border_mode == 'full' and subsample == (1, 1) and num_groups == 1 and not unshared:
        if not node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1]
        kern = kern.dimshuffle(1, 0, 2, 3)
        rval = GpuCorrMM_gradInputs('valid',
                                    subsample,
                                    filter_dilation)(
            gpu_contiguous(kern), gpu_contiguous(img))
    elif (border_mode == 'valid' and subsample == (1, 1) and filter_dilation == (1, 1) and
            num_groups == 1 and not unshared):
        if node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1]
        rval = GpuCorrMM_gradWeights(border_mode,
                                     subsample,
                                     filter_dilation)(
            gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),
            gpu_contiguous(kern.dimshuffle(1, 0, 2, 3)))
        rval = as_gpuarray_variable(rval.dimshuffle(1, 0, 2, 3),
                                    context_name=ctx)
    else:
        return None
    return [rval]
@local_optimizer([AbstractConv3d])
def local_abstractconv3d_gemm(node):
    """3-D analogue of local_abstractconv_gemm: lower AbstractConv3d to
    GpuCorr3dMM ops, with the same full-mode gradInputs rewrite and the
    same static-shape heuristic preferring the gradWeights form."""
    if not isinstance(node.op, AbstractConv3d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    ctx = infer_context_name(img, kern)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    if ((border_mode == 'full') and (subsample == (1, 1, 1)) and num_groups == 1):
        if not node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1, ::-1]
        kern = kern.dimshuffle(1, 0, 2, 3, 4)
        rval = GpuCorr3dMM_gradInputs('valid',
                                      subsample,
                                      filter_dilation)(
            gpu_contiguous(kern), gpu_contiguous(img))
    else:
        if node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1, ::-1]
        rval = GpuCorr3dMM(border_mode,
                           subsample,
                           filter_dilation,
                           num_groups)(gpu_contiguous(img),
                                       gpu_contiguous(kern))
        if ((subsample == (1, 1, 1)) and (filter_dilation == (1, 1, 1)) and
                (node.op.imshp is not None) and
                (None not in node.op.imshp[-3:]) and
                (node.op.kshp is not None) and
                (None not in node.op.kshp) and
                border_mode != "half" and
                num_groups == 1):
            # Static-shape heuristic (cf. the 2d version above): switch to
            # the gradWeights formulation when it looks cheaper.
            prod1 = node.op.kshp[0] * node.op.kshp[1] * node.op.kshp[2]
            prod2 = ((node.op.imshp[-3] - node.op.kshp[0] + 1) *
                     (node.op.imshp[-2] - node.op.kshp[1] + 1) *
                     (node.op.imshp[-1] - node.op.kshp[2] + 1))
            if (None not in node.op.imshp[:1]):
                prod1 *= node.op.imshp[0]
                prod2 *= node.op.imshp[1]
            if prod1 > prod2:
                rval = GpuCorr3dMM_gradWeights(border_mode,
                                               subsample,
                                               filter_dilation)(
                    gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4)),
                    gpu_contiguous(kern.dimshuffle(1, 0, 2, 3, 4)))
                rval = as_gpuarray_variable(
                    rval.dimshuffle(1, 0, 2, 3, 4),
                    context_name=ctx)
    return [rval]
@local_optimizer([AbstractConv3d])
def local_abstractconv3d_gemm_def(node):
    """Default 3-D GEMM lowering: AbstractConv3d -> GpuCorr3dMM, flipping
    the kernel spatial axes when filter_flip is requested."""
    if not isinstance(node.op, AbstractConv3d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1, ::-1]
    rval = GpuCorr3dMM(border_mode,
                       subsample,
                       filter_dilation,
                       node.op.num_groups)(gpu_contiguous(img),
                                           gpu_contiguous(kern))
    return [rval]
@local_optimizer([AbstractConv3d])
def local_abstractconv3d_alt(node):
    """Alternate 3-D GEMM lowerings (cf. local_abstractconv_gemm_alt):
    full-mode unit-stride convs via GpuCorr3dMM_gradInputs, valid-mode
    unit-stride/unit-dilation convs via GpuCorr3dMM_gradWeights."""
    if not isinstance(node.op, AbstractConv3d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    ctx = infer_context_name(img, kern)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    if((border_mode == 'full') and (subsample == (1, 1, 1)) and
            (num_groups == 1)):
        if not node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1, ::-1]
        kern = kern.dimshuffle(1, 0, 2, 3, 4)
        rval = GpuCorr3dMM_gradInputs('valid',
                                      subsample,
                                      filter_dilation)(
            gpu_contiguous(kern), gpu_contiguous(img))
    elif(subsample == (1, 1, 1) and filter_dilation == (1, 1, 1) and
            border_mode == 'valid' and num_groups == 1):
        if node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1, ::-1]
        rval = GpuCorr3dMM_gradWeights(border_mode,
                                       subsample,
                                       filter_dilation)(
            gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4)),
            gpu_contiguous(kern.dimshuffle(1, 0, 2, 3, 4)))
        rval = as_gpuarray_variable(rval.dimshuffle(1, 0, 2, 3, 4),
                                    context_name=ctx)
    else:
        return None
    return [rval]
@local_optimizer([AbstractConv3d])
def local_abstractconv3d2d(node):
    """Lower AbstractConv3d through conv3d2d (stacks of 2-D convs).

    Only the unit-stride, unit-dilation, single-group case is handled;
    axes 1 and 2 are swapped before and after to match conv3d2d's layout.
    """
    if not isinstance(node.op, AbstractConv3d):
        return None
    img, kern = node.inputs
    if (not isinstance(img.type, GpuArrayType) or
            not isinstance(kern.type, GpuArrayType)):
        return None
    ctx = infer_context_name(img, kern)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    if(subsample == (1, 1, 1) and filter_dilation == (1, 1, 1) and
            num_groups == 1):
        # conv3d2d expects (batch, depth, channels, h, w) ordering.
        reorder_array = [0, 2, 1, 3, 4]
        rval = conv3d2d.conv3d(gpu_contiguous(img.dimshuffle(*reorder_array)),
                               gpu_contiguous(kern.dimshuffle(*reorder_array)),
                               [node.op.imshp[i] for i in reorder_array],
                               [node.op.kshp[i] for i in reorder_array],
                               border_mode=border_mode)
        rval = as_gpuarray_variable(rval.dimshuffle(*reorder_array),
                                    context_name=ctx)
        return [rval]
    else:
        return None
@local_optimizer([AbstractConv2d_gradWeights])
def local_abstractconv_gradweights_gemm(node):
    """Lower AbstractConv2d_gradWeights to GpuCorrMM_gradWeights, flipping
    the resulting kernel's spatial axes when filter_flip is set and
    restoring the original output's broadcast pattern."""
    if not isinstance(node.op, AbstractConv2d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    ctx = infer_context_name(img, topgrad)
    rval = GpuCorrMM_gradWeights(border_mode=node.op.border_mode,
                                 subsample=node.op.subsample,
                                 filter_dilation=node.op.filter_dilation,
                                 num_groups=node.op.num_groups,
                                 unshared=node.op.unshared)(
        gpu_contiguous(img), gpu_contiguous(topgrad), shape)
    # Slices reversing the last two (spatial) axes of the result.
    flip = (slice(None),) * (rval.ndim - 2) + \
        (slice(None, None, -1),) * 2
    if node.op.filter_flip:
        rval = rval[flip]
    rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
    rval = as_gpuarray_variable(rval, context_name=ctx)
    return [rval]
@local_optimizer([AbstractConv2d_gradWeights])
def local_abstractconv_gemm_gradweights_alt(node):
    """Alternative gradWeights lowering via a forward ``GpuCorrMM``.

    Computes the weight gradient as a correlation of the batch-transposed
    image with the batch-transposed output gradient.  Only valid for
    ``border_mode='valid'`` with unit subsample/dilation, one group and
    shared (non-unshared) filters; otherwise returns ``None``.
    """
    if not isinstance(node.op, AbstractConv2d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    ctx = infer_context_name(img, topgrad)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    unshared = node.op.unshared
    if(border_mode == 'valid' and subsample == (1, 1) and filter_dilation == (1, 1) and
       num_groups == 1 and not unshared):
        rval = GpuCorrMM(border_mode,
                         subsample,
                         filter_dilation)(
            gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),
            gpu_contiguous(topgrad.dimshuffle(1, 0, 2, 3)))
        if node.op.filter_flip:
            rval = rval[:, :, ::-1, ::-1]
        # Undo the batch/channel transpose on the result.
        rval = rval.dimshuffle(1, 0, 2, 3)
        rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
        rval = as_gpuarray_variable(rval, context_name=ctx)
        return [rval]
    else:
        return None
@local_optimizer([AbstractConv3d_gradWeights])
def local_abstractconv3d_gemm_gradweights_alt(node):
    """3d counterpart of :func:`local_abstractconv_gemm_gradweights_alt`.

    Uses a forward ``GpuCorr3dMM`` on batch-transposed inputs; only valid
    for ``border_mode='valid'`` with unit subsample/dilation and one group.
    """
    if not isinstance(node.op, AbstractConv3d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    ctx = infer_context_name(img, topgrad)
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    if(border_mode == 'valid' and subsample == (1, 1, 1) and
       filter_dilation == (1, 1, 1) and num_groups == 1):
        rval = GpuCorr3dMM(border_mode,
                           subsample,
                           filter_dilation)(
            gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4)),
            gpu_contiguous(topgrad.dimshuffle(1, 0, 2, 3, 4)))
        if node.op.filter_flip:
            rval = rval[:, :, ::-1, ::-1, ::-1]
        # Undo the batch/channel transpose on the result.
        rval = rval.dimshuffle(1, 0, 2, 3, 4)
        rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
        rval = as_gpuarray_variable(rval, context_name=ctx)
        return [rval]
    else:
        return None
@local_optimizer([AbstractConv3d_gradWeights])
def local_abstractconv3d_gradweights_gemm(node):
    """Lower ``AbstractConv3d_gradWeights`` to ``GpuCorr3dMM_gradWeights``,
    flipping the three spatial axes afterwards when ``filter_flip`` is set.
    """
    if not isinstance(node.op, AbstractConv3d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    ctx = infer_context_name(img, topgrad)
    rval = GpuCorr3dMM_gradWeights(border_mode=node.op.border_mode,
                                   subsample=node.op.subsample,
                                   filter_dilation=node.op.filter_dilation,
                                   num_groups=node.op.num_groups)(
        gpu_contiguous(img), gpu_contiguous(topgrad), shape)
    if node.op.filter_flip:
        rval = rval[:, :, ::-1, ::-1, ::-1]
    rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
    rval = as_gpuarray_variable(rval, context_name=ctx)
    return [rval]
@local_optimizer([AbstractConv2d_gradInputs])
def local_abstractconv_gradinputs_gemm(node):
    """Lower ``AbstractConv2d_gradInputs`` to ``GpuCorrMM_gradInputs``.

    The kernel's trailing two spatial axes are reversed *before* the call
    when ``filter_flip`` is set, converting convolution to correlation.
    """
    if not isinstance(node.op, AbstractConv2d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    if node.op.filter_flip:
        # Reverse the last two (spatial) axes regardless of kernel rank.
        flip = (slice(None),) * (kern.ndim - 2) + \
            (slice(None, None, -1),) * 2
        kern = kern[flip]
    rval = GpuCorrMM_gradInputs(border_mode=node.op.border_mode,
                                subsample=node.op.subsample,
                                filter_dilation=node.op.filter_dilation,
                                num_groups=node.op.num_groups,
                                unshared=node.op.unshared)(
        gpu_contiguous(kern), gpu_contiguous(topgrad), shape)
    return [rval]
@local_optimizer([AbstractConv2d_gradInputs])
def local_abstractconv_gradinputs_gemm_alt(node):
    """Alternative gradInputs lowering via a 'full' forward ``GpuCorrMM``.

    Correlates the output gradient with the channel-transposed kernel in
    ``full`` mode.  Only valid for ``border_mode='valid'`` with unit
    subsample, one group and shared filters; note the kernel is flipped
    when ``filter_flip`` is *not* set (the full correlation implicitly
    flips once).
    """
    if not isinstance(node.op, AbstractConv2d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    unshared = node.op.unshared
    if border_mode == 'valid' and subsample == (1, 1) and num_groups == 1 and not unshared:
        if not node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1]
        rval = GpuCorrMM(border_mode='full',
                         subsample=subsample,
                         filter_dilation=filter_dilation)(
            gpu_contiguous(topgrad),
            gpu_contiguous(kern.dimshuffle(1, 0, 2, 3)))
        return [rval]
    else:
        return None
@local_optimizer([AbstractConv3d_gradInputs])
def local_abstractconv3d_gradinputs_gemm(node):
    """Lower ``AbstractConv3d_gradInputs`` to ``GpuCorr3dMM_gradInputs``,
    reversing the kernel's three spatial axes first when ``filter_flip``
    is set.
    """
    if not isinstance(node.op, AbstractConv3d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1, ::-1]
    rval = GpuCorr3dMM_gradInputs(border_mode=node.op.border_mode,
                                  subsample=node.op.subsample,
                                  filter_dilation=node.op.filter_dilation,
                                  num_groups=node.op.num_groups)(
        gpu_contiguous(kern), gpu_contiguous(topgrad), shape)
    return [rval]
@local_optimizer([AbstractConv3d_gradInputs])
def local_abstractconv3d_gradinputs_gemm_alt(node):
    """3d counterpart of :func:`local_abstractconv_gradinputs_gemm_alt`:
    gradInputs via a 'full' forward ``GpuCorr3dMM``.  Only valid for
    ``border_mode='valid'`` with unit subsample and one group.
    """
    if not isinstance(node.op, AbstractConv3d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, GpuArrayType) or \
            not isinstance(topgrad.type, GpuArrayType):
        return None
    border_mode = node.op.border_mode
    subsample = node.op.subsample
    filter_dilation = node.op.filter_dilation
    num_groups = node.op.num_groups
    if(border_mode == 'valid' and subsample == (1, 1, 1) and
       num_groups == 1):
        if not node.op.filter_flip:
            kern = kern[:, :, ::-1, ::-1, ::-1]
        rval = GpuCorr3dMM(border_mode='full',
                           subsample=subsample,
                           filter_dilation=filter_dilation)(
            gpu_contiguous(topgrad),
            gpu_contiguous(kern.dimshuffle(1, 0, 2, 3, 4)))
        return [rval]
    else:
        return None
class ConvMetaOptimizer(LocalMetaOptimizer):
    """Meta-optimizer that benchmarks candidate convolution lowerings.

    For a given abstract convolution node it builds random shared inputs of
    the recorded shapes, times each registered candidate optimizer and lets
    the base class pick the fastest one.
    """
    def __init__(self):
        super(ConvMetaOptimizer, self).__init__()
    def time_call(self, fn):
        """Time ``fn``, syncing the first GPU output so the kernel has
        actually finished before the clock is read."""
        start = time.time()
        fn()[0].sync()
        return time.time() - start
    def provide_inputs(self, node, inputs):
        """Build shared GPU test inputs matching the node's recorded shapes.

        Returns an empty dict (i.e. "cannot benchmark") when any image or
        kernel shape entry is unknown.
        """
        result = {}
        shapes = (node.op.imshp, node.op.kshp)
        if(node.op.imshp is None or node.op.kshp is None or
           any([s is None for shape in shapes for s in shape])):
            return result
        if type(node.op) in [AbstractConv2d, AbstractConv3d]:
            img, kern = node.inputs
            for(var, shape) in zip((img, kern), shapes):
                result[var] = theano.shared(np.random.random(shape).astype(var.dtype),
                                            var.name,
                                            broadcastable=var.broadcastable,
                                            borrow=True)
        if type(node.op) in [AbstractConv2d_gradWeights, AbstractConv3d_gradWeights]:
            img, top, kshape = node.inputs
            # Shape of the forward conv output = shape of the top gradient.
            tshp = get_conv_output_shape(node.op.imshp,
                                         node.op.kshp,
                                         node.op.border_mode,
                                         node.op.subsample,
                                         node.op.filter_dilation)
            convdim = img.ndim - 2
            result[kshape] = theano.tensor.as_tensor_variable(node.op.kshp[-convdim:])
            for(var, shape) in zip((img, top), (node.op.imshp, tshp)):
                result[var] = theano.shared(np.random.random(shape).astype(var.dtype),
                                            var.name,
                                            broadcastable=var.broadcastable,
                                            borrow=True)
        if type(node.op) in [AbstractConv2d_gradInputs, AbstractConv3d_gradInputs]:
            kern, top, ishape = node.inputs
            tshp = get_conv_output_shape(node.op.imshp,
                                         node.op.kshp,
                                         node.op.border_mode,
                                         node.op.subsample,
                                         node.op.filter_dilation)
            result[ishape] = theano.tensor.as_tensor_variable(node.op.imshp[2:])
            for(var, shape) in zip((kern, top), (node.op.kshp, tshp)):
                result[var] = theano.shared(np.random.random(shape).astype(var.dtype),
                                            var.name,
                                            broadcastable=var.broadcastable,
                                            borrow=True)
        return result
    def get_opts(self, node):
        """Select candidate optimizers for ``node``, honoring the
        ``metaopt.optimizer_including``/``optimizer_excluding`` config tags.
        """
        opts = Counter([opt for opt in self.track_dict[type(node.op)]
                        if opt in self.tag_dict['default']])
        include_tags = config.metaopt.optimizer_including.split(':')
        exclude_tags = config.metaopt.optimizer_excluding.split(':')
        for in_opt in include_tags:
            opts.update([opt for opt in self.track_dict[type(node.op)]
                         if opt in self.tag_dict[in_opt]])
        for ex_opt in exclude_tags:
            opts.subtract([opt for opt in self.track_dict[type(node.op)]
                           if opt in self.tag_dict[ex_opt]])
        # Adding an empty Counter drops entries with non-positive counts,
        # i.e. everything that was excluded at least as often as included.
        opts = list(opts + Counter())
        return opts
@register_opt('fast_compile', 'conv_dnn', 'cudnn')
@op_lifter([AbstractConv2d,
            AbstractConv2d_gradWeights,
            AbstractConv2d_gradInputs,
            AbstractConv3d,
            AbstractConv3d_gradWeights,
            AbstractConv3d_gradInputs])
def local_gpua_abstractconv(op, context_name, inputs, outputs):
    """Lift an abstract convolution op whose output is still on the CPU by
    moving its inputs to the GPU (delegates to the graph-lift variant)."""
    if isinstance(outputs[0].type, GpuArrayType):
        # Already a GPU node; nothing to lift.
        return
    return local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs)
@register_opt2([AbstractConv2d,
                AbstractConv2d_gradWeights,
                AbstractConv2d_gradInputs,
                AbstractConv3d,
                AbstractConv3d_gradWeights,
                AbstractConv3d_gradInputs], 'fast_compile')
def local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs):
    """Re-apply ``op`` with its first two inputs (image/kernel or their
    gradient counterparts) transferred to the GPU context; any remaining
    inputs (e.g. shape) are passed through unchanged."""
    inps = list(inputs)
    inps[0] = as_gpuarray_variable(inputs[0],
                                   context_name=context_name)
    inps[1] = as_gpuarray_variable(inputs[1],
                                   context_name=context_name)
    return [op(*inps)]
def local_gpu_pool(op, ctx_name, inputs, outputs):
    """Lift a CPU ``pool.Pool`` op to ``GpuPool`` (2d or 3d only).

    Inputs with more than ``nd + 2`` dimensions are reshaped via
    ``pad_dims``/``unpad_dims`` so the GPU op sees the expected layout.
    """
    assert op.__props__ == ('ignore_border', 'mode', 'ndim')
    inp, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
    op = GpuPool(op.ignore_border, op.mode, op.ndim)
    if inp.ndim == nd + 2:
        return op(inp, ws, stride, pad)
    else:
        inp_padded = pad_dims(inp, 2, nd)
        ret_padded = op(inp_padded, ws, stride, pad)
        return unpad_dims(ret_padded, inp, 2, nd)
# Pooling lifters live in LocalGroupDBs so that other implementations
# registered in the same group can be ordered via `position`.
# pool_db2 is the graph-to-GPU variant of the same group.
pool_db = LocalGroupDB()
pool_db2 = LocalGroupDB(local_opt=theano.gof.opt.GraphToGPULocalOptGroup)
pool_db2.__name__ = "pool_db2"
lifter = op_lifter([pool.Pool])(local_gpu_pool)
pool_db.register("local_gpu_pool", lifter,
                 'gpuarray', 'fast_compile', 'fast_run',
                 position=1)
pool_db2.register("local_gpu_pool",
                  local_optimizer([pool.Pool])(local_gpu_pool),
                  'gpuarray', 'fast_compile', 'fast_run',
                  position=1)
register_opt('fast_compile', name='pool_db')(pool_db)
register_opt2([pool.Pool], 'fast_compile', name='pool_db2')(pool_db2)
def local_gpu_max_pool_grad(op, ctx_name, inputs, outputs):
    """Lift ``pool.MaxPoolGrad`` to ``GpuMaxPoolGrad`` (2d or 3d only),
    reshaping via ``pad_dims``/``unpad_dims`` for higher-rank inputs."""
    assert op.__props__ == ('ignore_border', 'mode', 'ndim')
    inp, out, out_grad, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
    out = gpu_contiguous(as_gpuarray_variable(out, ctx_name))
    out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
    op = GpuMaxPoolGrad(op.ignore_border, op.mode, op.ndim)
    if inp.ndim == nd + 2:
        return op(inp, out, out_grad, ws, stride, pad)
    else:
        inp_padded = pad_dims(inp, 2, nd)
        out_padded = pad_dims(out, 2, nd)
        out_grad_padded = pad_dims(out_grad, 2, nd)
        ret_padded = op(inp_padded, out_padded, out_grad_padded,
                        ws, stride, pad)
        return unpad_dims(ret_padded, inp, 2, nd)
# Register the max-pool gradient lifter in both pooling group DBs.
lifter = op_lifter([pool.MaxPoolGrad])(local_gpu_max_pool_grad)
pool_db.register("local_gpu_max_pool_grad", lifter,
                 'gpuarray', 'fast_compile', 'fast_run',
                 position=1)
pool_db2.register("local_gpu_max_pool_grad",
                  local_optimizer([pool.MaxPoolGrad])(local_gpu_max_pool_grad),
                  'gpuarray', 'fast_compile', 'fast_run',
                  position=1)
def local_gpu_average_pool_grad(op, ctx_name, inputs, outputs):
    """Lift ``pool.AveragePoolGrad`` to ``GpuAveragePoolGrad`` (2d/3d only),
    reshaping via ``pad_dims``/``unpad_dims`` for higher-rank inputs."""
    assert op.__props__ == ('ignore_border', 'mode', 'ndim')
    inp, out_grad, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
    out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
    op = GpuAveragePoolGrad(op.ignore_border, op.mode, op.ndim)
    if inp.ndim == nd + 2:
        return op(inp, out_grad, ws, stride, pad)
    else:
        inp_padded = pad_dims(inp, 2, nd)
        out_grad_padded = pad_dims(out_grad, 2, nd)
        ret_padded = op(inp_padded, out_grad_padded,
                        ws, stride, pad)
        return unpad_dims(ret_padded, inp, 2, nd)
# Register the average-pool gradient lifter in both pooling group DBs.
lifter = op_lifter([pool.AveragePoolGrad])(local_gpu_average_pool_grad)
pool_db.register("local_gpu_average_pool_grad", lifter,
                 'gpuarray', 'fast_compile', 'fast_run',
                 position=1)
pool_db2.register("local_gpu_average_pool_grad",
                  local_optimizer([pool.AveragePoolGrad])(local_gpu_average_pool_grad),
                  'gpuarray', 'fast_compile', 'fast_run',
                  position=1)
@register_opt()
@op_lifter([pool.DownsampleFactorMaxGradGrad])
@register_opt2([pool.DownsampleFactorMaxGradGrad])
def local_gpu_downsample_factor_max_grad_grad(op, ctx_name, inputs, outputs):
    """Lift ``pool.DownsampleFactorMaxGradGrad`` to its GPU counterpart
    (2d/3d only), reshaping via ``pad_dims``/``unpad_dims`` as needed."""
    assert op.__props__ == ('ignore_border', 'mode', 'ndim')
    inp, out, out_grad, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
    out = gpu_contiguous(as_gpuarray_variable(out, ctx_name))
    out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
    op = GpuDownsampleFactorMaxGradGrad(op.ignore_border, op.mode, op.ndim)
    if inp.ndim == nd + 2:
        return op(inp, out, out_grad, ws, stride, pad)
    else:
        inp_padded = pad_dims(inp, 2, nd)
        out_padded = pad_dims(out, 2, nd)
        out_grad_padded = pad_dims(out_grad, 2, nd)
        ret_padded = op(inp_padded, out_padded, out_grad_padded,
                        ws, stride, pad)
        return unpad_dims(ret_padded, inp, 2, nd)
@register_opt()
@op_lifter([pool.MaxPoolRop])
@register_opt2([pool.MaxPoolRop])
def local_gpu_max_pool_rop(op, ctx_name, inputs, outputs):
    """Lift ``pool.MaxPoolRop`` (R-op of max pooling) to ``GpuMaxPoolRop``
    (2d/3d only), reshaping via ``pad_dims``/``unpad_dims`` as needed."""
    assert op.__props__ == ('ignore_border', 'mode', 'ndim')
    inp, eval_inp, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
    eval_inp = gpu_contiguous(as_gpuarray_variable(eval_inp, ctx_name))
    op = GpuMaxPoolRop(op.ignore_border, op.mode, op.ndim)
    if inp.ndim == nd + 2:
        return op(inp, eval_inp, ws, stride, pad)
    else:
        inp_padded = pad_dims(inp, 2, nd)
        eval_inp_padded = pad_dims(eval_inp, 2, nd)
        ret_padded = op(inp_padded, eval_inp_padded, ws, stride, pad)
        return unpad_dims(ret_padded, inp, 2, nd)
@register_opt("low_memory")
@local_optimizer([GpuCAReduceCuda])
def local_gpu_elemwise_careduce(node):
    """Fuse an elementwise Sqr/Abs preceding a ``GpuCAReduceCuda`` into the
    reduction's ``pre_scalar_op``, saving one intermediate allocation."""
    if (isinstance(node.op, GpuCAReduceCuda) and
            node.op.pre_scalar_op is None and
            node.inputs[0].owner and
            isinstance(node.inputs[0].owner.op, GpuElemwise) and
            # automatically add more case, as some like trigonometic
            # operation with some reduction pattern will probably results
            # in slow down.
            isinstance(node.inputs[0].owner.op.scalar_op, (scalar.basic.Sqr,
                                                           scalar.basic.Abs))):
        inp = node.inputs[0].owner.inputs[0]
        props = node.op._props_dict()
        props["pre_scalar_op"] = node.inputs[0].owner.op.scalar_op
        with inherit_stack_trace(node.outputs):
            out = GpuCAReduceCuda(**props)(inp)
            return [out]
@local_optimizer(None)
def local_assert_no_cpu_op(node):
    """Detect a CPU op sandwiched between GPU transfers (all inputs come
    from HostFromGpu and some output feeds a GpuFromHost) and warn, raise
    or drop into pdb depending on ``config.assert_no_cpu_op``."""
    if (all([var.owner and isinstance(var.owner.op, HostFromGpu)
             for var in node.inputs]) and
        any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)]
             for var in node.outputs])):
        if config.assert_no_cpu_op == "warn":
            _logger.warning(("CPU Op %s is detected in the computation "
                             "graph") % node)
        elif config.assert_no_cpu_op == "raise":
            raise AssertionError("The Op %s is on CPU." % node)
        elif config.assert_no_cpu_op == "pdb":
            pdb.set_trace()
# Register local_assert_no_cpu_op as a global (in2out) optimization.
assert_no_cpu_op = theano.tensor.opt.in2out(local_assert_no_cpu_op,
                                            name='assert_no_cpu_op')
# Position 49.2 is after device specialization & fusion optimizations,
# i.e. after the last GPU transfers have been inserted.
optdb.register('gpua_assert_no_cpu_op', assert_no_cpu_op, 49.2,
               'assert_no_cpu_op')
def tensor_to_gpu(x, context_name):
    """Return a GPU-typed twin of *x* when it is a plain TensorType variable.

    The twin keeps the broadcastable pattern and dtype of the original and,
    if the original is named, carries the same name with a '[Gpua]' suffix.
    Variables of any other type are returned untouched.
    """
    if not isinstance(x.type, tensor.TensorType):
        return x
    gpu_type = GpuArrayType(broadcastable=x.type.broadcastable,
                            context_name=context_name,
                            dtype=x.type.dtype)
    gpu_var = gpu_type()
    if x.name:
        gpu_var.name = x.name + '[Gpua]'
    return gpu_var
def gpu_safe_new(x, tag=''):
    """Return a fresh stand-in for the variable *x*.

    Constants are simply cloned.  Any other variable gets a brand-new
    instance of its own type, named after the original with *tag* appended
    (or left unnamed when the original has no name).
    """
    new_name = None
    if getattr(x, 'name', None) is not None:
        new_name = x.name + tag
    if isinstance(x, theano.Constant):
        return x.clone()
    replacement = x.type()
    replacement.name = new_name
    return replacement
def gpu_reconstruct_graph(inputs, outputs, tag=None):
    """Clone the graph that computes *outputs* from *inputs*.

    Every input is replaced by a fresh variable (see :func:`gpu_safe_new`)
    and the outputs are re-expressed in terms of the replacements.

    Returns the pair ``(new_inputs, new_outputs)``.
    """
    suffix = '' if tag is None else tag
    fresh_inputs = [gpu_safe_new(var, suffix) for var in inputs]
    replacements = dict(zip(inputs, fresh_inputs))
    fresh_outputs = scan_utils.clone(outputs, replace=replacements)
    return (fresh_inputs, fresh_outputs)
@register_opt('scan', 'fast_compile')
@op_lifter([scan_op.Scan])
@register_opt2([scan_op.Scan], 'fast_compile')
def local_gpua_scan_to_gpua(op, context_name, inputs, outputs):
    """Move a Scan op (outer and inner graph) to the GPU.

    Marks the op info with ``gpua`` so the rewrite only runs once, moves
    the outer inputs and the inner graph's variables to the GPU, and
    rebuilds the Scan with a GPU type constructor.
    """
    info = copy.deepcopy(op.info)
    if info.get('gpua', False):
        # Already converted; don't lift twice.
        return
    info['gpua'] = True
    # First outer input is left untouched (presumably n_steps — confirm
    # against scan_op.Scan's input layout).
    nw_ins = [inputs[0]]
    # Index just past the seqs/mit_mot/mit_sot/sit_sot/shared_outs inputs,
    # all of which are moved to the GPU.
    e = (1 +
         op.n_seqs +
         op.n_mit_mot +
         op.n_mit_sot +
         op.n_sit_sot +
         op.n_shared_outs)
    nw_ins += [safe_to_gpu(x, context_name) for x in inputs[1:e]]
    b = e
    e = e + op.n_nit_sot
    # nit_sot inputs are lengths/shapes, kept on the CPU.
    nw_ins += inputs[b:e]
    nw_ins += [safe_to_gpu(x, context_name) for x in inputs[e:]]
    scan_ins = [tensor_to_gpu(x, context_name) for x in op.inputs]
    # The inner output corresponding to the looping condition should not be
    # moved to the gpu
    if op.info['as_while']:
        scan_outs = [safe_to_gpu(x, context_name) for x in op.outputs[:-1]]
        scan_outs += [op.outputs[-1]]
    else:
        scan_outs = [safe_to_gpu(x, context_name) for x in op.outputs]
    scan_outs = scan_utils.clone(
        scan_outs,
        replace=list(zip(op.inputs,
                         (safe_to_cpu(x) for x in scan_ins))))
    # We need to construct the hash here, because scan
    # __init__ does not know about the gpu and can not
    # handle graphs with inputs being on the gpu
    tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
    local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)
    _cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
    info['gpu_hash'] = hash(_cmodule_key)
    def typebuild(dtype, broadcastable, context_name=context_name):
        return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
                            context_name=context_name)
    nw_op = scan_op.Scan(scan_ins, scan_outs, info,
                         typeConstructor=typebuild).make_node(*nw_ins)
    return nw_op.outputs
def _scan_type_infer(node):
    """Return a GpuArrayType constructor bound to the GPU context inferred
    from *node*'s inputs (used as Scan's ``typeConstructor``)."""
    ctx = infer_context_name(*node.inputs)
    def typebuild(dtype, broadcastable, context_name=ctx):
        return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
                            context_name=context_name)
    return typebuild
# Add optimization : maxandargmax (CPU -> GPU)
@register_opt('fast_compile')
@op_lifter([tensor.MaxAndArgmax])
@register_opt2([tensor.MaxAndArgmax], 'fast_compile')
def local_gpu_maxandargmax(op, context_name, inputs, outputs):
    """Lift ``MaxAndArgmax`` to ``GpuMaxAndArgmax``; float16 input is
    computed in float32 and the max cast back to float16."""
    op = GpuMaxAndArgmax(op.get_params(None))
    if inputs[0].dtype == "float16":
        # For now it is better to copy/cast on the GPU then transfer to the CPU
        casted_inputs = inputs[0].astype('float32')
        ret = op(casted_inputs)
        # Only the max value needs the cast back; argmax is integral.
        return [ret[0].astype('float16'), ret[1]]
    return op
@register_opt('fast_compile')
@op_lifter([Images2Neibs])
@register_opt2([Images2Neibs], 'fast_compile')
def local_gpua_images2neibs(op, context_name, inputs, outputs):
    """Lift ``Images2Neibs`` to ``GpuImages2Neibs`` for the supported
    border modes; other modes fall through (implicit None)."""
    if op.mode in ['valid', 'half', 'full', 'ignore_borders', 'wrap_centered']:
        return GpuImages2Neibs(op.mode)
# solve
@register_opt('fast_compile')
@op_lifter([slinalg.Solve])
@register_opt2([theano.tensor.slinalg.Solve], 'fast_compile')
def local_gpu_solve(op, context_name, inputs, outputs):
    """Lift ``slinalg.Solve`` to the GPU.

    Triangular systems go to cuBLAS (``GpuCublasTriangularSolve``), other
    supported structures to cuSOLVER (``GpuCusolverSolve``).  float16 is
    computed in float32 and cast back.
    """
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    if op.A_structure not in MATRIX_STRUCTURES_SOLVE:
        return
    if op.A_structure in ['lower_triangular', 'upper_triangular']:
        if not cublas_available:
            return
        lower = op.A_structure == 'lower_triangular'
        op = GpuCublasTriangularSolve(lower)
    else:
        if not cusolver_available:
            return
        op = GpuCusolverSolve(A_structure=op.A_structure)
    if inputs[0].dtype == 'float16':
        return op(inputs[0].astype('float32'),
                  inputs[1].astype('float32')).astype('float16')
    return op
@register_inplace()
@local_optimizer([GpuCusolverSolve], inplace=True)
def local_inplace_gpu_solve(node):
    """Replace a non-inplace ``GpuCusolverSolve`` by its inplace variant."""
    if isinstance(node.op, GpuCusolverSolve) and not node.op.inplace:
        with inherit_stack_trace(node.outputs):
            return [GpuCusolverSolve(A_structure=node.op.A_structure, trans=node.op.trans,
                                     inplace=True)(*node.inputs)]
# Cholesky decomposition
def local_gpu_cholesky(op, context_name, inputs, outputs):
    """Lift ``slinalg.Cholesky`` to ``GpuCholesky`` (cuSOLVER backend);
    float16 is computed in float32 and cast back."""
    if not cusolver_available:
        return
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    op = GpuCholesky(lower=op.lower, inplace=op.destructive)
    if inputs[0].dtype == 'float16':
        return op(inputs[0].astype('float32')).astype('float16')
    return op
# Group DBs for the matrix-factorization lifters so alternative backends
# (cusolver vs. magma) can be ordered by `position`.
matrix_ops_db = LocalGroupDB()
matrix_ops_db2 = LocalGroupDB(local_opt=theano.gof.opt.GraphToGPULocalOptGroup)
matrix_ops_db2.__name__ = "matrix_ops_db2"
# For Cholesky decomposition, magma 2.2 is slower than cusolver 8 (tested for
# matrices of size 1000). Thus, cusolver is prioritized during graph
# optimizations. To explicitly use magma, you should disable cusolver using
# `optimizer_excluding=cusolver` in Theano config.
lifter = op_lifter([slinalg.Cholesky])(local_gpu_cholesky)
matrix_ops_db.register("local_gpu_cholesky", lifter,
                       'gpuarray', 'fast_compile', 'fast_run', 'cusolver',
                       position=0)
matrix_ops_db2.register("local_gpu_cholesky",
                        local_optimizer([slinalg.Cholesky])(local_gpu_cholesky),
                        'gpuarray', 'fast_compile', 'fast_run', 'cusolver',
                        position=0)
register_opt('fast_compile', name='matrix_ops_db')(matrix_ops_db)
register_opt2([slinalg.Solve], 'fast_compile', name='matrix_ops_db2')(matrix_ops_db2)
@register_inplace()
@local_optimizer([GpuCholesky], inplace=True)
def local_inplace_gpu_cholesky(node):
    """Replace a non-inplace ``GpuCholesky`` by its inplace clone."""
    if isinstance(node.op, GpuCholesky) and not node.op.inplace:
        with inherit_stack_trace(node.outputs):
            return [node.op.clone_inplace()(*node.inputs)]
def local_gpu_magma_cholesky(op, context_name, inputs, outputs):
    """Lift ``slinalg.Cholesky`` to ``GpuMagmaCholesky`` (magma backend);
    float16 is computed in float32 and cast back."""
    if not config.magma.enabled:
        return
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    op = GpuMagmaCholesky(lower=op.lower, inplace=op.destructive)
    if inputs[0].dtype == 'float16':
        return op(inputs[0].astype('float32')).astype('float16')
    return op
# Magma Cholesky goes at position 1, i.e. after the cusolver variant.
lifter = op_lifter([slinalg.Cholesky])(local_gpu_magma_cholesky)
matrix_ops_db.register("local_gpu_magma_cholesky", lifter,
                       'gpuarray', 'fast_compile', 'fast_run', 'magma',
                       position=1)
matrix_ops_db2.register("local_gpu_magma_cholesky",
                        local_optimizer([slinalg.Cholesky])(local_gpu_magma_cholesky),
                        'gpuarray', 'fast_compile', 'fast_run', 'magma',
                        position=1)
@register_inplace()
@local_optimizer([GpuMagmaCholesky], inplace=True)
def local_inplace_gpu_magma_cholesky(node):
    """Replace a non-inplace ``GpuMagmaCholesky`` by its inplace clone."""
    if isinstance(node.op, GpuMagmaCholesky) and not node.op.inplace:
        return [node.op.clone_inplace()(*node.inputs)]
# QR decomposition
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.QRFull])
@register_opt2([theano.tensor.nlinalg.QRFull], 'magma', 'fast_compile')
def local_gpu_magma_qr(op, context_name, inputs, outputs):
    """Lift ``nlinalg.QRFull`` (mode='reduced' only) to the magma-based
    ``gpu_qr`` returning Q and R; float16 handled via float32."""
    if not config.magma.enabled or op.mode != 'reduced':
        return
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    x = inputs[0]
    if inputs[0].dtype == 'float16':
        x = inputs[0].astype('float32')
    out = gpu_qr(x, complete=True)
    if inputs[0].dtype == 'float16':
        return [o.astype('float16') for o in out]
    return out
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.QRIncomplete])
@register_opt2([theano.tensor.nlinalg.QRIncomplete], 'magma', 'fast_compile')
def local_gpu_magma_qr_incomplete(op, context_name, inputs, outputs):
    """Lift ``nlinalg.QRIncomplete`` to the magma-based ``gpu_qr`` with
    ``complete=False`` (single output); float16 handled via float32."""
    if not config.magma.enabled:
        return
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    x = inputs[0]
    if inputs[0].dtype == 'float16':
        x = inputs[0].astype('float32')
    out = gpu_qr(x, complete=False)
    if inputs[0].dtype == 'float16':
        return [out.astype('float16')]
    return out
# Matrix inverse
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.MatrixInverse])
@register_opt2([theano.tensor.nlinalg.MatrixInverse], 'magma', 'fast_compile')
def local_gpu_magma_matrix_inverse(op, context_name, inputs, outputs):
    """Lift ``nlinalg.MatrixInverse`` to ``GpuMagmaMatrixInverse``;
    float16 is computed in float32 and cast back."""
    if not config.magma.enabled:
        return
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    op = GpuMagmaMatrixInverse()
    if inputs[0].dtype == 'float16':
        return op(inputs[0].astype('float32')).astype('float16')
    return op
@register_inplace()
@local_optimizer([GpuMagmaMatrixInverse])
def local_inplace_gpu_magma_matrix_inverse(node):
    """Replace a non-inplace ``GpuMagmaMatrixInverse`` by its inplace clone."""
    if isinstance(node.op, GpuMagmaMatrixInverse) and not node.op.inplace:
        with inherit_stack_trace(node.outputs):
            return [node.op.clone_inplace()(*node.inputs)]
# Eigen decomposition of a symmetric matrix
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.Eigh])
@register_opt2([theano.tensor.nlinalg.Eigh], 'magma', 'fast_compile')
def local_gpu_magma_eigh(op, context_name, inputs, outputs):
    """Lift ``nlinalg.Eigh`` to ``GpuMagmaEigh`` (always computing the
    eigenvectors); float16 is computed in float32 and cast back."""
    if not config.magma.enabled:
        return
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    op = GpuMagmaEigh(UPLO=op.UPLO, compute_v=True)
    if inputs[0].dtype == 'float16':
        return op(inputs[0].astype('float32')).astype('float16')
    return op
# Singular Value Decomposition
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.SVD])
@register_opt2([theano.tensor.nlinalg.SVD], 'magma', 'fast_compile')
def local_gpu_magma_svd(op, context_name, inputs, outputs):
    """Lift ``nlinalg.SVD`` to the magma-based ``gpu_svd``.

    float16 input is computed in float32; the result (one array, or three
    when ``compute_uv``) is cast back to float16.
    """
    if not config.magma.enabled:
        return
    if inputs[0].dtype not in ['float16', 'float32']:
        return
    x = inputs[0]
    if inputs[0].dtype == 'float16':
        x = inputs[0].astype('float32')
    out = gpu_svd(x, compute_uv=op.compute_uv, full_matrices=op.full_matrices)
    if inputs[0].dtype == 'float16':
        if op.compute_uv:
            out = [o.astype('float16') for o in out]
        else:
            out = [out.astype('float16')]
    return out
@register_opt('ctc', 'fast_compile')
@op_lifter([theano.tensor.nnet.ctc.ConnectionistTemporalClassification])
@register_opt2([ConnectionistTemporalClassification], 'ctc', 'fast_compile')
def local_gpu_ctc(op, context_name, inputs, outputs):
    """Lift the CTC loss op to ``GpuConnectionistTemporalClassification``,
    preserving the gradient-computation flag."""
    op = GpuConnectionistTemporalClassification(compute_grad=op.compute_grad)
    return op.make_node(*inputs).outputs
# Do not register in fast_run or fast_compile.
# It will be added to fast_run if the GPU is enabled.
# Makes GPU Scan ops run inplace where possible (position 75 in optdb).
optdb.register('gpua_scanOp_make_inplace',
               scan_opt.ScanInplaceOptimizer(typeInfer=_scan_type_infer,
                                             gpua_flag=True),
               75,
               'gpuarray',
               'inplace',
               'scan')
# Register GPU convolution implementation
# They are tried in a specific order so we can control
# which ones take precedence over others.
abstractconv_groupopt = theano.gof.optdb.LocalGroupDB()
abstractconv_groupopt.__name__ = "gpuarray_abstractconv_opts"
register_opt('fast_compile')(abstractconv_groupopt)
# We import these opts here instead of at the top of this file
# to avoid a circular dependency problem with dnn
from .dnn import (local_abstractconv_cudnn,
                  local_abstractconv_gw_cudnn,
                  local_abstractconv_gi_cudnn,     # noqa: 402
                  local_abstractconv_cudnn_alt,
                  local_abstractconv3d_cudnn_alt)
# cuDNN variants are registered at position 20, before the GEMM fallbacks.
abstractconv_groupopt.register('local_abstractconv_dnn',
                               local_abstractconv_cudnn, 20,
                               'conv_dnn',
                               'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
abstractconv_groupopt.register('local_abstractconv_gw_dnn',
                               local_abstractconv_gw_cudnn, 20,
                               'conv_dnn',
                               'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
abstractconv_groupopt.register('local_abstractconv_gi_dnn',
                               local_abstractconv_gi_cudnn, 20,
                               'conv_dnn',
                               'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
# The GEMM-based convolution comes last to catch all remaining cases.
# It can be disabled by excluding 'conv_gemm'.
abstractconv_groupopt.register('local_abstractconv_gemm', local_abstractconv_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gemm', local_abstractconv3d_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv_gradweights_gemm',
                               local_abstractconv_gradweights_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gradweights_gemm',
                               local_abstractconv3d_gradweights_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv_gradinputs',
                               local_abstractconv_gradinputs_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gradinputs',
                               local_abstractconv3d_gradinputs_gemm, 30,
                               'conv_gemm',
                               'gpuarray', 'fast_compile', 'fast_run')
# Populate the convolution meta-optimizer with every candidate
# implementation; the tags let config.metaopt include/exclude groups
# ('default', 'alternative', 'cudnn', 'conv_dnn', 'conv_gemm', 'conv3d2d').
conv_metaopt = ConvMetaOptimizer()
conv_metaopt.register(local_abstractconv_cudnn,
                      ['default', 'cudnn', 'conv_dnn'])
conv_metaopt.register(local_abstractconv_gw_cudnn,
                      ['default', 'cudnn', 'conv_dnn'])
conv_metaopt.register(local_abstractconv_gi_cudnn,
                      ['default', 'cudnn', 'conv_dnn'])
conv_metaopt.register(local_abstractconv_gemm_def,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d_gemm_def,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_gradweights_gemm,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d_gradweights_gemm,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_gradinputs_gemm,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d_gradinputs_gemm,
                      ['default', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_gemm_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_gemm_gradweights_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_gradinputs_gemm_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv_cudnn_alt,
                      ['default', 'alternative', 'cudnn', 'conv_dnn'])
conv_metaopt.register(local_abstractconv3d_cudnn_alt,
                      ['default', 'alternative', 'cudnn', 'conv_dnn'])
conv_metaopt.register(local_abstractconv3d_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d_gemm_gradweights_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d_gradinputs_gemm_alt,
                      ['default', 'alternative', 'conv_gemm'])
conv_metaopt.register(local_abstractconv3d2d,
                      ['alternative', 'conv3d2d'])
# The meta-optimizer runs first (position 0) within the conv group.
abstractconv_groupopt.register('conv_metaopt', conv_metaopt, 'conv_meta', position=0)
# Register cuDNN batch normalization implementation
# We import these opts here instead of at the top of this file
# to avoid a circular dependency problem with dnn
from .dnn import (local_abstract_batch_norm_train_cudnn,
                  local_abstract_batch_norm_train_grad_cudnn,
                  local_abstract_batch_norm_inference_cudnn)  # noqa: 402
abstract_batch_norm_groupopt = theano.gof.optdb.LocalGroupDB()
abstract_batch_norm_groupopt.__name__ = "gpuarray_batchnorm_opts"
register_opt('fast_compile')(abstract_batch_norm_groupopt)
# Group DBs for the batch-norm lifters (ordinary and graph-to-GPU forms).
abstract_batch_norm_db = LocalGroupDB()
abstract_batch_norm_db2 = LocalGroupDB(
    local_opt=theano.gof.opt.GraphToGPULocalOptGroup)
abstract_batch_norm_db2.__name__ = "abstract_batch_norm_db2"
register_opt('fast_compile', name='abstract_batch_norm_db')(
    abstract_batch_norm_db)
register_opt2([bn.AbstractBatchNormTrain,
               bn.AbstractBatchNormTrainGrad,
               bn.AbstractBatchNormInference],
              'fast_compile', name='abstract_batch_norm_db2')(
    abstract_batch_norm_db2)
# For each abstract batch-norm op, register the cuDNN lifter first
# (position=1) and the CPU implementation last as a fallback.
for op, fct, cpu in [(bn.AbstractBatchNormTrain,
                      local_abstract_batch_norm_train_cudnn,
                      bn.local_abstract_batch_norm_train),
                     (bn.AbstractBatchNormTrainGrad,
                      local_abstract_batch_norm_train_grad_cudnn,
                      bn.local_abstract_batch_norm_train_grad),
                     (bn.AbstractBatchNormInference,
                      local_abstract_batch_norm_inference_cudnn,
                      bn.local_abstract_batch_norm_inference)]:
    lifter = op_lifter([op])(fct)
    abstract_batch_norm_db.register(fct.__name__,
                                    lifter,
                                    'gpuarray', 'fast_compile', 'fast_run',
                                    'cudnn', 'batchnorm_dnn',
                                    position=1)
    abstract_batch_norm_db2.register(fct.__name__,
                                     local_optimizer([op])(fct),
                                     'gpuarray', 'fast_compile', 'fast_run',
                                     'cudnn', 'batchnorm_dnn',
                                     position=1)
    # cpu is a normal (non-lifting) optimization; register it at the end of
    # the group so it only applies when no cuDNN variant matched.
    abstract_batch_norm_db.register(cpu.__name__, cpu,
                                    'gpuarray', 'fast_compile', 'fast_run',
                                    position='last')
| true | true |
1c3a8f24f03cf22ef585b3ddd3df1a97ed17f540 | 3,407 | py | Python | src/experiment3.py | jbrowarczyk/jb-masters-thesis | c345f43b32126d16f10c3706f5f798fde0665ee0 | [
"MIT"
] | null | null | null | src/experiment3.py | jbrowarczyk/jb-masters-thesis | c345f43b32126d16f10c3706f5f798fde0665ee0 | [
"MIT"
] | null | null | null | src/experiment3.py | jbrowarczyk/jb-masters-thesis | c345f43b32126d16f10c3706f5f798fde0665ee0 | [
"MIT"
] | null | null | null | from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from global_settings import TRAIN_VERBOSE
from utils import make_train_data, make_test_data, save_txt
import numpy as np
import joblib
import os
# Output directory name and prefix for all result/model files.
EXPERIMENT_NAME = "experiment3"
# Feature sets to evaluate; each name corresponds to a "<name>_stats.npy"
# data file and a "pca_<name>_stats" fitted-PCA file on disk.
FEATURES = ["ar_16","ar_24","dwt","dwt_stat","welch_16","welch_32","welch_64"]
# Hyper-parameter grid for the RBF-kernel SVM.
C_VALUES = [0.01,0.1,1,10,100]
GAMMA_VALUES = [0.1, 1, 10]
# (feature, C, gamma) triples that are skipped during the sweep.
SKIP_COMBINATIONS = set([('dwt',10,0.1),('dwt',10,1),('dwt',10,10),
                         ('dwt',100,0.1),('dwt',100,1),('dwt',100,10),
                         ('dwt_stat',10,10),
                         ('ar_24',100,0.1),('ar_24',100,1),('ar_24',100,10)])
SAVE_RESULTS = True # saves results in single file using joblib library
SAVE_RESULTS_TXT = True # saves results in .txt file
SAVE_MODEL = False # saves trained model
def experiment_svm_rbf(train_data,train_data_classes,test_data,test_data_classes,c,gamma,verbose):
    """Train an RBF-kernel SVM and evaluate it on the test split.

    Returns a ``(metrics, model)`` pair where ``metrics`` holds the
    predictions, accuracy, classification reports and confusion matrix.
    On any failure the exception is printed and ``(None, None)`` is
    returned instead of raising.
    """
    try:
        classifier = SVC(C=c, kernel='rbf', gamma=gamma, verbose=verbose)
        classifier.fit(train_data, train_data_classes)
        predictions = classifier.predict(test_data)
        metrics = {
            'results': predictions,
            'accuracy_score': accuracy_score(test_data_classes, predictions),
            'classification_report': classification_report(
                test_data_classes, predictions, digits=4, output_dict=False),
            'classification_report_dict': classification_report(
                test_data_classes, predictions, output_dict=True),
            'confusion_matrix': confusion_matrix(test_data_classes, predictions),
        }
        return metrics, classifier
    except Exception as e:
        print(e)
        return None, None
def main():
    """Run the SVM-RBF grid search over every feature set and persist results.

    For each feature set: load the precomputed statistics, project them with
    the stored PCA transformer, then train/evaluate an SVM for every
    (C, gamma) pair not listed in SKIP_COMBINATIONS, saving results, text
    reports and (optionally) models under the EXPERIMENT_NAME directory.
    """
    if EXPERIMENT_NAME not in os.listdir():
        os.mkdir(EXPERIMENT_NAME)
    for feature in FEATURES:
        # Pre-bind c/gamma so the error message in the handler below is
        # well-defined even when the failure happens before the grid loops
        # start (previously this raised NameError and hid the real error).
        c = gamma = None
        try:
            # Load precomputed features and the PCA transformer fitted on them.
            data = np.load(feature + "_stats.npy", allow_pickle=True).item()
            pca = joblib.load("pca_" + feature + "_stats")
            train_data, train_data_classes = make_train_data(data, True)
            test_data, test_data_classes = make_test_data(data)
            train_data_pca = np.array(pca.transform(train_data))
            test_data_pca = np.array(pca.transform(test_data))
            for c in C_VALUES:
                for gamma in GAMMA_VALUES:
                    if (feature, c, gamma) in SKIP_COMBINATIONS:
                        print("Skipping " + feature + " SVM-rbf C = " + str(c) + " gamma = " + str(gamma))
                        continue
                    print("Computing " + feature + " SVM-rbf C = " + str(c) + " gamma = " + str(gamma))
                    res, model = experiment_svm_rbf(train_data_pca, train_data_classes,
                                                    test_data_pca, test_data_classes,
                                                    c, gamma, TRAIN_VERBOSE)
                    if res is not None:
                        # Common stem shared by all output files for this run.
                        basename = EXPERIMENT_NAME + "_" + feature + " svm_rbf_c_" + str(c) + "_gamma_" + str(gamma)
                        if SAVE_RESULTS:
                            joblib.dump(res, os.path.join(EXPERIMENT_NAME, basename + "_results"))
                        if SAVE_RESULTS_TXT:
                            save_txt(res, os.path.join(EXPERIMENT_NAME, basename + "_results.txt"))
                        if SAVE_MODEL:
                            joblib.dump(model, os.path.join(EXPERIMENT_NAME, basename + "_model"))
        except Exception as e:
            print("Error during " + EXPERIMENT_NAME + " " + feature + " SVM-RBF C = " + str(c) + " gamma = " + str(gamma))
            print(e)

if __name__ == "__main__":
    main()
| 36.244681 | 124 | 0.681538 | from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from global_settings import TRAIN_VERBOSE
from utils import make_train_data, make_test_data, save_txt
import numpy as np
import joblib
import os
EXPERIMENT_NAME = "experiment3"
# Feature-set names; main() loads "<name>_stats.npy" and "pca_<name>_stats"
# for each entry.
FEATURES = ["ar_16","ar_24","dwt","dwt_stat","welch_16","welch_32","welch_64"]
# Hyper-parameter grid for the RBF-kernel SVM.
C_VALUES = [0.01,0.1,1,10,100]
GAMMA_VALUES = [0.1, 1, 10]
# (feature, C, gamma) triples excluded from the grid search.
SKIP_COMBINATIONS = set([('dwt',10,0.1),('dwt',10,1),('dwt',10,10),
                         ('dwt',100,0.1),('dwt',100,1),('dwt',100,10),
                         ('dwt_stat',10,10),
                         ('ar_24',100,0.1),('ar_24',100,1),('ar_24',100,10)])
SAVE_RESULTS = True  # save the full result dict with joblib
SAVE_RESULTS_TXT = True  # additionally save a human-readable .txt report
SAVE_MODEL = False  # save the fitted SVM model
def experiment_svm_rbf(train_data,train_data_classes,test_data,test_data_classes,c,gamma,verbose):
    """Train an RBF-kernel SVM (C=c, gamma=gamma) and evaluate on the test split.

    Returns (metrics_dict, fitted_svm); on any failure the exception is
    printed and (None, None) is returned instead of raising.
    """
    try:
        svm = SVC(C=c,kernel='rbf',gamma=gamma,verbose=verbose)
        svm.fit(train_data,train_data_classes)
        results = svm.predict(test_data)
        # Collect evaluation artifacts: accuracy, text + dict reports, matrix.
        score = accuracy_score(test_data_classes,results)
        report = classification_report(test_data_classes,results,digits=4,output_dict=False)
        report_dict = classification_report(test_data_classes,results,output_dict=True)
        cm = confusion_matrix(test_data_classes,results)
        res = {}
        res['results'] = results
        res['accuracy_score'] = score
        res['classification_report'] = report
        res['classification_report_dict'] = report_dict
        res['confusion_matrix'] = cm
        return res,svm
    except Exception as e:
        print(e)
        return None,None
def main():
    """Run the SVM-RBF grid search over every feature set and persist results.

    For each feature set: load the precomputed statistics, project them with
    the stored PCA transformer, then train/evaluate an SVM for every
    (C, gamma) pair not listed in SKIP_COMBINATIONS, saving results, text
    reports and (optionally) models under the EXPERIMENT_NAME directory.
    """
    if EXPERIMENT_NAME not in os.listdir():
        os.mkdir(EXPERIMENT_NAME)
    for feature in FEATURES:
        # Pre-bind c/gamma so the error message in the handler below is
        # well-defined even when the failure happens before the grid loops
        # start (previously this raised NameError and hid the real error).
        c = gamma = None
        try:
            data = np.load(feature + "_stats.npy", allow_pickle=True).item()
            pca = joblib.load("pca_" + feature + "_stats")
            train_data, train_data_classes = make_train_data(data, True)
            test_data, test_data_classes = make_test_data(data)
            train_data_pca = np.array(pca.transform(train_data))
            test_data_pca = np.array(pca.transform(test_data))
            for c in C_VALUES:
                for gamma in GAMMA_VALUES:
                    if (feature, c, gamma) in SKIP_COMBINATIONS:
                        print("Skipping " + feature + " SVM-rbf C = " + str(c) + " gamma = " + str(gamma))
                        continue
                    print("Computing " + feature + " SVM-rbf C = " + str(c) + " gamma = " + str(gamma))
                    res, model = experiment_svm_rbf(train_data_pca, train_data_classes,
                                                    test_data_pca, test_data_classes,
                                                    c, gamma, TRAIN_VERBOSE)
                    if res is not None:
                        # Common stem shared by all output files for this run.
                        basename = EXPERIMENT_NAME + "_" + feature + " svm_rbf_c_" + str(c) + "_gamma_" + str(gamma)
                        if SAVE_RESULTS:
                            joblib.dump(res, os.path.join(EXPERIMENT_NAME, basename + "_results"))
                        if SAVE_RESULTS_TXT:
                            save_txt(res, os.path.join(EXPERIMENT_NAME, basename + "_results.txt"))
                        if SAVE_MODEL:
                            joblib.dump(model, os.path.join(EXPERIMENT_NAME, basename + "_model"))
        except Exception as e:
            print("Error during " + EXPERIMENT_NAME + " " + feature + " SVM-RBF C = " + str(c) + " gamma = " + str(gamma))
            print(e)

if __name__ == "__main__":
    main()
| true | true |
1c3a9025eae1873418a0515dcc1c81865dc85b2e | 1,054 | py | Python | hot_encoding/hot_encoding.py | JOHNKYON/Kaggle_Learn | 6a45931e4ec1e189b95c61e27e90499347840180 | [
"MIT"
] | null | null | null | hot_encoding/hot_encoding.py | JOHNKYON/Kaggle_Learn | 6a45931e4ec1e189b95c61e27e90499347840180 | [
"MIT"
] | null | null | null | hot_encoding/hot_encoding.py | JOHNKYON/Kaggle_Learn | 6a45931e4ec1e189b95c61e27e90499347840180 | [
"MIT"
] | null | null | null | """Python script for kaggle house price predict practice"""
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import Imputer
import pandas as pd
def main():
    """Train a random forest on the Kaggle house-price data and write a submission."""
    # Read the raw train/test CSVs.
    train_df = pd.read_csv('data/train.csv')
    test_df = pd.read_csv('data/test.csv')
    target = train_df.SalePrice
    features = train_df.drop(['SalePrice'], axis=1)
    holdout = test_df

    # One-hot encode categoricals and align both frames on the same columns.
    features = pd.get_dummies(features)
    holdout = pd.get_dummies(holdout)
    features, holdout = features.align(holdout, join='left', axis=1)

    # Fill missing values with an imputer fitted on the training set only.
    imputer = Imputer()
    features = imputer.fit_transform(features)
    holdout = imputer.transform(holdout)
    print(features)

    # Fit the model and predict prices for the holdout set.
    regressor = RandomForestRegressor()
    regressor.fit(features, target)
    predictions = regressor.predict(holdout)

    # Emit the Kaggle submission file.
    submission = pd.DataFrame({'Id': test_df.Id, 'SalePrice': predictions})
    submission.to_csv("hot_encoding/submission.csv", index=False)

if __name__ == '__main__':
    main()
| 25.707317 | 70 | 0.689753 |
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import Imputer
import pandas as pd
def main():
    """Train a random forest on the Kaggle house-price data and write a submission."""
    # Load the raw train/test CSVs and split off the target column.
    train_data = pd.read_csv('data/train.csv')
    test_data = pd.read_csv('data/test.csv')
    y_train = train_data.SalePrice
    x_train = train_data.drop(['SalePrice'], axis=1)
    x_test = test_data
    # One-hot encode categoricals and align both frames on the same columns.
    x_train = pd.get_dummies(x_train)
    x_test = pd.get_dummies(x_test)
    x_train, x_test = x_train.align(x_test, join='left', axis=1)
    # Impute missing values with a transformer fitted on the training set only.
    my_imputer = Imputer()
    x_train = my_imputer.fit_transform(x_train)
    x_test = my_imputer.transform(x_test)
    print(x_train)
    # Fit the model and predict prices for the test set.
    model = RandomForestRegressor()
    model.fit(x_train, y_train)
    pred = model.predict(x_test)
    # Emit the Kaggle submission file.
    submission = pd.DataFrame({'Id': test_data.Id, 'SalePrice': pred})
    submission.to_csv("hot_encoding/submission.csv", index=False)
if __name__ == '__main__':
    main()
| true | true |
1c3a91564e3c7ad9a2f962c279bae9bd74910638 | 36,430 | py | Python | tests/staticfiles_tests/tests.py | dsanders11/django-future-staticfiles | 47aef8cdcfc76d2099884fb0ed77e39e3974f78b | [
"BSD-3-Clause"
] | null | null | null | tests/staticfiles_tests/tests.py | dsanders11/django-future-staticfiles | 47aef8cdcfc76d2099884fb0ed77e39e3974f78b | [
"BSD-3-Clause"
] | null | null | null | tests/staticfiles_tests/tests.py | dsanders11/django-future-staticfiles | 47aef8cdcfc76d2099884fb0ed77e39e3974f78b | [
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import os
import posixpath
import shutil
import sys
import tempfile
import unittest
import django
from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.contrib.staticfiles.management.commands import collectstatic
from django.contrib.staticfiles.management.commands.collectstatic import \
Command as CollectstaticCommand
from django.core.cache.backends.base import BaseCache
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.template import Context, Template
from django.test import SimpleTestCase
# ``override_settings`` lives in different modules across Django versions;
# try ``django.test.utils`` first and fall back to the ``django.test`` export.
try:
    from django.test.utils import override_settings
except ImportError:  # was a bare ``except:``, which also swallowed SystemExit/KeyboardInterrupt
    from django.test import override_settings
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import empty
from django_future_staticfiles.storage import StaticFilesStorage
# Directory containing this test module; all fixture paths hang off it.
TEST_ROOT = os.path.dirname(upath(__file__))
TESTFILES_PATH = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test')

# Baseline settings applied (via @override_settings) to every test case below.
TEST_SETTINGS = {
    'DEBUG': True,
    'MEDIA_URL': '/media/',
    'STATIC_URL': '/static/',
    'MEDIA_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'media'),
    'STATIC_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'static'),
    'STATICFILES_DIRS': [
        os.path.join(TEST_ROOT, 'project', 'documents'),
        ('prefix', os.path.join(TEST_ROOT, 'project', 'prefixed')),
    ],
    'STATICFILES_FINDERS': [
        'django.contrib.staticfiles.finders.FileSystemFinder',
        'django.contrib.staticfiles.finders.AppDirectoriesFinder',
        'django.contrib.staticfiles.finders.DefaultStorageFinder',
    ],
    'INSTALLED_APPS': [
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.staticfiles',
        'staticfiles_tests',
        'staticfiles_tests.apps.test',
        'staticfiles_tests.apps.no_label',
    ],
}
class BaseStaticFilesTestCase(object):
    """
    Test case with a couple utility assertions.
    """
    def assertFileContains(self, filepath, text):
        # ``_get_file()`` is supplied by the concrete subclass (filesystem,
        # findstatic command, or HTTP response based).
        self.assertIn(text, self._get_file(force_text(filepath)),
                      "'%s' not in '%s'" % (text, filepath))

    def assertFileNotFound(self, filepath):
        self.assertRaises(IOError, self._get_file, filepath)

    def render_template(self, template, **kwargs):
        # Accept either a template source string or a compiled Template.
        if isinstance(template, six.string_types):
            template = Template(template)
        return template.render(Context(kwargs)).strip()

    def static_template_snippet(self, path, asvar=False):
        # Build a minimal template using the staticfiles {% static %} tag.
        if asvar:
            return "{%% load static from staticfiles %%}{%% static '%s' as var %%}{{ var }}" % path
        return "{%% load static from staticfiles %%}{%% static '%s' %%}" % path

    def assertStaticRenders(self, path, result, asvar=False, **kwargs):
        template = self.static_template_snippet(path, asvar)
        self.assertEqual(self.render_template(template, **kwargs), result)

    def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
        self.assertRaises(exc, self.assertStaticRenders, path, result, **kwargs)
@override_settings(**TEST_SETTINGS)
class StaticFilesTestCase(BaseStaticFilesTestCase, SimpleTestCase):
    # Concrete test case running under the shared TEST_SETTINGS overrides.
    pass
class BaseCollectionTestCase(BaseStaticFilesTestCase):
    """
    Tests shared by all file finding features (collectstatic,
    findstatic, and static serve view).

    This relies on the asserts defined in BaseStaticFilesTestCase, but
    is separated because some test cases need those asserts without
    all these tests.
    """
    def setUp(self):
        super(BaseCollectionTestCase, self).setUp()
        temp_dir = tempfile.mkdtemp()
        # Override the STATIC_ROOT for all tests from setUp to tearDown
        # rather than as a context manager
        self.patched_settings = self.settings(STATIC_ROOT=temp_dir)
        self.patched_settings.enable()
        self.run_collectstatic()
        # Same comment as in runtests.teardown.
        self.addCleanup(shutil.rmtree, six.text_type(temp_dir))

    def tearDown(self):
        self.patched_settings.disable()
        super(BaseCollectionTestCase, self).tearDown()

    def run_collectstatic(self, **kwargs):
        # Extra **kwargs are forwarded to the collectstatic command;
        # subclasses override this to pass options like clear/dry_run.
        call_command('collectstatic', interactive=False, verbosity=0,
                     ignore_patterns=['*.ignoreme'], **kwargs)

    def _get_file(self, filepath):
        # Read a collected file straight from the (temporary) STATIC_ROOT.
        assert filepath, 'filepath is empty.'
        filepath = os.path.join(settings.STATIC_ROOT, filepath)
        with codecs.open(filepath, "r", "utf-8") as f:
            return f.read()
class CollectionTestCase(BaseCollectionTestCase, StaticFilesTestCase):
    # Collectstatic-backed test case under the shared TEST_SETTINGS.
    pass
class TestDefaults(object):
    """
    A few standard test cases.

    Mixin: expects ``assertFileContains`` (and thus ``_get_file``) from the
    test case it is combined with.
    """
    def test_staticfiles_dirs(self):
        """
        Can find a file in a STATICFILES_DIRS directory.
        """
        self.assertFileContains('test.txt', 'Can we find')
        self.assertFileContains(os.path.join('prefix', 'test.txt'), 'Prefix')

    def test_staticfiles_dirs_subdir(self):
        """
        Can find a file in a subdirectory of a STATICFILES_DIRS
        directory.
        """
        self.assertFileContains('subdir/test.txt', 'Can we find')

    def test_staticfiles_dirs_priority(self):
        """
        File in STATICFILES_DIRS has priority over file in app.
        """
        self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')

    def test_app_files(self):
        """
        Can find a file in an app static/ directory.
        """
        self.assertFileContains('test/file1.txt', 'file1 in the app dir')

    def test_nonascii_filenames(self):
        """
        Can find a file with non-ASCII character in an app static/ directory.
        """
        self.assertFileContains('test/⊗.txt', '⊗ in the app dir')

    def test_camelcase_filenames(self):
        """
        Can find a file with capital letters.
        """
        self.assertFileContains('test/camelCase.txt', 'camelCase')
class TestFindStatic(CollectionTestCase, TestDefaults):
    """
    Test ``findstatic`` management command.
    """
    def _get_file(self, filepath):
        # Resolve the path through the findstatic command and read the
        # first reported match from disk.
        out = six.StringIO()
        call_command('findstatic', filepath, all=False, verbosity=0, stdout=out)
        out.seek(0)
        lines = [l.strip() for l in out.readlines()]
        with codecs.open(force_text(lines[0].strip()), "r", "utf-8") as f:
            return f.read()

    def test_all_files(self):
        """
        Test that findstatic returns all candidate files if run without --first and -v1.
        """
        out = six.StringIO()
        call_command('findstatic', 'test/file.txt', verbosity=1, stdout=out)
        out.seek(0)
        lines = [l.strip() for l in out.readlines()]
        self.assertEqual(len(lines), 3)  # three because there is also the "Found <file> here" line
        self.assertIn('project', force_text(lines[1]))
        self.assertIn('apps', force_text(lines[2]))

    def test_all_files_less_verbose(self):
        """
        Test that findstatic returns all candidate files if run without --first and -v0.
        """
        out = six.StringIO()
        call_command('findstatic', 'test/file.txt', verbosity=0, stdout=out)
        out.seek(0)
        lines = [l.strip() for l in out.readlines()]
        # At verbosity 0 only the candidate paths are printed, no header.
        self.assertEqual(len(lines), 2)
        self.assertIn('project', force_text(lines[0]))
        self.assertIn('apps', force_text(lines[1]))
class TestConfiguration(StaticFilesTestCase):
    """Configuration-error and storage-detection checks for collectstatic."""
    def test_location_empty(self):
        # collectstatic must refuse to run with an empty/None STATIC_ROOT.
        err = six.StringIO()
        for root in ['', None]:
            with override_settings(STATIC_ROOT=root):
                with six.assertRaisesRegex(
                        self, ImproperlyConfigured,
                        'without having set the STATIC_ROOT setting to a filesystem path'):
                    call_command('collectstatic', interactive=False, verbosity=0, stderr=err)

    def test_local_storage_detection_helper(self):
        staticfiles_storage = storage.staticfiles_storage
        try:
            # Resetting ``_wrapped`` to ``empty`` forces the lazy storage to
            # re-evaluate under the overridden STATICFILES_STORAGE setting.
            storage.staticfiles_storage._wrapped = empty
            with override_settings(STATICFILES_STORAGE='django_future_staticfiles.storage.StaticFilesStorage'):
                command = collectstatic.Command()
                self.assertTrue(command.is_local_storage())

            storage.staticfiles_storage._wrapped = empty
            with override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage'):
                command = collectstatic.Command()
                self.assertFalse(command.is_local_storage())

            # A plain (non-lazy) FileSystemStorage also counts as local.
            collectstatic.staticfiles_storage = storage.FileSystemStorage()
            command = collectstatic.Command()
            self.assertTrue(command.is_local_storage())
        finally:
            # Restore the module-level storage objects touched above.
            staticfiles_storage._wrapped = empty
            collectstatic.staticfiles_storage = staticfiles_storage
            storage.staticfiles_storage = staticfiles_storage
class TestCollection(CollectionTestCase, TestDefaults):
    """
    Test ``collectstatic`` management command.
    """
    def test_ignore(self):
        """
        Test that -i patterns are ignored.
        """
        # BaseCollectionTestCase.run_collectstatic passes
        # ignore_patterns=['*.ignoreme'].
        self.assertFileNotFound('test/test.ignoreme')

    def test_common_ignore_patterns(self):
        """
        Common ignore patterns (*~, .*, CVS) are ignored.
        """
        self.assertFileNotFound('test/.hidden')
        self.assertFileNotFound('test/backup~')
        self.assertFileNotFound('test/CVS')
class TestCollectionClear(CollectionTestCase):
    """
    Test the ``--clear`` option of the ``collectstatic`` management command.
    """
    def run_collectstatic(self, **kwargs):
        """Plant a stale file in STATIC_ROOT, then collect with ``--clear``."""
        clear_filepath = os.path.join(settings.STATIC_ROOT, 'cleared.txt')
        with open(clear_filepath, 'w') as f:
            f.write('should be cleared')
        # Forward **kwargs so extra collectstatic options passed by callers
        # are honored instead of being silently dropped (the base class
        # signature accepts **kwargs).
        super(TestCollectionClear, self).run_collectstatic(clear=True, **kwargs)

    def test_cleared_not_found(self):
        # The planted file must have been removed by --clear.
        self.assertFileNotFound('cleared.txt')
class TestCollectionExcludeNoDefaultIgnore(CollectionTestCase, TestDefaults):
    """
    Test ``--exclude-dirs`` and ``--no-default-ignore`` options of the
    ``collectstatic`` management command.
    """
    def run_collectstatic(self, **kwargs):
        # Accept and forward **kwargs for signature-compatibility with
        # BaseCollectionTestCase.run_collectstatic (previously dropped).
        super(TestCollectionExcludeNoDefaultIgnore, self).run_collectstatic(
            use_default_ignore_patterns=False, **kwargs)

    def test_no_common_ignore_patterns(self):
        """
        With --no-default-ignore, common ignore patterns (*~, .*, CVS)
        are not ignored.
        """
        self.assertFileContains('test/.hidden', 'should be ignored')
        self.assertFileContains('test/backup~', 'should be ignored')
        self.assertFileContains('test/CVS', 'should be ignored')
class TestNoFilesCreated(object):
    """Mixin asserting that collectstatic left STATIC_ROOT untouched."""
    def test_no_files_created(self):
        """
        Make sure no files were create in the destination directory.
        """
        # The destination directory must contain nothing at all.
        collected = os.listdir(settings.STATIC_ROOT)
        self.assertEqual(collected, [])
class TestCollectionDryRun(CollectionTestCase, TestNoFilesCreated):
    """
    Test ``--dry-run`` option for ``collectstatic`` management command.
    """
    def run_collectstatic(self, **kwargs):
        # Forward **kwargs for signature-compatibility with
        # BaseCollectionTestCase.run_collectstatic (previously dropped).
        super(TestCollectionDryRun, self).run_collectstatic(dry_run=True, **kwargs)
class TestCollectionFilesOverride(CollectionTestCase):
    """
    Test overriding duplicated files by ``collectstatic`` management command.
    Check for proper handling of apps order in installed apps even if file modification
    dates are in different order:
        'staticfiles_tests.apps.test',
        'staticfiles_tests.apps.no_label',
    """
    def setUp(self):
        self.orig_path = os.path.join(TEST_ROOT, 'apps', 'no_label', 'static', 'file2.txt')
        # get modification and access times for no_label/static/file2.txt
        self.orig_mtime = os.path.getmtime(self.orig_path)
        self.orig_atime = os.path.getatime(self.orig_path)

        # prepare duplicate of file2.txt from no_label app
        # this file will have modification time older than no_label/static/file2.txt
        # anyway it should be taken to STATIC_ROOT because 'test' app is before
        # 'no_label' app in installed apps
        self.testfile_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'file2.txt')
        with open(self.testfile_path, 'w+') as f:
            f.write('duplicate of file2.txt')
        os.utime(self.testfile_path, (self.orig_atime - 1, self.orig_mtime - 1))
        # setUp of the base class runs collectstatic, so the fixture file
        # above must exist before calling it.
        super(TestCollectionFilesOverride, self).setUp()

    def tearDown(self):
        if os.path.exists(self.testfile_path):
            os.unlink(self.testfile_path)
        # set back original modification time
        os.utime(self.orig_path, (self.orig_atime, self.orig_mtime))
        super(TestCollectionFilesOverride, self).tearDown()

    def test_ordering_override(self):
        """
        Test if collectstatic takes files in proper order
        """
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')

        # run collectstatic again
        self.run_collectstatic()
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')

        # and now change modification time of no_label/static/file2.txt
        # test app is first in installed apps so file2.txt should remain unmodified
        mtime = os.path.getmtime(self.testfile_path)
        atime = os.path.getatime(self.testfile_path)
        os.utime(self.orig_path, (mtime + 1, atime + 1))

        # run collectstatic again
        self.run_collectstatic()
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')
@override_settings(
    STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage',
)
class TestCollectionNonLocalStorage(CollectionTestCase, TestNoFilesCreated):
    """
    Tests for #15035

    With a non-local storage backend no files should appear in STATIC_ROOT
    (the actual assertions come from TestNoFilesCreated).
    """
    pass
def hashed_file_path(test, path):
    """Render ``{% static path %}`` for *test* and strip the STATIC_URL prefix."""
    rendered = test.render_template(test.static_template_snippet(path))
    return rendered.replace(settings.STATIC_URL, '')
class TestHashedFiles(object):
    """Shared tests for hash-suffixing storages (cached/manifest variants)."""
    # Module-level helper bound as a method; called as self.hashed_file_path(path).
    hashed_file_path = hashed_file_path

    def tearDown(self):
        # Clear hashed files to avoid side effects among tests.
        storage.staticfiles_storage.hashed_files.clear()

    def test_template_tag_return(self):
        """
        Test the CachedStaticFilesStorage backend.
        """
        self.assertStaticRaises(ValueError,
                                "does/not/exist.png",
                                "/static/does/not/exist.png")
        self.assertStaticRenders("test/file.txt",
                                 "/static/test/file.dad0999e4f8f.txt")
        self.assertStaticRenders("test/file.txt",
                                 "/static/test/file.dad0999e4f8f.txt", asvar=True)
        self.assertStaticRenders("cached/styles.css",
                                 "/static/cached/styles.bb84a0240107.css")
        self.assertStaticRenders("path/",
                                 "/static/path/")
        self.assertStaticRenders("path/?query",
                                 "/static/path/?query")

    def test_template_tag_simple_content(self):
        # References inside CSS must be rewritten to their hashed names.
        relpath = self.hashed_file_path("cached/styles.css")
        self.assertEqual(relpath, "cached/styles.bb84a0240107.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    def test_path_ignored_completely(self):
        # Fragment-only, scheme-prefixed and protocol-relative URLs are left alone.
        relpath = self.hashed_file_path("cached/css/ignored.css")
        self.assertEqual(relpath, "cached/css/ignored.6c77f2643390.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertIn(b'#foobar', content)
            self.assertIn(b'http:foobar', content)
            self.assertIn(b'https:foobar', content)
            self.assertIn(b'data:foobar', content)
            self.assertIn(b'//foobar', content)

    def test_path_with_querystring(self):
        relpath = self.hashed_file_path("cached/styles.css?spam=eggs")
        self.assertEqual(relpath,
                         "cached/styles.bb84a0240107.css?spam=eggs")
        with storage.staticfiles_storage.open(
                "cached/styles.bb84a0240107.css") as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    def test_path_with_fragment(self):
        relpath = self.hashed_file_path("cached/styles.css#eggs")
        self.assertEqual(relpath, "cached/styles.bb84a0240107.css#eggs")
        with storage.staticfiles_storage.open(
                "cached/styles.bb84a0240107.css") as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    def test_path_with_querystring_and_fragment(self):
        relpath = self.hashed_file_path("cached/css/fragments.css")
        self.assertEqual(relpath, "cached/css/fragments.75433540b096.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertIn(b'fonts/font.a4b0478549d0.eot?#iefix', content)
            self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content)
            self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content)
            self.assertIn(b'#default#VML', content)

    def test_template_tag_absolute(self):
        relpath = self.hashed_file_path("cached/absolute.css")
        self.assertEqual(relpath, "cached/absolute.ae9ef2716fe3.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"/static/cached/styles.css", content)
            self.assertIn(b"/static/cached/styles.bb84a0240107.css", content)
            self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content)

    def test_template_tag_denorm(self):
        # Denormalized paths (duplicate slashes, stray spaces) get rewritten too.
        relpath = self.hashed_file_path("cached/denorm.css")
        self.assertEqual(relpath, "cached/denorm.c5bd139ad821.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"..//cached///styles.css", content)
            self.assertIn(b"../cached/styles.bb84a0240107.css", content)
            self.assertNotIn(b"url(img/relative.png )", content)
            self.assertIn(b'url("img/relative.acae32e4532b.png', content)

    def test_template_tag_relative(self):
        relpath = self.hashed_file_path("cached/relative.css")
        self.assertEqual(relpath, "cached/relative.b0375bd89156.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"../cached/styles.css", content)
            self.assertNotIn(b'@import "styles.css"', content)
            self.assertNotIn(b'url(img/relative.png)', content)
            self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
            self.assertIn(b"../cached/styles.bb84a0240107.css", content)

    def test_import_replacement(self):
        "See #18050"
        relpath = self.hashed_file_path("cached/import.css")
        self.assertEqual(relpath, "cached/import.2b1d40b0bbd4.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b"""import url("styles.bb84a0240107.css")""", relfile.read())

    def test_template_tag_deep_relative(self):
        relpath = self.hashed_file_path("cached/css/window.css")
        self.assertEqual(relpath, "cached/css/window.3906afbb5a17.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b'url(img/window.png)', content)
            self.assertIn(b'url("img/window.acae32e4532b.png")', content)

    def test_template_tag_url(self):
        # Fully qualified external URLs must never be rewritten.
        relpath = self.hashed_file_path("cached/url.css")
        self.assertEqual(relpath, "cached/url.902310b73412.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b"https://", relfile.read())

    def test_post_processing(self):
        """Test that post_processing behaves correctly.

        Files that are alterable should always be post-processed; files that
        aren't should be skipped.

        collectstatic has already been called once in setUp() for this testcase,
        therefore we check by verifying behavior on a second run.
        """
        collectstatic_args = {
            'interactive': False,
            'verbosity': 0,
            'link': False,
            'clear': False,
            'dry_run': False,
            'post_process': True,
            'use_default_ignore_patterns': True,
            'ignore_patterns': ['*.ignoreme'],
        }

        collectstatic_cmd = CollectstaticCommand()
        collectstatic_cmd.set_options(**collectstatic_args)
        stats = collectstatic_cmd.collect()
        self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed'])
        self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified'])
        self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed'])

    def test_css_import_case_insensitive(self):
        # @IMPORT (upper-case) must be rewritten just like @import.
        relpath = self.hashed_file_path("cached/styles_insensitive.css")
        self.assertEqual(relpath, "cached/styles_insensitive.c609562b6d3c.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    @unittest.skipIf(django.get_version().startswith('1.6'),
                     "Added in Django 1.7")
    @override_settings(
        STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'faulty')],
        STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
    )
    def test_post_processing_failure(self):
        """
        Test that post_processing indicates the origin of the error when it
        fails. Regression test for #18986.
        """
        finders.get_finder.cache_clear()
        err = six.StringIO()
        with self.assertRaises(Exception):
            call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
        self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
    TEST_SETTINGS,
    STATICFILES_STORAGE='django_future_staticfiles.storage.CachedStaticFilesStorage',
    DEBUG=False,
))
class TestCollectionCachedStorage(TestHashedFiles, BaseCollectionTestCase,
                                  BaseStaticFilesTestCase, SimpleTestCase):
    """
    Tests for the Cache busting storage
    """
    def test_cache_invalidation(self):
        name = "cached/styles.css"
        hashed_name = "cached/styles.bb84a0240107.css"
        # check if the cache is filled correctly as expected
        cache_key = storage.staticfiles_storage.hash_key(name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(self.hashed_file_path(name), cached_name)
        # clearing the cache to make sure we re-set it correctly in the url method
        storage.staticfiles_storage.hashed_files.clear()
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(cached_name, None)
        # Resolving the URL repopulates the cache as a side effect.
        self.assertEqual(self.hashed_file_path(name), hashed_name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(cached_name, hashed_name)

    def test_cache_key_memcache_validation(self):
        """
        Handle cache key creation correctly, see #17861.
        """
        # Deliberately hostile name: long, spaces, punctuation and control
        # bytes — the derived key must still pass memcached validation.
        name = "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/" + "\x16" + "\xb4"
        cache_key = storage.staticfiles_storage.hash_key(name)
        cache_validator = BaseCache({})
        cache_validator.validate_key(cache_key)
        self.assertEqual(cache_key, 'staticfiles:821ea71ef36f95b3922a77f7364670e7')
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
    TEST_SETTINGS,
    STATICFILES_STORAGE='django_future_staticfiles.storage.ManifestStaticFilesStorage',
    DEBUG=False,
))
class TestCollectionManifestStorage(TestHashedFiles, BaseCollectionTestCase,
                                    BaseStaticFilesTestCase, SimpleTestCase):
    """
    Tests for the Cache busting storage
    """
    def setUp(self):
        super(TestCollectionManifestStorage, self).setUp()

        # Extra fixture file used by test_clear_empties_manifest; removed
        # there (and defensively in tearDown).
        self._clear_filename = os.path.join(TESTFILES_PATH, 'cleared.txt')
        with open(self._clear_filename, 'w') as f:
            f.write('to be deleted in one test')

    def tearDown(self):
        super(TestCollectionManifestStorage, self).tearDown()
        if os.path.exists(self._clear_filename):
            os.unlink(self._clear_filename)

    def test_manifest_exists(self):
        filename = storage.staticfiles_storage.manifest_name
        path = storage.staticfiles_storage.path(filename)
        self.assertTrue(os.path.exists(path))

    def test_loaded_cache(self):
        self.assertNotEqual(storage.staticfiles_storage.hashed_files, {})
        manifest_content = storage.staticfiles_storage.read_manifest()
        self.assertIn('"version": "%s"' %
                      storage.staticfiles_storage.manifest_version,
                      force_text(manifest_content))

    def test_parse_cache(self):
        # The in-memory map and the on-disk manifest must agree.
        hashed_files = storage.staticfiles_storage.hashed_files
        manifest = storage.staticfiles_storage.load_manifest()
        self.assertEqual(hashed_files, manifest)

    def test_clear_empties_manifest(self):
        cleared_file_name = os.path.join('test', 'cleared.txt')
        # collect the additional file
        self.run_collectstatic()

        hashed_files = storage.staticfiles_storage.hashed_files
        self.assertIn(cleared_file_name, hashed_files)

        manifest_content = storage.staticfiles_storage.load_manifest()
        self.assertIn(cleared_file_name, manifest_content)

        original_path = storage.staticfiles_storage.path(cleared_file_name)
        self.assertTrue(os.path.exists(original_path))

        # delete the original file form the app, collect with clear
        os.unlink(self._clear_filename)
        self.run_collectstatic(clear=True)

        self.assertFileNotFound(original_path)

        hashed_files = storage.staticfiles_storage.hashed_files
        self.assertNotIn(cleared_file_name, hashed_files)

        manifest_content = storage.staticfiles_storage.load_manifest()
        self.assertNotIn(cleared_file_name, manifest_content)
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
    TEST_SETTINGS,
    STATICFILES_STORAGE='staticfiles_tests.storage.SimpleCachedStaticFilesStorage',
    DEBUG=False,
))
class TestCollectionSimpleCachedStorage(BaseCollectionTestCase,
                                        BaseStaticFilesTestCase, SimpleTestCase):
    """
    Tests for the Cache busting storage

    Uses a storage with a fixed "deploy12345" suffix instead of a content hash.
    """
    hashed_file_path = hashed_file_path

    def test_template_tag_return(self):
        """
        Test the CachedStaticFilesStorage backend.
        """
        self.assertStaticRaises(ValueError,
                                "does/not/exist.png",
                                "/static/does/not/exist.png")
        self.assertStaticRenders("test/file.txt",
                                 "/static/test/file.deploy12345.txt")
        self.assertStaticRenders("cached/styles.css",
                                 "/static/cached/styles.deploy12345.css")
        self.assertStaticRenders("path/",
                                 "/static/path/")
        self.assertStaticRenders("path/?query",
                                 "/static/path/?query")

    def test_template_tag_simple_content(self):
        relpath = self.hashed_file_path("cached/styles.css")
        self.assertEqual(relpath, "cached/styles.deploy12345.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.deploy12345.css", content)
@override_settings(ROOT_URLCONF='staticfiles_tests.urls.default')
class TestServeStatic(StaticFilesTestCase):
    """
    Exercise the static-asset serving view.
    """
    def _response(self, filepath):
        # GET the asset at its URL relative to STATIC_URL.
        url = posixpath.join(settings.STATIC_URL, filepath)
        return self.client.get(url)

    def assertFileContains(self, filepath, text):
        response = self._response(filepath)
        self.assertContains(response, text)

    def assertFileNotFound(self, filepath):
        response = self._response(filepath)
        self.assertEqual(response.status_code, 404)
@unittest.skipIf(django.get_version().startswith('1.6'),
                 "Behavior changed in Django 1.7")
@override_settings(DEBUG=False)
class TestServeDisabled(TestServeStatic):
    """
    Test serving static files disabled when DEBUG is False.
    """
    def test_disabled_serving(self):
        # With DEBUG off the serve view refuses to work, so the asset 404s.
        self.assertFileNotFound('test.txt')
class TestServeStaticWithDefaultURL(TestServeStatic, TestDefaults):
    """
    Serve static assets through a manually configured URLconf; the docstring
    body is sufficient, all behaviour comes from the mixins.
    """
@override_settings(ROOT_URLCONF='staticfiles_tests.urls.helper')
class TestServeStaticWithURLHelper(TestServeStatic, TestDefaults):
    """
    Serve static assets via the staticfiles_urlpatterns() URLconf helper.
    """
    pass
class FinderTestCase(object):
    """
    Mixin shared by the finder tests.

    On Windows the case of the paths a finder returns can differ from the
    case of the paths we ask for; normalise both sides with
    os.path.normcase() before comparing to avoid false negatives.
    """
    def test_find_first(self):
        asked, expected = self.find_first
        result = self.finder.find(asked)
        self.assertEqual(os.path.normcase(result), os.path.normcase(expected))

    def test_find_all(self):
        asked, expected = self.find_all
        results = self.finder.find(asked, all=True)
        self.assertEqual([os.path.normcase(p) for p in results],
                         [os.path.normcase(p) for p in expected])
class TestFileSystemFinder(StaticFilesTestCase, FinderTestCase):
    """
    Test FileSystemFinder against a file in STATICFILES_DIRS.
    """
    def setUp(self):
        super(TestFileSystemFinder, self).setUp()
        self.finder = finders.FileSystemFinder()
        lookup = os.path.join('test', 'file.txt')
        target = os.path.join(TEST_ROOT, 'project', 'documents', 'test', 'file.txt')
        self.find_first = (lookup, target)
        self.find_all = (lookup, [target])
class TestAppDirectoriesFinder(StaticFilesTestCase, FinderTestCase):
    """
    Test AppDirectoriesFinder against a file in an app's static/ dir.
    """
    def setUp(self):
        super(TestAppDirectoriesFinder, self).setUp()
        self.finder = finders.AppDirectoriesFinder()
        lookup = os.path.join('test', 'file1.txt')
        target = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test', 'file1.txt')
        self.find_first = (lookup, target)
        self.find_all = (lookup, [target])
class TestDefaultStorageFinder(StaticFilesTestCase, FinderTestCase):
    """
    Test DefaultStorageFinder backed by a storage rooted at MEDIA_ROOT.
    """
    def setUp(self):
        super(TestDefaultStorageFinder, self).setUp()
        media_storage = StaticFilesStorage(location=settings.MEDIA_ROOT)
        self.finder = finders.DefaultStorageFinder(storage=media_storage)
        target = os.path.join(settings.MEDIA_ROOT, 'media-file.txt')
        self.find_first = ('media-file.txt', target)
        self.find_all = ('media-file.txt', [target])
@override_settings(
    STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
    STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'documents')],
)
class TestMiscFinder(SimpleTestCase):
    """
    A few misc finder tests.
    """
    def test_get_finder(self):
        # get_finder() resolves a dotted path to a finder instance.
        self.assertIsInstance(finders.get_finder(
            'django.contrib.staticfiles.finders.FileSystemFinder'),
            finders.FileSystemFinder)
    @unittest.skipIf(django.get_version().startswith('1.6'),
                     "'searched_locations' attribute added in Django 1.7")
    def test_searched_locations(self):
        # A find() miss records every location that was searched.
        finders.find('spam')
        self.assertEqual(finders.searched_locations,
                         [os.path.join(TEST_ROOT, 'project', 'documents')])
    @override_settings(STATICFILES_DIRS='a string')
    def test_non_tuple_raises_exception(self):
        """
        We can't determine if STATICFILES_DIRS is set correctly just by
        looking at the type, but we can determine if it's definitely wrong.
        """
        self.assertRaises(ImproperlyConfigured, finders.FileSystemFinder)
    @override_settings(MEDIA_ROOT='')
    def test_location_empty(self):
        # DefaultStorageFinder requires a usable MEDIA_ROOT.
        self.assertRaises(ImproperlyConfigured, finders.DefaultStorageFinder)
class TestTemplateTag(StaticFilesTestCase):
    """
    Basic rendering checks for the {% static %} template tag.
    """
    def test_template_tag(self):
        cases = [
            ("does/not/exist.png", "/static/does/not/exist.png"),
            ("testfile.txt", "/static/testfile.txt"),
        ]
        for path, url in cases:
            self.assertStaticRenders(path, url)
class CustomStaticFilesStorage(StaticFilesStorage):
    """
    Storage subclass used by TestStaticFilePermissions; forces explicit
    file and directory permission modes.
    """
    def __init__(self, *args, **kwargs):
        kwargs.update(
            file_permissions_mode=0o640,
            directory_permissions_mode=0o740,
        )
        super(CustomStaticFilesStorage, self).__init__(*args, **kwargs)
@unittest.skipIf(sys.platform.startswith('win'),
                 "Windows only partially supports chmod.")
class TestStaticFilePermissions(BaseCollectionTestCase, StaticFilesTestCase):
    """
    Check the permission bits collectstatic applies to collected files and
    directories, with the defaults and with a storage subclass that
    overrides the permission modes.
    """
    # Options passed straight to the collectstatic command in each test.
    command_params = {'interactive': False,
                      'post_process': True,
                      'verbosity': 0,
                      'ignore_patterns': ['*.ignoreme'],
                      'use_default_ignore_patterns': True,
                      'clear': False,
                      'link': False,
                      'dry_run': False}
    def setUp(self):
        # Pin the umask so the expected default modes are deterministic.
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        super(TestStaticFilePermissions, self).setUp()
    def tearDown(self):
        os.umask(self.old_umask)
        super(TestStaticFilePermissions, self).tearDown()
    # Don't run collectstatic command in this test class.
    def run_collectstatic(self, **kwargs):
        pass
    @override_settings(FILE_UPLOAD_PERMISSIONS=None,
                       FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_collect_static_files_default_permissions(self):
        """With no configured permissions, modes fall back to the umask."""
        collectstatic.Command().execute(**self.command_params)
        test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
        test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
        # os.stat(...)[0] is st_mode; mask off everything but rwx bits.
        file_mode = os.stat(test_file)[0] & 0o777
        dir_mode = os.stat(test_dir)[0] & 0o777
        self.assertEqual(file_mode, 0o666 & ~self.umask)
        self.assertEqual(dir_mode, 0o777 & ~self.umask)
    @unittest.skipIf(django.get_version().startswith('1.6'),
                     "'filepermissions' keyword arg added in Django 1.7")
    @override_settings(FILE_UPLOAD_PERMISSIONS=0o655,
                       FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
                       STATICFILES_STORAGE='staticfiles_tests.tests.CustomStaticFilesStorage')
    def test_collect_static_files_subclass_of_static_storage(self):
        """A storage subclass's explicit modes win over the settings."""
        collectstatic.Command().execute(**self.command_params)
        test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
        test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
        file_mode = os.stat(test_file)[0] & 0o777
        dir_mode = os.stat(test_dir)[0] & 0o777
        self.assertEqual(file_mode, 0o640)
        self.assertEqual(dir_mode, 0o740)
| 39.989023 | 372 | 0.668542 |
from __future__ import unicode_literals
import codecs
import os
import posixpath
import shutil
import sys
import tempfile
import unittest
import django
from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.contrib.staticfiles.management.commands import collectstatic
from django.contrib.staticfiles.management.commands.collectstatic import \
Command as CollectstaticCommand
from django.core.cache.backends.base import BaseCache
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.template import Context, Template
from django.test import SimpleTestCase
try:
from django.test.utils import override_settings
except:
from django.test import override_settings
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import empty
from django_future_staticfiles.storage import StaticFilesStorage
TEST_ROOT = os.path.dirname(upath(__file__))
TESTFILES_PATH = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test')
TEST_SETTINGS = {
'DEBUG': True,
'MEDIA_URL': '/media/',
'STATIC_URL': '/static/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'media'),
'STATIC_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'static'),
'STATICFILES_DIRS': [
os.path.join(TEST_ROOT, 'project', 'documents'),
('prefix', os.path.join(TEST_ROOT, 'project', 'prefixed')),
],
'STATICFILES_FINDERS': [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
],
'INSTALLED_APPS': [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'staticfiles_tests',
'staticfiles_tests.apps.test',
'staticfiles_tests.apps.no_label',
],
}
class BaseStaticFilesTestCase(object):
def assertFileContains(self, filepath, text):
self.assertIn(text, self._get_file(force_text(filepath)),
"'%s' not in '%s'" % (text, filepath))
def assertFileNotFound(self, filepath):
self.assertRaises(IOError, self._get_file, filepath)
def render_template(self, template, **kwargs):
if isinstance(template, six.string_types):
template = Template(template)
return template.render(Context(kwargs)).strip()
def static_template_snippet(self, path, asvar=False):
if asvar:
return "{%% load static from staticfiles %%}{%% static '%s' as var %%}{{ var }}" % path
return "{%% load static from staticfiles %%}{%% static '%s' %%}" % path
def assertStaticRenders(self, path, result, asvar=False, **kwargs):
template = self.static_template_snippet(path, asvar)
self.assertEqual(self.render_template(template, **kwargs), result)
def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
self.assertRaises(exc, self.assertStaticRenders, path, result, **kwargs)
@override_settings(**TEST_SETTINGS)
class StaticFilesTestCase(BaseStaticFilesTestCase, SimpleTestCase):
pass
class BaseCollectionTestCase(BaseStaticFilesTestCase):
def setUp(self):
super(BaseCollectionTestCase, self).setUp()
temp_dir = tempfile.mkdtemp()
self.patched_settings = self.settings(STATIC_ROOT=temp_dir)
self.patched_settings.enable()
self.run_collectstatic()
self.addCleanup(shutil.rmtree, six.text_type(temp_dir))
def tearDown(self):
self.patched_settings.disable()
super(BaseCollectionTestCase, self).tearDown()
def run_collectstatic(self, **kwargs):
call_command('collectstatic', interactive=False, verbosity=0,
ignore_patterns=['*.ignoreme'], **kwargs)
def _get_file(self, filepath):
assert filepath, 'filepath is empty.'
filepath = os.path.join(settings.STATIC_ROOT, filepath)
with codecs.open(filepath, "r", "utf-8") as f:
return f.read()
class CollectionTestCase(BaseCollectionTestCase, StaticFilesTestCase):
pass
class TestDefaults(object):
def test_staticfiles_dirs(self):
self.assertFileContains('test.txt', 'Can we find')
self.assertFileContains(os.path.join('prefix', 'test.txt'), 'Prefix')
def test_staticfiles_dirs_subdir(self):
self.assertFileContains('subdir/test.txt', 'Can we find')
def test_staticfiles_dirs_priority(self):
self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')
def test_app_files(self):
self.assertFileContains('test/file1.txt', 'file1 in the app dir')
def test_nonascii_filenames(self):
self.assertFileContains('test/⊗.txt', '⊗ in the app dir')
def test_camelcase_filenames(self):
self.assertFileContains('test/camelCase.txt', 'camelCase')
class TestFindStatic(CollectionTestCase, TestDefaults):
def _get_file(self, filepath):
out = six.StringIO()
call_command('findstatic', filepath, all=False, verbosity=0, stdout=out)
out.seek(0)
lines = [l.strip() for l in out.readlines()]
with codecs.open(force_text(lines[0].strip()), "r", "utf-8") as f:
return f.read()
def test_all_files(self):
out = six.StringIO()
call_command('findstatic', 'test/file.txt', verbosity=1, stdout=out)
out.seek(0)
lines = [l.strip() for l in out.readlines()]
self.assertEqual(len(lines), 3)
self.assertIn('project', force_text(lines[1]))
self.assertIn('apps', force_text(lines[2]))
def test_all_files_less_verbose(self):
out = six.StringIO()
call_command('findstatic', 'test/file.txt', verbosity=0, stdout=out)
out.seek(0)
lines = [l.strip() for l in out.readlines()]
self.assertEqual(len(lines), 2)
self.assertIn('project', force_text(lines[0]))
self.assertIn('apps', force_text(lines[1]))
class TestConfiguration(StaticFilesTestCase):
def test_location_empty(self):
err = six.StringIO()
for root in ['', None]:
with override_settings(STATIC_ROOT=root):
with six.assertRaisesRegex(
self, ImproperlyConfigured,
'without having set the STATIC_ROOT setting to a filesystem path'):
call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
def test_local_storage_detection_helper(self):
staticfiles_storage = storage.staticfiles_storage
try:
storage.staticfiles_storage._wrapped = empty
with override_settings(STATICFILES_STORAGE='django_future_staticfiles.storage.StaticFilesStorage'):
command = collectstatic.Command()
self.assertTrue(command.is_local_storage())
storage.staticfiles_storage._wrapped = empty
with override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage'):
command = collectstatic.Command()
self.assertFalse(command.is_local_storage())
collectstatic.staticfiles_storage = storage.FileSystemStorage()
command = collectstatic.Command()
self.assertTrue(command.is_local_storage())
finally:
staticfiles_storage._wrapped = empty
collectstatic.staticfiles_storage = staticfiles_storage
storage.staticfiles_storage = staticfiles_storage
class TestCollection(CollectionTestCase, TestDefaults):
def test_ignore(self):
self.assertFileNotFound('test/test.ignoreme')
def test_common_ignore_patterns(self):
self.assertFileNotFound('test/.hidden')
self.assertFileNotFound('test/backup~')
self.assertFileNotFound('test/CVS')
class TestCollectionClear(CollectionTestCase):
def run_collectstatic(self, **kwargs):
clear_filepath = os.path.join(settings.STATIC_ROOT, 'cleared.txt')
with open(clear_filepath, 'w') as f:
f.write('should be cleared')
super(TestCollectionClear, self).run_collectstatic(clear=True)
def test_cleared_not_found(self):
self.assertFileNotFound('cleared.txt')
class TestCollectionExcludeNoDefaultIgnore(CollectionTestCase, TestDefaults):
def run_collectstatic(self):
super(TestCollectionExcludeNoDefaultIgnore, self).run_collectstatic(
use_default_ignore_patterns=False)
def test_no_common_ignore_patterns(self):
self.assertFileContains('test/.hidden', 'should be ignored')
self.assertFileContains('test/backup~', 'should be ignored')
self.assertFileContains('test/CVS', 'should be ignored')
class TestNoFilesCreated(object):
def test_no_files_created(self):
self.assertEqual(os.listdir(settings.STATIC_ROOT), [])
class TestCollectionDryRun(CollectionTestCase, TestNoFilesCreated):
def run_collectstatic(self):
super(TestCollectionDryRun, self).run_collectstatic(dry_run=True)
class TestCollectionFilesOverride(CollectionTestCase):
def setUp(self):
self.orig_path = os.path.join(TEST_ROOT, 'apps', 'no_label', 'static', 'file2.txt')
self.orig_mtime = os.path.getmtime(self.orig_path)
self.orig_atime = os.path.getatime(self.orig_path)
self.testfile_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'file2.txt')
with open(self.testfile_path, 'w+') as f:
f.write('duplicate of file2.txt')
os.utime(self.testfile_path, (self.orig_atime - 1, self.orig_mtime - 1))
super(TestCollectionFilesOverride, self).setUp()
def tearDown(self):
if os.path.exists(self.testfile_path):
os.unlink(self.testfile_path)
os.utime(self.orig_path, (self.orig_atime, self.orig_mtime))
super(TestCollectionFilesOverride, self).tearDown()
def test_ordering_override(self):
self.assertFileContains('file2.txt', 'duplicate of file2.txt')
self.run_collectstatic()
self.assertFileContains('file2.txt', 'duplicate of file2.txt')
mtime = os.path.getmtime(self.testfile_path)
atime = os.path.getatime(self.testfile_path)
os.utime(self.orig_path, (mtime + 1, atime + 1))
self.run_collectstatic()
self.assertFileContains('file2.txt', 'duplicate of file2.txt')
@override_settings(
STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage',
)
class TestCollectionNonLocalStorage(CollectionTestCase, TestNoFilesCreated):
pass
def hashed_file_path(test, path):
fullpath = test.render_template(test.static_template_snippet(path))
return fullpath.replace(settings.STATIC_URL, '')
class TestHashedFiles(object):
hashed_file_path = hashed_file_path
def tearDown(self):
storage.staticfiles_storage.hashed_files.clear()
def test_template_tag_return(self):
self.assertStaticRaises(ValueError,
"does/not/exist.png",
"/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt",
"/static/test/file.dad0999e4f8f.txt")
self.assertStaticRenders("test/file.txt",
"/static/test/file.dad0999e4f8f.txt", asvar=True)
self.assertStaticRenders("cached/styles.css",
"/static/cached/styles.bb84a0240107.css")
self.assertStaticRenders("path/",
"/static/path/")
self.assertStaticRenders("path/?query",
"/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.bb84a0240107.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_ignored_completely(self):
relpath = self.hashed_file_path("cached/css/ignored.css")
self.assertEqual(relpath, "cached/css/ignored.6c77f2643390.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'#foobar', content)
self.assertIn(b'http:foobar', content)
self.assertIn(b'https:foobar', content)
self.assertIn(b'data:foobar', content)
self.assertIn(b'//foobar', content)
def test_path_with_querystring(self):
relpath = self.hashed_file_path("cached/styles.css?spam=eggs")
self.assertEqual(relpath,
"cached/styles.bb84a0240107.css?spam=eggs")
with storage.staticfiles_storage.open(
"cached/styles.bb84a0240107.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_with_fragment(self):
relpath = self.hashed_file_path("cached/styles.css#eggs")
self.assertEqual(relpath, "cached/styles.bb84a0240107.css#eggs")
with storage.staticfiles_storage.open(
"cached/styles.bb84a0240107.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_with_querystring_and_fragment(self):
relpath = self.hashed_file_path("cached/css/fragments.css")
self.assertEqual(relpath, "cached/css/fragments.75433540b096.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'fonts/font.a4b0478549d0.eot?#iefix', content)
self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content)
self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content)
self.assertIn(b'#default#VML', content)
def test_template_tag_absolute(self):
relpath = self.hashed_file_path("cached/absolute.css")
self.assertEqual(relpath, "cached/absolute.ae9ef2716fe3.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"/static/cached/styles.css", content)
self.assertIn(b"/static/cached/styles.bb84a0240107.css", content)
self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content)
def test_template_tag_denorm(self):
relpath = self.hashed_file_path("cached/denorm.css")
self.assertEqual(relpath, "cached/denorm.c5bd139ad821.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"..//cached///styles.css", content)
self.assertIn(b"../cached/styles.bb84a0240107.css", content)
self.assertNotIn(b"url(img/relative.png )", content)
self.assertIn(b'url("img/relative.acae32e4532b.png', content)
def test_template_tag_relative(self):
relpath = self.hashed_file_path("cached/relative.css")
self.assertEqual(relpath, "cached/relative.b0375bd89156.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"../cached/styles.css", content)
self.assertNotIn(b'@import "styles.css"', content)
self.assertNotIn(b'url(img/relative.png)', content)
self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
self.assertIn(b"../cached/styles.bb84a0240107.css", content)
def test_import_replacement(self):
relpath = self.hashed_file_path("cached/import.css")
self.assertEqual(relpath, "cached/import.2b1d40b0bbd4.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"""import url("styles.bb84a0240107.css")""", relfile.read())
def test_template_tag_deep_relative(self):
relpath = self.hashed_file_path("cached/css/window.css")
self.assertEqual(relpath, "cached/css/window.3906afbb5a17.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b'url(img/window.png)', content)
self.assertIn(b'url("img/window.acae32e4532b.png")', content)
def test_template_tag_url(self):
relpath = self.hashed_file_path("cached/url.css")
self.assertEqual(relpath, "cached/url.902310b73412.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"https://", relfile.read())
def test_post_processing(self):
collectstatic_args = {
'interactive': False,
'verbosity': 0,
'link': False,
'clear': False,
'dry_run': False,
'post_process': True,
'use_default_ignore_patterns': True,
'ignore_patterns': ['*.ignoreme'],
}
collectstatic_cmd = CollectstaticCommand()
collectstatic_cmd.set_options(**collectstatic_args)
stats = collectstatic_cmd.collect()
self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed'])
self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified'])
self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed'])
def test_css_import_case_insensitive(self):
relpath = self.hashed_file_path("cached/styles_insensitive.css")
self.assertEqual(relpath, "cached/styles_insensitive.c609562b6d3c.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
@unittest.skipIf(django.get_version().startswith('1.6'),
"Added in Django 1.7")
@override_settings(
STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'faulty')],
STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
)
def test_post_processing_failure(self):
finders.get_finder.cache_clear()
err = six.StringIO()
with self.assertRaises(Exception):
call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='django_future_staticfiles.storage.CachedStaticFilesStorage',
DEBUG=False,
))
class TestCollectionCachedStorage(TestHashedFiles, BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
def test_cache_invalidation(self):
name = "cached/styles.css"
hashed_name = "cached/styles.bb84a0240107.css"
# check if the cache is filled correctly as expected
cache_key = storage.staticfiles_storage.hash_key(name)
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(self.hashed_file_path(name), cached_name)
# clearing the cache to make sure we re-set it correctly in the url method
storage.staticfiles_storage.hashed_files.clear()
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(cached_name, None)
self.assertEqual(self.hashed_file_path(name), hashed_name)
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(cached_name, hashed_name)
def test_cache_key_memcache_validation(self):
name = "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/" + "\x16" + "\xb4"
cache_key = storage.staticfiles_storage.hash_key(name)
cache_validator = BaseCache({})
cache_validator.validate_key(cache_key)
self.assertEqual(cache_key, 'staticfiles:821ea71ef36f95b3922a77f7364670e7')
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='django_future_staticfiles.storage.ManifestStaticFilesStorage',
DEBUG=False,
))
class TestCollectionManifestStorage(TestHashedFiles, BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
def setUp(self):
super(TestCollectionManifestStorage, self).setUp()
self._clear_filename = os.path.join(TESTFILES_PATH, 'cleared.txt')
with open(self._clear_filename, 'w') as f:
f.write('to be deleted in one test')
def tearDown(self):
super(TestCollectionManifestStorage, self).tearDown()
if os.path.exists(self._clear_filename):
os.unlink(self._clear_filename)
def test_manifest_exists(self):
filename = storage.staticfiles_storage.manifest_name
path = storage.staticfiles_storage.path(filename)
self.assertTrue(os.path.exists(path))
def test_loaded_cache(self):
self.assertNotEqual(storage.staticfiles_storage.hashed_files, {})
manifest_content = storage.staticfiles_storage.read_manifest()
self.assertIn('"version": "%s"' %
storage.staticfiles_storage.manifest_version,
force_text(manifest_content))
def test_parse_cache(self):
hashed_files = storage.staticfiles_storage.hashed_files
manifest = storage.staticfiles_storage.load_manifest()
self.assertEqual(hashed_files, manifest)
def test_clear_empties_manifest(self):
cleared_file_name = os.path.join('test', 'cleared.txt')
# collect the additional file
self.run_collectstatic()
hashed_files = storage.staticfiles_storage.hashed_files
self.assertIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertIn(cleared_file_name, manifest_content)
original_path = storage.staticfiles_storage.path(cleared_file_name)
self.assertTrue(os.path.exists(original_path))
# delete the original file form the app, collect with clear
os.unlink(self._clear_filename)
self.run_collectstatic(clear=True)
self.assertFileNotFound(original_path)
hashed_files = storage.staticfiles_storage.hashed_files
self.assertNotIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertNotIn(cleared_file_name, manifest_content)
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='staticfiles_tests.storage.SimpleCachedStaticFilesStorage',
DEBUG=False,
))
class TestCollectionSimpleCachedStorage(BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
hashed_file_path = hashed_file_path
def test_template_tag_return(self):
self.assertStaticRaises(ValueError,
"does/not/exist.png",
"/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt",
"/static/test/file.deploy12345.txt")
self.assertStaticRenders("cached/styles.css",
"/static/cached/styles.deploy12345.css")
self.assertStaticRenders("path/",
"/static/path/")
self.assertStaticRenders("path/?query",
"/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.deploy12345.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.deploy12345.css", content)
@override_settings(ROOT_URLCONF='staticfiles_tests.urls.default')
class TestServeStatic(StaticFilesTestCase):
def _response(self, filepath):
return self.client.get(
posixpath.join(settings.STATIC_URL, filepath))
def assertFileContains(self, filepath, text):
self.assertContains(self._response(filepath), text)
def assertFileNotFound(self, filepath):
self.assertEqual(self._response(filepath).status_code, 404)
@unittest.skipIf(django.get_version().startswith('1.6'),
"Behavior changed in Django 1.7")
@override_settings(DEBUG=False)
class TestServeDisabled(TestServeStatic):
def test_disabled_serving(self):
self.assertFileNotFound('test.txt')
class TestServeStaticWithDefaultURL(TestServeStatic, TestDefaults):
pass
@override_settings(ROOT_URLCONF='staticfiles_tests.urls.helper')
class TestServeStaticWithURLHelper(TestServeStatic, TestDefaults):
class FinderTestCase(object):
def test_find_first(self):
src, dst = self.find_first
found = self.finder.find(src)
self.assertEqual(os.path.normcase(found), os.path.normcase(dst))
def test_find_all(self):
src, dst = self.find_all
found = self.finder.find(src, all=True)
found = [os.path.normcase(f) for f in found]
dst = [os.path.normcase(d) for d in dst]
self.assertEqual(found, dst)
class TestFileSystemFinder(StaticFilesTestCase, FinderTestCase):
def setUp(self):
super(TestFileSystemFinder, self).setUp()
self.finder = finders.FileSystemFinder()
test_file_path = os.path.join(TEST_ROOT, 'project', 'documents', 'test', 'file.txt')
self.find_first = (os.path.join('test', 'file.txt'), test_file_path)
self.find_all = (os.path.join('test', 'file.txt'), [test_file_path])
class TestAppDirectoriesFinder(StaticFilesTestCase, FinderTestCase):
def setUp(self):
super(TestAppDirectoriesFinder, self).setUp()
self.finder = finders.AppDirectoriesFinder()
test_file_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test', 'file1.txt')
self.find_first = (os.path.join('test', 'file1.txt'), test_file_path)
self.find_all = (os.path.join('test', 'file1.txt'), [test_file_path])
class TestDefaultStorageFinder(StaticFilesTestCase, FinderTestCase):
def setUp(self):
super(TestDefaultStorageFinder, self).setUp()
self.finder = finders.DefaultStorageFinder(
storage=StaticFilesStorage(location=settings.MEDIA_ROOT))
test_file_path = os.path.join(settings.MEDIA_ROOT, 'media-file.txt')
self.find_first = ('media-file.txt', test_file_path)
self.find_all = ('media-file.txt', [test_file_path])
@override_settings(
STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'documents')],
)
class TestMiscFinder(SimpleTestCase):
def test_get_finder(self):
self.assertIsInstance(finders.get_finder(
'django.contrib.staticfiles.finders.FileSystemFinder'),
finders.FileSystemFinder)
@unittest.skipIf(django.get_version().startswith('1.6'),
"'searched_locations' attribute added in Django 1.7")
def test_searched_locations(self):
finders.find('spam')
self.assertEqual(finders.searched_locations,
[os.path.join(TEST_ROOT, 'project', 'documents')])
@override_settings(STATICFILES_DIRS='a string')
def test_non_tuple_raises_exception(self):
self.assertRaises(ImproperlyConfigured, finders.FileSystemFinder)
@override_settings(MEDIA_ROOT='')
def test_location_empty(self):
self.assertRaises(ImproperlyConfigured, finders.DefaultStorageFinder)
class TestTemplateTag(StaticFilesTestCase):
def test_template_tag(self):
self.assertStaticRenders("does/not/exist.png", "/static/does/not/exist.png")
self.assertStaticRenders("testfile.txt", "/static/testfile.txt")
class CustomStaticFilesStorage(StaticFilesStorage):
def __init__(self, *args, **kwargs):
kwargs['file_permissions_mode'] = 0o640
kwargs['directory_permissions_mode'] = 0o740
super(CustomStaticFilesStorage, self).__init__(*args, **kwargs)
@unittest.skipIf(sys.platform.startswith('win'),
                 "Windows only partially supports chmod.")
class TestStaticFilePermissions(BaseCollectionTestCase, StaticFilesTestCase):
    """Check the file/directory permission modes of collected static files."""

    # Arguments handed verbatim to collectstatic.Command().execute().
    command_params = {'interactive': False,
                      'post_process': True,
                      'verbosity': 0,
                      'ignore_patterns': ['*.ignoreme'],
                      'use_default_ignore_patterns': True,
                      'clear': False,
                      'link': False,
                      'dry_run': False}

    def setUp(self):
        """Force a known umask so default-permission tests are deterministic."""
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        super(TestStaticFilePermissions, self).setUp()

    def tearDown(self):
        """Restore the process umask."""
        os.umask(self.old_umask)
        super(TestStaticFilePermissions, self).tearDown()

    # Don't run collectstatic command in this test class.
    # (Each test invokes it explicitly with its own settings instead.)
    def run_collectstatic(self, **kwargs):
        pass

    @override_settings(FILE_UPLOAD_PERMISSIONS=None,
                       FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_collect_static_files_default_permissions(self):
        """With no explicit settings, modes follow the process umask."""
        collectstatic.Command().execute(**self.command_params)
        test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
        test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
        file_mode = os.stat(test_file)[0] & 0o777
        dir_mode = os.stat(test_dir)[0] & 0o777
        self.assertEqual(file_mode, 0o666 & ~self.umask)
        self.assertEqual(dir_mode, 0o777 & ~self.umask)

    @unittest.skipIf(django.get_version().startswith('1.6'),
                     "'filepermissions' keyword arg added in Django 1.7")
    @override_settings(FILE_UPLOAD_PERMISSIONS=0o655,
                       FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
                       STATICFILES_STORAGE='staticfiles_tests.tests.CustomStaticFilesStorage')
    def test_collect_static_files_subclass_of_static_storage(self):
        """Modes set by a storage subclass override the FILE_UPLOAD_* settings."""
        collectstatic.Command().execute(**self.command_params)
        test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
        test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
        file_mode = os.stat(test_file)[0] & 0o777
        dir_mode = os.stat(test_dir)[0] & 0o777
        # 0o640 / 0o740 are the modes hard-coded by CustomStaticFilesStorage.
        self.assertEqual(file_mode, 0o640)
        self.assertEqual(dir_mode, 0o740)
| true | true |
1c3a9281d90a51a0564b84bda147ed62f5c13d92 | 1,794 | py | Python | haiku_generation/src/models/embedding.py | nivethakesavan2203/haiku-generation | ef66c0aa5a5ffcfcfa26b8e993d3efdfcc1be804 | [
"MIT"
] | null | null | null | haiku_generation/src/models/embedding.py | nivethakesavan2203/haiku-generation | ef66c0aa5a5ffcfcfa26b8e993d3efdfcc1be804 | [
"MIT"
] | 3 | 2020-10-24T18:32:37.000Z | 2020-11-28T23:36:28.000Z | haiku_generation/src/models/embedding.py | nivethakesavan2203/haiku-generation | ef66c0aa5a5ffcfcfa26b8e993d3efdfcc1be804 | [
"MIT"
] | null | null | null | import torch
'''
one method:
load RoBERTa from torch.hub
import torch
roberta_torch = torch.hub.load('pytorch/fairseq', 'roberta.large')
roberta_torch.eval()
sentence = "I Love RoBERTa!!! I Love Pytorch!!!"
Apply Byte-Pair Encoding to input text, tokens should be a tensor
tokens_torch = roberta_torch.encode(sentence)
Extract features from RoBERTa using BPE text
embedding_torch = roberta_torch.extract_features(tokens_torch, return_all_hiddens=True)[0]
'''
'''
another method:
load RoBERTa from transformers, note it does not have .encode(), therefore we need RobertaTokenizer
import torch
from transformers import RobertaModel, RobertaTokenizer
tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
roberta_trans = RobertaModel.from_pretrained("roberta-large")
sentence = "I Love RoBERTa!!! I Love Pytorch!!!"
Apply Byte-Pair Encoding to input text with RobertaTokenizer, note that tokenizer.encode() returns to you a list, but we need our tokens to be a tensor
tokens_trans = torch.tensor([tokenizer.encode(sentence)])
Extract features from RobertaModel using BPE text
embedding_trans = roberta_trans.embeddings(tokens_trans)[0]
'''
class RobertaModel():
    """Thin wrapper around fairseq's pretrained RoBERTa-large that maps a
    string to its contextual feature tensor."""

    def __init__(self):
        # torch.hub.load downloads/caches the pretrained weights on first
        # use (requires network); eval() disables dropout and .cuda()
        # moves the model to GPU -- a CUDA device must be available.
        self.model = torch.hub.load('pytorch/fairseq', 'roberta.large')
        self.model.eval().cuda()

    def __call__(self, content):
        """Return RoBERTa features for the string *content*.

        Applies the model's byte-pair encoding, then takes element [0] of
        extract_features(..., return_all_hiddens=True) -- presumably the
        first of the returned hidden states; confirm against the fairseq
        RoBERTa documentation.
        """
        tokens = self.model.encode(content)
        embed = self.model.extract_features(tokens, return_all_hiddens=True)[0]
        return embed
if __name__ == '__main__':
    # example usage
    # NOTE: instantiating RobertaModel downloads the pretrained weights on
    # first run and requires a CUDA-capable GPU (see RobertaModel.__init__).
    roberta = RobertaModel()
    encoding = roberta('trees')
    encoding2 = roberta('test')
    encoding3 = roberta('go')
    encoding4 = roberta('sandwich')
    # Shapes differ per word because the BPE token count differs.
    print(encoding.shape)
    print(encoding)
    print(encoding2.shape)
    print(encoding3.shape)
| 29.9 | 151 | 0.741918 | import torch
class RobertaModel():
def __init__(self):
self.model = torch.hub.load('pytorch/fairseq', 'roberta.large')
self.model.eval().cuda()
def __call__(self, content):
tokens = self.model.encode(content)
embed = self.model.extract_features(tokens, return_all_hiddens=True)[0]
return embed
if __name__ == '__main__':
roberta = RobertaModel()
encoding = roberta('trees')
encoding2 = roberta('test')
encoding3 = roberta('go')
encoding4 = roberta('sandwich')
print(encoding.shape)
print(encoding)
print(encoding2.shape)
print(encoding3.shape)
| true | true |
1c3a929aa2844f2ac2e11633089511aa74cb0049 | 4,486 | py | Python | tools/automation/tests/run.py | KTH/azure-cli | 58aa9e320ea7c5213b4517172eaf71b3f5230fd6 | [
"MIT"
] | null | null | null | tools/automation/tests/run.py | KTH/azure-cli | 58aa9e320ea7c5213b4517172eaf71b3f5230fd6 | [
"MIT"
] | null | null | null | tools/automation/tests/run.py | KTH/azure-cli | 58aa9e320ea7c5213b4517172eaf71b3f5230fd6 | [
"MIT"
] | 1 | 2017-12-28T04:51:44.000Z | 2017-12-28T04:51:44.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
import os
import sys
from automation.utilities.path import filter_user_selected_modules_with_tests, get_repo_root
from automation.tests.nose_helper import get_nose_runner
from automation.utilities.path import get_test_results_dir
def get_unittest_runner(tests):
    """Build a runner that executes the given unittest test names.

    :param tests: iterable of test names ('TestClass' or
        'TestClass.test_method') relative to a single command module.
    :return: a callable taking a list of module paths; it runs the tests
        in that one module and returns True on success, False otherwise.

    Fixes vs. previous version: removed the unused ``subprocess`` import
    and the unreachable ``except CalledProcessError`` (nothing in the
    body runs a subprocess), and the result now uses
    ``TestResult.wasSuccessful()`` so test *errors* -- not only
    failures -- count as an unsuccessful run.
    """
    test_cases = list(tests)

    def _runner(module_paths):
        # Individual tests can only target one module at a time.
        if len(module_paths) > 1:
            print('When --test is given, no more than 1 module can be selected.')
            return False

        # Strip '<repo>/src/' then turn the remaining path into a dotted
        # module name ('command_modules/<name>/...' keeps only the tail).
        module_path = module_paths[0][len(os.path.join(get_repo_root(), 'src' + os.sep)):]
        if module_path.startswith('command_modules'):
            module_path = module_path.split(os.sep, 2)[-1].replace(os.sep, '.')
        else:
            module_path = module_path.split(os.sep, 1)[-1].replace(os.sep, '.')

        import unittest
        suite = unittest.TestLoader().loadTestsFromNames(
            ['{}.{}'.format(module_path, t) for t in test_cases])
        result = unittest.TextTestRunner().run(suite)
        # wasSuccessful() is False on failures *and* errors.
        return result.wasSuccessful()

    return _runner
def run_tests(modules, parallel, run_live, tests):
    """Run the automation tests for the selected modules.

    :param modules: list of (name, _, path) triples; the third element is
        the module path passed to the underlying runner.
    :param parallel: run nose tests in parallel (only used when *tests*
        is empty).
    :param run_live: export AZURE_TEST_RUN_LIVE so recorded tests hit the
        live services; also lengthens the per-process timeout.
    :param tests: specific test names; when given, a unittest-based
        single-module runner is used instead of nose.
    :return: the runner's result (truthy on success).
    """
    print('Run automation')
    print('Modules: {}'.format(', '.join(name for name, _, _ in modules)))

    # create test results folder
    test_results_folder = get_test_results_dir(with_timestamp=True, prefix='tests')

    # set environment variable
    if run_live:
        os.environ['AZURE_TEST_RUN_LIVE'] = 'True'

    if not tests:
        # the --test is not given, use nosetests to run entire module
        print('Drive test by nosetests')
        # Live runs talk to real services, hence the much longer timeout.
        runner = get_nose_runner(test_results_folder, parallel=parallel, process_timeout=3600 if run_live else 600)
    else:
        # the --test is given, use unittest to run single test
        print('Drive test by unittest')
        runner = get_unittest_runner(tests)

    # run tests
    result = runner([p for _, _, p in modules])

    return result
if __name__ == '__main__':
    # Command-line entry point: parse options, resolve the module list,
    # run the tests and exit 0/1 on success/failure.
    parse = argparse.ArgumentParser('Test tools')
    parse.add_argument('--module', dest='modules', nargs='+',
                       help='The modules of which the test to be run. Accept short names, except azure-cli, '
                            'azure-cli-core and azure-cli-nspkg. The modules list can also be set through environment '
                            'variable AZURE_CLI_TEST_MODULES. The value should be a string of space separated module '
                            'names. The environment variable will be overwritten by command line parameters.')
    parse.add_argument('--parallel', action='store_true',
                       help='Run the tests in parallel. This will affect the test output file.')
    parse.add_argument('--live', action='store_true', help='Run all the tests live.')
    parse.add_argument('--test', dest='tests', action='append',
                       help='The specific test to run in the given module. The string can represent a test class or a '
                            'test class and a test method name. Multiple tests can be given, but they should all '
                            'belong to one command modules.')
    parse.add_argument('--ci', dest='ci', action='store_true', help='Run the tests in CI mode.')
    args = parse.parse_args()

    if args.ci:
        # CI mode runs everything under the azure.cli namespace.
        print('Run tests in CI mode')
        selected_modules = [('CI mode', 'azure.cli', 'azure.cli')]
    else:
        # Fall back to the environment variable only when --module was
        # not given on the command line.
        if not args.modules and os.environ.get('AZURE_CLI_TEST_MODULES', None):
            print('Test modules list is parsed from environment variable AZURE_CLI_TEST_MODULES.')
            args.modules = [m.strip() for m in os.environ.get('AZURE_CLI_TEST_MODULES').split(',')]

        selected_modules = filter_user_selected_modules_with_tests(args.modules)
        if not selected_modules:
            parse.print_help()
            sys.exit(1)

    success = run_tests(selected_modules, parallel=args.parallel, run_live=args.live, tests=args.tests)

    sys.exit(0 if success else 1)
| 43.980392 | 119 | 0.626839 |
import argparse
import os
import sys
from automation.utilities.path import filter_user_selected_modules_with_tests, get_repo_root
from automation.tests.nose_helper import get_nose_runner
from automation.utilities.path import get_test_results_dir
def get_unittest_runner(tests):
test_cases = list(tests)
def _runner(module_paths):
from subprocess import check_call, CalledProcessError
if len(module_paths) > 1:
print('When --test is given, no more than 1 module can be selected.')
return False
module_path = module_paths[0][len(os.path.join(get_repo_root(), 'src' + os.sep)):]
if module_path.startswith('command_modules'):
module_path = module_path.split(os.sep, 2)[-1].replace(os.sep, '.')
else:
module_path = module_path.split(os.sep, 1)[-1].replace(os.sep, '.')
try:
import unittest
suite = unittest.TestLoader().loadTestsFromNames(['{}.{}'.format(module_path, t) for t in test_cases])
runner = unittest.TextTestRunner()
result = runner.run(suite)
return not result.failures
except CalledProcessError:
return False
return _runner
def run_tests(modules, parallel, run_live, tests):
print('Run automation')
print('Modules: {}'.format(', '.join(name for name, _, _ in modules)))
test_results_folder = get_test_results_dir(with_timestamp=True, prefix='tests')
if run_live:
os.environ['AZURE_TEST_RUN_LIVE'] = 'True'
if not tests:
print('Drive test by nosetests')
runner = get_nose_runner(test_results_folder, parallel=parallel, process_timeout=3600 if run_live else 600)
else:
print('Drive test by unittest')
runner = get_unittest_runner(tests)
result = runner([p for _, _, p in modules])
return result
if __name__ == '__main__':
parse = argparse.ArgumentParser('Test tools')
parse.add_argument('--module', dest='modules', nargs='+',
help='The modules of which the test to be run. Accept short names, except azure-cli, '
'azure-cli-core and azure-cli-nspkg. The modules list can also be set through environment '
'variable AZURE_CLI_TEST_MODULES. The value should be a string of space separated module '
'names. The environment variable will be overwritten by command line parameters.')
parse.add_argument('--parallel', action='store_true',
help='Run the tests in parallel. This will affect the test output file.')
parse.add_argument('--live', action='store_true', help='Run all the tests live.')
parse.add_argument('--test', dest='tests', action='append',
help='The specific test to run in the given module. The string can represent a test class or a '
'test class and a test method name. Multiple tests can be given, but they should all '
'belong to one command modules.')
parse.add_argument('--ci', dest='ci', action='store_true', help='Run the tests in CI mode.')
args = parse.parse_args()
if args.ci:
print('Run tests in CI mode')
selected_modules = [('CI mode', 'azure.cli', 'azure.cli')]
else:
if not args.modules and os.environ.get('AZURE_CLI_TEST_MODULES', None):
print('Test modules list is parsed from environment variable AZURE_CLI_TEST_MODULES.')
args.modules = [m.strip() for m in os.environ.get('AZURE_CLI_TEST_MODULES').split(',')]
selected_modules = filter_user_selected_modules_with_tests(args.modules)
if not selected_modules:
parse.print_help()
sys.exit(1)
success = run_tests(selected_modules, parallel=args.parallel, run_live=args.live, tests=args.tests)
sys.exit(0 if success else 1)
| true | true |
1c3a92d72255f09c347645c69476c6e68af4c9c1 | 19,357 | py | Python | galpy/potential/DiskSCFPotential.py | gusbeane/galpy | d6db971285f163456c81775fc2fdc7d75189762c | [
"BSD-3-Clause"
] | null | null | null | galpy/potential/DiskSCFPotential.py | gusbeane/galpy | d6db971285f163456c81775fc2fdc7d75189762c | [
"BSD-3-Clause"
] | null | null | null | galpy/potential/DiskSCFPotential.py | gusbeane/galpy | d6db971285f163456c81775fc2fdc7d75189762c | [
"BSD-3-Clause"
] | null | null | null | ###############################################################################
# DiskSCFPotential.py: Potential expansion for disk+halo potentials
###############################################################################
from pkg_resources import parse_version
import copy
import numpy
import scipy
_SCIPY_VERSION= parse_version(scipy.__version__)
if _SCIPY_VERSION < parse_version('0.10'): #pragma: no cover
from scipy.maxentropy import logsumexp
elif _SCIPY_VERSION < parse_version('0.19'): #pragma: no cover
from scipy.misc import logsumexp
else:
from scipy.special import logsumexp
from ..util import conversion
from .Potential import Potential
from .SCFPotential import SCFPotential, \
scf_compute_coeffs_axi, scf_compute_coeffs
class DiskSCFPotential(Potential):
"""Class that implements a basis-function-expansion technique for solving the Poisson equation for disk (+halo) systems. We solve the Poisson equation for a given density :math:`\\rho(R,\phi,z)` by introducing *K* helper function pairs :math:`[\\Sigma_i(R),h_i(z)]`, with :math:`h_i(z) = \mathrm{d}^2 H(z) / \mathrm{d} z^2` and search for solutions of the form
.. math::
\Phi(R,\phi,z = \Phi_{\mathrm{ME}}(R,\phi,z) + 4\pi G\sum_i \\Sigma_i(r)\,H_i(z)\,,
where :math:`r` is the spherical radius :math:`r^2 = R^2+z^2`. We can solve for :math:`\Phi_{\mathrm{ME}}(R,\phi,z)` by solving
.. math::
\\frac{\\Delta \Phi_{\mathrm{ME}}(R,\phi,z)}{4\pi G} = \\rho(R,\phi,z) - \sum_i\left\{ \Sigma_i(r)\,h_i(z) + \\frac{\mathrm{d}^2 \Sigma_i(r)}{\mathrm{d} r^2}\,H_i(z)+\\frac{2}{r}\,\\frac{\mathrm{d} \Sigma_i(r)}{\mathrm{d} r}\left[H_i(z)+z\,\\frac{\mathrm{d}H_i(z)}{\mathrm{d} z}\\right]\\right\}\,.
We solve this equation by using the :ref:`SCFPotential <scf_potential>` class and methods (:ref:`scf_compute_coeffs_axi <scf_compute_coeffs_axi>` or :ref:`scf_compute_coeffs <scf_compute_coeffs>` depending on whether :math:`\\rho(R,\phi,z)` is axisymmetric or not). This technique works very well if the disk portion of the potential can be exactly written as :math:`\\rho_{\mathrm{disk}} = \sum_i \Sigma_i(R)\,h_i(z)`, because the effective density on the right-hand side of this new Poisson equation is then not 'disky' and can be well represented using spherical harmonics. But the technique is general and can be used to compute the potential of any disk+halo potential; the closer the disk is to :math:`\\rho_{\mathrm{disk}} \\approx \sum_i \Sigma_i(R)\,h_i(z)`, the better the technique works.
This technique was introduced by `Kuijken & Dubinski (1995) <http://adsabs.harvard.edu/abs/1995MNRAS.277.1341K>`__ and was popularized by `Dehnen & Binney (1998) <http://adsabs.harvard.edu/abs/1998MNRAS.294..429D>`__. The current implementation is a slight generalization of the technique in those papers and uses the SCF approach of `Hernquist & Ostriker (1992)
<http://adsabs.harvard.edu/abs/1992ApJ...386..375H>`__ to solve the Poisson equation for :math:`\Phi_{\mathrm{ME}}(R,\phi,z)` rather than solving it on a grid using spherical harmonics and interpolating the solution (as done in `Dehnen & Binney 1998 <http://adsabs.harvard.edu/abs/1998MNRAS.294..429D>`__).
"""
def __init__(self,amp=1.,normalize=False,
dens= lambda R,z: 13.5*numpy.exp(-3.*R)\
*numpy.exp(-27.*numpy.fabs(z)),
Sigma={'type':'exp','h':1./3.,'amp':1.},
hz={'type':'exp','h':1./27.},
Sigma_amp=None,dSigmadR=None,d2SigmadR2=None,
Hz=None,dHzdz=None,
N=10,L=10,a=1.,radial_order=None,costheta_order=None,
phi_order=None,
ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a DiskSCF Potential
INPUT:
amp - amplitude to be applied to the potential (default: 1); cannot have units currently
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
dens= function of R,z[,phi optional] that gives the density [in natural units, cannot return a Quantity currently]
N=, L=, a=, radial_order=, costheta_order=, phi_order= keywords setting parameters for SCF solution for Phi_ME (see :ref:`scf_compute_coeffs_axi <scf_compute_coeffs_axi>` or :ref:`scf_compute_coeffs <scf_compute_coeffs>` depending on whether :math:`\\rho(R,\phi,z)` is axisymmetric or not)
Either:
(a) Sigma= Dictionary of surface density (example: {'type':'exp','h':1./3.,'amp':1.,'Rhole':0.} for amp x exp(-Rhole/R-R/h) )
hz= Dictionary of vertical profile, either 'exp' or 'sech2' (example {'type':'exp','h':1./27.} for exp(-|z|/h)/[2h], sech2 is sech^2(z/[2h])/[4h])
(b) Sigma= function of R that gives the surface density
dSigmadR= function that gives d Sigma / d R
d2SigmadR2= function that gives d^2 Sigma / d R^2
Sigma_amp= amplitude to apply to all Sigma functions
hz= function of z that gives the vertical profile
Hz= function of z such that d^2 Hz(z) / d z^2 = hz
dHzdz= function of z that gives d Hz(z) / d z
In both of these cases lists of arguments can be given for multiple disk components; can't mix (a) and (b) in these lists; if hz is a single item the same vertical profile is assumed for all Sigma
OUTPUT:
DiskSCFPotential object
HISTORY:
2016-12-26 - Written - Bovy (UofT)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo,amp_units=None)
a= conversion.parse_length(a,ro=self._ro)
# Parse and store given functions
self.isNonAxi= dens.__code__.co_argcount == 3
self._parse_Sigma(Sigma_amp,Sigma,dSigmadR,d2SigmadR2)
self._parse_hz(hz,Hz,dHzdz)
if self.isNonAxi:
self._inputdens= dens
else:
self._inputdens= lambda R,z,phi: dens(R,z)
# Solve Poisson equation for Phi_ME
if not self.isNonAxi:
dens_func= lambda R,z: phiME_dens(R,z,0.,self._inputdens,
self._Sigma,self._dSigmadR,
self._d2SigmadR2,
self._hz,self._Hz,
self._dHzdz,self._Sigma_amp)
Acos, Asin= scf_compute_coeffs_axi(dens_func,N,L,a=a,
radial_order=radial_order,
costheta_order=costheta_order)
else:
dens_func= lambda R,z,phi: phiME_dens(R,z,phi,self._inputdens,
self._Sigma,self._dSigmadR,
self._d2SigmadR2,
self._hz,self._Hz,
self._dHzdz,self._Sigma_amp)
Acos, Asin= scf_compute_coeffs(dens_func,N,L,a=a,
radial_order=radial_order,
costheta_order=costheta_order,
phi_order=phi_order)
self._phiME_dens_func= dens_func
self._scf= SCFPotential(amp=1.,Acos=Acos,Asin=Asin,a=a,ro=None,vo=None)
if not self._Sigma_dict is None and not self._hz_dict is None:
self.hasC= True
self.hasC_dens= True
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)):
self.normalize(normalize)
return None
def _parse_Sigma(self,Sigma_amp,Sigma,dSigmadR,d2SigmadR2):
"""
NAME:
_parse_Sigma
PURPOSE:
Parse the various input options for Sigma* functions
HISTORY:
2016-12-27 - Written - Bovy (UofT/CCA)
"""
if isinstance(Sigma,dict):
Sigma= [Sigma]
try:
nsigma= len(Sigma)
except TypeError:
Sigma_amp= [Sigma_amp]
Sigma= [Sigma]
dSigmadR= [dSigmadR]
d2SigmadR2= [d2SigmadR2]
nsigma= 1
self._nsigma= nsigma
self._Sigma_amp= Sigma_amp
self._Sigma= Sigma
self._dSigmadR= dSigmadR
self._d2SigmadR2= d2SigmadR2
if isinstance(Sigma[0],dict):
self._Sigma_dict= copy.copy(Sigma)
self._parse_Sigma_dict()
else:
self._Sigma_dict= None
return None
def _parse_Sigma_dict(self):
Sigma_amp, Sigma, dSigmadR, d2SigmadR2= [], [], [], []
for ii in range(self._nsigma):
ta, ts, tds, td2s= self._parse_Sigma_dict_indiv(self._Sigma[ii])
Sigma_amp.append(ta)
Sigma.append(ts)
dSigmadR.append(tds)
d2SigmadR2.append(td2s)
self._Sigma_amp= Sigma_amp
self._Sigma= Sigma
self._dSigmadR= dSigmadR
self._d2SigmadR2= d2SigmadR2
return None
def _parse_Sigma_dict_indiv(self,Sigma):
stype= Sigma.get('type','exp')
if stype == 'exp' and not 'Rhole' in Sigma:
rd= Sigma.get('h',1./3.)
ta= Sigma.get('amp',1.)
ts= lambda R, trd=rd: numpy.exp(-R/trd)
tds= lambda R, trd=rd: -numpy.exp(-R/trd)/trd
td2s= lambda R, trd=rd: numpy.exp(-R/trd)/trd**2.
elif stype == 'expwhole' or (stype == 'exp' and 'Rhole' in Sigma):
rd= Sigma.get('h',1./3.)
rm= Sigma.get('Rhole',0.5)
ta= Sigma.get('amp',1.)
ts= lambda R, trd=rd, trm=rm: numpy.exp(-trm/R-R/trd)
tds= lambda R, trd=rd, trm=rm: \
(trm/R**2.-1./trd)*numpy.exp(-trm/R-R/trd)
td2s= lambda R, trd=rd,trm=rm: \
((trm/R**2.-1./trd)**2.-2.*trm/R**3.)*numpy.exp(-trm/R-R/trd)
return (ta,ts,tds,td2s)
def _parse_hz(self,hz,Hz,dHzdz):
"""
NAME:
_parse_hz
PURPOSE:
Parse the various input options for Sigma* functions
HISTORY:
2016-12-27 - Written - Bovy (UofT/CCA)
"""
if isinstance(hz,dict):
hz= [hz]
try:
nhz= len(hz)
except TypeError:
hz= [hz]
Hz= [Hz]
dHzdz= [dHzdz]
nhz= 1
if nhz != self._nsigma and nhz != 1:
raise ValueError('Number of hz functions needs to be equal to the number of Sigma functions or to 1')
if nhz == 1 and self._nsigma > 1:
hz= [hz[0] for ii in range(self._nsigma)]
if not isinstance(hz[0],dict):
Hz= [Hz[0] for ii in range(self._nsigma)]
dHzdz= [dHzdz[0] for ii in range(self._nsigma)]
self._Hz= Hz
self._hz= hz
self._dHzdz= dHzdz
self._nhz= len(self._hz)
if isinstance(hz[0],dict):
self._hz_dict= copy.copy(hz)
self._parse_hz_dict()
else:
self._hz_dict= None
return None
def _parse_hz_dict(self):
hz, Hz, dHzdz= [], [], []
for ii in range(self._nhz):
th, tH, tdH= self._parse_hz_dict_indiv(self._hz[ii])
hz.append(th)
Hz.append(tH)
dHzdz.append(tdH)
self._hz= hz
self._Hz= Hz
self._dHzdz= dHzdz
return None
def _parse_hz_dict_indiv(self,hz):
htype= hz.get('type','exp')
if htype == 'exp':
zd= hz.get('h',0.0375)
th= lambda z, tzd=zd: 1./2./tzd*numpy.exp(-numpy.fabs(z)/tzd)
tH= lambda z, tzd= zd: (numpy.exp(-numpy.fabs(z)/tzd)-1.
+numpy.fabs(z)/tzd)*tzd/2.
tdH= lambda z, tzd= zd: 0.5*numpy.sign(z)\
*(1.-numpy.exp(-numpy.fabs(z)/tzd))
elif htype == 'sech2':
zd= hz.get('h',0.0375)
th= lambda z, tzd=zd: 1./numpy.cosh(z/2./tzd)**2./4./tzd
# Avoid overflow in cosh
tH= lambda z, tzd= zd: \
tzd*(logsumexp(numpy.array([z/2./tzd,-z/2./tzd]),axis=0)\
-numpy.log(2.))
tdH= lambda z, tzd= zd: numpy.tanh(z/2./tzd)/2.
return (th,tH,tdH)
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at (R,z, phi)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
potential at (R,z, phi)
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf(R,z,phi=phi,use_physical=False)
for a,s,H in zip(self._Sigma_amp,self._Sigma,self._Hz):
out+= 4.*numpy.pi*a*s(r)*H(z)
return out
def _Rforce(self,R,z,phi=0, t=0):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force at (R,z, phi)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
radial force at (R,z, phi)
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.Rforce(R,z,phi=phi,use_physical=False)
for a,ds,H in zip(self._Sigma_amp,self._dSigmadR,self._Hz):
out-= 4.*numpy.pi*a*ds(r)*H(z)*R/r
return out
def _zforce(self,R,z,phi=0,t=0):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force at (R,z, phi)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
vertical force at (R,z, phi)
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.zforce(R,z,phi=phi,use_physical=False)
for a,s,ds,H,dH in zip(self._Sigma_amp,self._Sigma,self._dSigmadR,
self._Hz,self._dHzdz):
out-= 4.*numpy.pi*a*(ds(r)*H(z)*z/r+s(r)*dH(z))
return out
def _phiforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2016-12-26 - Written - Bovy (UofT)
"""
return self._scf.phiforce(R,z,phi=phi,use_physical=False)
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.R2deriv(R,z,phi=phi,use_physical=False)
for a,ds,d2s,H in zip(self._Sigma_amp,self._dSigmadR,self._d2SigmadR2,
self._Hz):
out+= 4.*numpy.pi*a*H(z)/r**2.*(d2s(r)*R**2.+z**2./r*ds(r))
return out
def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second vertical derivative
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.z2deriv(R,z,phi=phi,use_physical=False)
for a,s,ds,d2s,h,H,dH in zip(self._Sigma_amp,
self._Sigma,self._dSigmadR,self._d2SigmadR2,
self._hz,self._Hz,self._dHzdz):
out+= 4.*numpy.pi*a*(H(z)/r**2.*(d2s(r)*z**2.+ds(r)*R**2./r)
+2.*ds(r)*dH(z)*z/r+s(r)*h(z))
return out
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.Rzderiv(R,z,phi=phi,use_physical=False)
for a,ds,d2s,H,dH in zip(self._Sigma_amp,self._dSigmadR,
self._d2SigmadR2,self._Hz,self._dHzdz):
out+= 4.*numpy.pi*a*(H(z)*R*z/r**2.*(d2s(r)-ds(r)/r)
+ds(r)*dH(z)*R/r)
return out
def _phi2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_phi2deriv
PURPOSE:
evaluate the second azimuthal derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second azimuthal derivative
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
return self._scf.phi2deriv(R,z,phi=phi,use_physical=False)
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density at (R,z, phi)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
density at (R,z, phi)
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.dens(R,z,phi=phi,use_physical=False)
for a,s,ds,d2s,h,H,dH in zip(self._Sigma_amp,self._Sigma,
self._dSigmadR,self._d2SigmadR2,
self._hz,self._Hz,self._dHzdz):
out+= a*(s(r)*h(z)+d2s(r)*H(z)+2./r*ds(r)*(H(z)+z*dH(z)))
return out
def phiME_dens(R, z, phi, dens, Sigma, dSigmadR, d2SigmadR2, hz, Hz, dHzdz, Sigma_amp):
    """The density corresponding to phi_ME: the input density minus the
    analytic contribution of every (Sigma_i, h_i) helper pair."""
    r = numpy.sqrt(R**2. + z**2.)
    disk_terms = (
        amp*(sig(r)*hfun(z) + d2sig(r)*Hfun(z)
             + 2./r*dsig(r)*(Hfun(z) + z*dHfun(z)))
        for amp, sig, dsig, d2sig, hfun, Hfun, dHfun
        in zip(Sigma_amp, Sigma, dSigmadR, d2SigmadR2, hz, Hz, dHzdz))
    return dens(R, z, phi) - sum(disk_terms)
| 40.495816 | 799 | 0.536498 | f._hz_dict= None
return None
def _parse_hz_dict(self):
hz, Hz, dHzdz= [], [], []
for ii in range(self._nhz):
th, tH, tdH= self._parse_hz_dict_indiv(self._hz[ii])
hz.append(th)
Hz.append(tH)
dHzdz.append(tdH)
self._hz= hz
self._Hz= Hz
self._dHzdz= dHzdz
return None
def _parse_hz_dict_indiv(self,hz):
htype= hz.get('type','exp')
if htype == 'exp':
zd= hz.get('h',0.0375)
th= lambda z, tzd=zd: 1./2./tzd*numpy.exp(-numpy.fabs(z)/tzd)
tH= lambda z, tzd= zd: (numpy.exp(-numpy.fabs(z)/tzd)-1.
+numpy.fabs(z)/tzd)*tzd/2.
tdH= lambda z, tzd= zd: 0.5*numpy.sign(z)\
*(1.-numpy.exp(-numpy.fabs(z)/tzd))
elif htype == 'sech2':
zd= hz.get('h',0.0375)
th= lambda z, tzd=zd: 1./numpy.cosh(z/2./tzd)**2./4./tzd
tH= lambda z, tzd= zd: \
tzd*(logsumexp(numpy.array([z/2./tzd,-z/2./tzd]),axis=0)\
-numpy.log(2.))
tdH= lambda z, tzd= zd: numpy.tanh(z/2./tzd)/2.
return (th,tH,tdH)
def _evaluate(self,R,z,phi=0.,t=0.):
r= numpy.sqrt(R**2.+z**2.)
out= self._scf(R,z,phi=phi,use_physical=False)
for a,s,H in zip(self._Sigma_amp,self._Sigma,self._Hz):
out+= 4.*numpy.pi*a*s(r)*H(z)
return out
def _Rforce(self,R,z,phi=0, t=0):
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.Rforce(R,z,phi=phi,use_physical=False)
for a,ds,H in zip(self._Sigma_amp,self._dSigmadR,self._Hz):
out-= 4.*numpy.pi*a*ds(r)*H(z)*R/r
return out
def _zforce(self,R,z,phi=0,t=0):
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.zforce(R,z,phi=phi,use_physical=False)
for a,s,ds,H,dH in zip(self._Sigma_amp,self._Sigma,self._dSigmadR,
self._Hz,self._dHzdz):
out-= 4.*numpy.pi*a*(ds(r)*H(z)*z/r+s(r)*dH(z))
return out
def _phiforce(self,R,z,phi=0.,t=0.):
return self._scf.phiforce(R,z,phi=phi,use_physical=False)
def _R2deriv(self,R,z,phi=0.,t=0.):
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.R2deriv(R,z,phi=phi,use_physical=False)
for a,ds,d2s,H in zip(self._Sigma_amp,self._dSigmadR,self._d2SigmadR2,
self._Hz):
out+= 4.*numpy.pi*a*H(z)/r**2.*(d2s(r)*R**2.+z**2./r*ds(r))
return out
def _z2deriv(self,R,z,phi=0.,t=0.):
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.z2deriv(R,z,phi=phi,use_physical=False)
for a,s,ds,d2s,h,H,dH in zip(self._Sigma_amp,
self._Sigma,self._dSigmadR,self._d2SigmadR2,
self._hz,self._Hz,self._dHzdz):
out+= 4.*numpy.pi*a*(H(z)/r**2.*(d2s(r)*z**2.+ds(r)*R**2./r)
+2.*ds(r)*dH(z)*z/r+s(r)*h(z))
return out
def _Rzderiv(self,R,z,phi=0.,t=0.):
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.Rzderiv(R,z,phi=phi,use_physical=False)
for a,ds,d2s,H,dH in zip(self._Sigma_amp,self._dSigmadR,
self._d2SigmadR2,self._Hz,self._dHzdz):
out+= 4.*numpy.pi*a*(H(z)*R*z/r**2.*(d2s(r)-ds(r)/r)
+ds(r)*dH(z)*R/r)
return out
def _phi2deriv(self,R,z,phi=0.,t=0.):
return self._scf.phi2deriv(R,z,phi=phi,use_physical=False)
def _dens(self,R,z,phi=0.,t=0.):
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.dens(R,z,phi=phi,use_physical=False)
for a,s,ds,d2s,h,H,dH in zip(self._Sigma_amp,self._Sigma,
self._dSigmadR,self._d2SigmadR2,
self._hz,self._Hz,self._dHzdz):
out+= a*(s(r)*h(z)+d2s(r)*H(z)+2./r*ds(r)*(H(z)+z*dH(z)))
return out
def phiME_dens(R,z,phi,dens,Sigma,dSigmadR,d2SigmadR2,hz,Hz,dHzdz,Sigma_amp):
r= numpy.sqrt(R**2.+z**2.)
out= dens(R,z,phi)
for a,s,ds,d2s,h,H,dH \
in zip(Sigma_amp,Sigma,dSigmadR,d2SigmadR2,hz,Hz,dHzdz):
out-= a*(s(r)*h(z)+d2s(r)*H(z)+2./r*ds(r)*(H(z)+z*dH(z)))
return out
| true | true |
1c3a93f1cd72a0f6b6fbfcf2ca8188fce01f5828 | 834 | py | Python | src/Tikzifyables/Decorationable.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 41 | 2021-11-24T05:54:08.000Z | 2022-03-26T10:19:30.000Z | src/Tikzifyables/Decorationable.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 1 | 2022-02-28T04:34:51.000Z | 2022-03-07T10:49:27.000Z | src/Tikzifyables/Decorationable.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 10 | 2021-11-24T07:35:17.000Z | 2022-03-25T18:42:14.000Z | import Constant as c
class Decorationable:
    """Mixin that renders an item's curve decoration as TikZ options.

    Reads only item["line"]["decoration"], which holds a 'type' and,
    depending on that type, either 'text' or 'amplitude'/'wavelength'
    (assumption from the accesses below -- confirm against the item
    schema).
    """

    def __init__(self, item):
        """Construct Decorationable."""
        # item: nested dict describing the drawable object.
        self.item = item

    def tikzify_decoration(self):
        """Turn curve decoration into tikz code."""
        # No decoration requested -> contribute no TikZ options.
        if self.item["line"]["decoration"]["type"] == c.DecorationType.NONE:
            return ''
        if self.item["line"]["decoration"]["type"] == c.DecorationType.TEXT_ALONG_CURVE:
            # Text typeset along the curve; the DecorationType constant is
            # interpolated as the TikZ decoration name.
            return 'decoration={%s, text={%s}}, decorate'\
                % (c.DecorationType.TEXT_ALONG_CURVE, self.item["line"]["decoration"]["text"])
        else:
            # Wavy decorations: item's 'wavelength' maps to TikZ
            # 'segment length'.
            return 'decoration={%s, amplitude=%s, segment length=%s}, decorate'\
                % (self.item["line"]["decoration"]["type"],\
                   self.item["line"]["decoration"]["amplitude"],\
                   self.item["line"]["decoration"]["wavelength"])
class Decorationable:
def __init__(self, item):
self.item = item
def tikzify_decoration(self):
if self.item["line"]["decoration"]["type"] == c.DecorationType.NONE:
return ''
if self.item["line"]["decoration"]["type"] == c.DecorationType.TEXT_ALONG_CURVE:
return 'decoration={%s, text={%s}}, decorate'\
% (c.DecorationType.TEXT_ALONG_CURVE, self.item["line"]["decoration"]["text"])
else:
return 'decoration={%s, amplitude=%s, segment length=%s}, decorate'\
% (self.item["line"]["decoration"]["type"],\
self.item["line"]["decoration"]["amplitude"],\
self.item["line"]["decoration"]["wavelength"])
| true | true |
1c3a9741c2cbff4bf9b26ce9cbf7b6d02d6e6f9d | 2,667 | py | Python | run_tests.py | Luvideria/lightmetrica-v3 | 3e83db59998e79648047bac29c37d8eb18d7600d | [
"MIT"
] | 101 | 2019-05-31T21:27:58.000Z | 2022-02-03T18:54:16.000Z | run_tests.py | Luvideria/lightmetrica-v3 | 3e83db59998e79648047bac29c37d8eb18d7600d | [
"MIT"
] | 11 | 2019-09-19T16:03:09.000Z | 2020-12-05T18:37:54.000Z | run_tests.py | Luvideria/lightmetrica-v3 | 3e83db59998e79648047bac29c37d8eb18d7600d | [
"MIT"
] | 14 | 2019-06-05T03:06:09.000Z | 2022-01-15T06:36:24.000Z | """Helper script to run all unit tests"""
import os
import sys
import json
import argparse
import subprocess as sp
from colorama import Fore, Back, Style
import pytest
from functest.run_all import run_functests
def add_bool_arg(parser, name, default):
    """Register a mutually exclusive --<name>/--no-<name> flag pair.

    Both flags share one destination (*name* with dashes turned into
    underscores); *default* is used when neither flag is given.
    cf. https://stackoverflow.com/a/31347222/3127098
    """
    dest = name.replace('-', '_')
    group = parser.add_mutually_exclusive_group(required=False)
    for flag, action in (('--' + name, 'store_true'),
                         ('--no-' + name, 'store_false')):
        group.add_argument(flag, dest=dest, action=action)
    parser.set_defaults(**{dest: default})
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Execute all unit tests')
    parser.add_argument('--lmenv', type=str, help='Path to .lmenv file')
    parser.add_argument('--output-dir', nargs='?', type=str, default='executed_functest', help='Output directory of executed notebooks')
    add_bool_arg(parser, 'cpp-unit', True)
    add_bool_arg(parser, 'python-unit', True)
    add_bool_arg(parser, 'functest', False)
    args = parser.parse_args()
    # Read the .lmenv file describing the build; 'bin_path' is used below.
    with open(args.lmenv) as f:
        config = json.load(f)
    # On Linux the test binary needs the built shared libraries, which are
    # not on the default loader path, so extend LD_LIBRARY_PATH for the
    # subprocess.  NOTE(review): `env` only exists on Linux, but its single
    # use below is guarded by the same platform check, so this is safe.
    if sys.platform == 'linux':
        env = os.environ.copy()
        env['LD_LIBRARY_PATH'] = config['bin_path']
    # Execute the compiled C++ unit-test binary ('lm_test').
    if args.cpp_unit:
        print(Fore.GREEN + "------------------------" + Style.RESET_ALL)
        print(Fore.GREEN + "Executing C++ unit tests" + Style.RESET_ALL)
        print(Fore.GREEN + "------------------------" + Style.RESET_ALL, flush=True)
        command = [os.path.join(config['bin_path'], 'lm_test')]
        if sys.platform == 'linux':
            sp.check_call(command, env=env)
        else:
            sp.check_call(command)
    # Execute the Python unit tests in ./pytest via pytest's API.
    if args.python_unit:
        print(Fore.GREEN + "---------------------------" + Style.RESET_ALL)
        print(Fore.GREEN + "Executing Python unit tests" + Style.RESET_ALL)
        print(Fore.GREEN + "---------------------------" + Style.RESET_ALL, flush=True)
        base_path = os.path.dirname(os.path.realpath(__file__))
        pytest.main([
            os.path.join(base_path, 'pytest'),
            '--lmenv', args.lmenv
        ])
    # Execute the (slow) functional notebook tests; off by default.
    if args.functest:
        print(Fore.GREEN + "--------------------------" + Style.RESET_ALL)
        print(Fore.GREEN + "Executing functional tests" + Style.RESET_ALL)
        print(Fore.GREEN + "--------------------------" + Style.RESET_ALL, flush=True)
        run_functests(args.output_dir, args.lmenv)
run_functests(args.output_dir, args.lmenv) | 41.030769 | 136 | 0.615298 | import os
import sys
import json
import argparse
import subprocess as sp
from colorama import Fore, Back, Style
import pytest
from functest.run_all import run_functests
def add_bool_arg(parser, name, default):
dest = name.replace('-','_')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=dest, action='store_true')
group.add_argument('--no-' + name, dest=dest, action='store_false')
parser.set_defaults(**{dest:default})
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Execute all unit tests')
parser.add_argument('--lmenv', type=str, help='Path to .lmenv file')
parser.add_argument('--output-dir', nargs='?', type=str, default='executed_functest', help='Output directory of executed notebooks')
add_bool_arg(parser, 'cpp-unit', True)
add_bool_arg(parser, 'python-unit', True)
add_bool_arg(parser, 'functest', False)
args = parser.parse_args()
with open(args.lmenv) as f:
config = json.load(f)
if sys.platform == 'linux':
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = config['bin_path']
if args.cpp_unit:
print(Fore.GREEN + "------------------------" + Style.RESET_ALL)
print(Fore.GREEN + "Executing C++ unit tests" + Style.RESET_ALL)
print(Fore.GREEN + "------------------------" + Style.RESET_ALL, flush=True)
command = [os.path.join(config['bin_path'], 'lm_test')]
if sys.platform == 'linux':
sp.check_call(command, env=env)
else:
sp.check_call(command)
if args.python_unit:
print(Fore.GREEN + "---------------------------" + Style.RESET_ALL)
print(Fore.GREEN + "Executing Python unit tests" + Style.RESET_ALL)
print(Fore.GREEN + "---------------------------" + Style.RESET_ALL, flush=True)
base_path = os.path.dirname(os.path.realpath(__file__))
pytest.main([
os.path.join(base_path, 'pytest'),
'--lmenv', args.lmenv
])
if args.functest:
print(Fore.GREEN + "--------------------------" + Style.RESET_ALL)
print(Fore.GREEN + "Executing functional tests" + Style.RESET_ALL)
print(Fore.GREEN + "--------------------------" + Style.RESET_ALL, flush=True)
run_functests(args.output_dir, args.lmenv) | true | true |
1c3a974367ccfcc39831aa874107cd41653b60c8 | 476 | py | Python | tests/comment_tests/custom_comments/views.py | webjunkie/django | 5dbca13f3baa2e1bafd77e84a80ad6d8a074712e | [
"BSD-3-Clause"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/lib/django-1.5/tests/regressiontests/comment_tests/custom_comments/views.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/lib/django-1.5/tests/regressiontests/comment_tests/custom_comments/views.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | from django.http import HttpResponse
def custom_submit_comment(request):
    """Test stub for the customized comment-submission view."""
    body = "Hello from the custom submit comment view."
    return HttpResponse(body)
def custom_flag_comment(request, comment_id):
    """Test stub for the customized comment-flagging view."""
    body = "Hello from the custom flag view."
    return HttpResponse(body)
def custom_delete_comment(request, comment_id):
    """Test stub for the customized comment-deletion view."""
    body = "Hello from the custom delete view."
    return HttpResponse(body)
def custom_approve_comment(request, comment_id):
    """Test stub for the customized comment-approval view."""
    body = "Hello from the custom approve view."
    return HttpResponse(body)
| 31.733333 | 69 | 0.787815 | from django.http import HttpResponse
def custom_submit_comment(request):
return HttpResponse("Hello from the custom submit comment view.")
def custom_flag_comment(request, comment_id):
return HttpResponse("Hello from the custom flag view.")
def custom_delete_comment(request, comment_id):
return HttpResponse("Hello from the custom delete view.")
def custom_approve_comment(request, comment_id):
return HttpResponse("Hello from the custom approve view.")
| true | true |
1c3a978c83cd8797ced0ce6a31280f63002b394e | 876 | py | Python | tracing/tracing/mre/job.py | ravitejavalluri/catapult | 246a39a82c2213d913a96fff020a263838dc76e6 | [
"BSD-3-Clause"
] | null | null | null | tracing/tracing/mre/job.py | ravitejavalluri/catapult | 246a39a82c2213d913a96fff020a263838dc76e6 | [
"BSD-3-Clause"
] | 1 | 2021-02-23T22:20:14.000Z | 2021-02-23T22:20:14.000Z | tracing/tracing/mre/job.py | ravitejavalluri/catapult | 246a39a82c2213d913a96fff020a263838dc76e6 | [
"BSD-3-Clause"
] | 1 | 2020-12-12T10:38:37.000Z | 2020-12-12T10:38:37.000Z | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import uuid
from tracing.mre import function_handle
class Job(object):
  """A mapping job: a map-function handle plus a unique identifier.

  Bug fix: the previous signature was ``def __init__(self,
  map_function_handle, guid=uuid.uuid4())``.  Default arguments are
  evaluated once, at class-definition time, so every Job constructed
  without an explicit guid shared the *same* id.  A ``None`` sentinel now
  generates a fresh UUID per instance.
  """

  def __init__(self, map_function_handle, guid=None):
    """Create a job.

    Args:
      map_function_handle: handle of the map function to run (required).
      guid: optional explicit identifier; a fresh uuid4 is generated per
          instance when omitted.
    """
    assert map_function_handle is not None
    self._map_function_handle = map_function_handle
    self._guid = uuid.uuid4() if guid is None else guid

  @property
  def guid(self):
    """Identifier of this job (uuid.UUID unless an explicit guid was given)."""
    return self._guid

  @property
  def map_function_handle(self):
    """The function handle this job executes."""
    return self._map_function_handle

  def AsDict(self):
    """Serialize to a plain dict; the guid is stringified."""
    values_dict = {
        'map_function_handle': self._map_function_handle.AsDict(),
        'guid': str(self._guid)
    }
    return values_dict

  @staticmethod
  def FromDict(job_dict):
    """Rebuild a Job from AsDict() output (a fresh guid is assigned)."""
    return Job(
        function_handle.FunctionHandle.FromDict(
            job_dict['map_function_handle']))
| 23.675676 | 72 | 0.718037 |
import uuid
from tracing.mre import function_handle
class Job(object):
def __init__(self, map_function_handle, guid=uuid.uuid4()):
assert map_function_handle is not None
self._map_function_handle = map_function_handle
self._guid = guid
@property
def guid(self):
return self._guid
@property
def map_function_handle(self):
return self._map_function_handle
def AsDict(self):
values_dict = {
'map_function_handle': self._map_function_handle.AsDict(),
'guid': str(self._guid)
}
return values_dict
@staticmethod
def FromDict(job_dict):
return Job(
function_handle.FunctionHandle.FromDict(
job_dict['map_function_handle']))
| true | true |
1c3a99e3183356804b19506bb3a1a514d934c5ba | 77 | py | Python | geco/mips/packing/__init__.py | FreestyleBuild/GeCO | 6db1a549b3145b3bc5d3025a9bccc03be6575564 | [
"MIT"
] | 8 | 2020-12-16T09:59:05.000Z | 2022-03-18T09:48:43.000Z | geco/mips/packing/__init__.py | FreestyleBuild/GeCO | 6db1a549b3145b3bc5d3025a9bccc03be6575564 | [
"MIT"
] | 101 | 2020-11-09T10:20:03.000Z | 2022-03-24T13:50:06.000Z | geco/mips/packing/__init__.py | FreestyleBuild/GeCO | 6db1a549b3145b3bc5d3025a9bccc03be6575564 | [
"MIT"
] | 3 | 2021-04-06T13:26:03.000Z | 2022-03-22T13:22:16.000Z | from geco.mips.packing.generic import *
from geco.mips.packing.tang import *
| 25.666667 | 39 | 0.792208 | from geco.mips.packing.generic import *
from geco.mips.packing.tang import *
| true | true |
1c3a9b110a5850286ee33cd2b54e8186479fc867 | 1,029 | py | Python | src/utils/misc.py | iN1k1/deep-pyramidal-representations-peron-re-identification | 18eacd3b7bde2c4767ba290b655cb0f5c72ed8fe | [
"MIT"
] | 13 | 2019-08-09T08:33:27.000Z | 2020-12-21T08:51:33.000Z | src/utils/misc.py | iN1k1/deep-pyramidal-representations-peron-re-identification | 18eacd3b7bde2c4767ba290b655cb0f5c72ed8fe | [
"MIT"
] | 5 | 2021-03-19T02:17:23.000Z | 2022-03-11T23:53:44.000Z | src/utils/misc.py | iN1k1/deep-pyramidal-representations-peron-re-identification | 18eacd3b7bde2c4767ba290b655cb0f5c72ed8fe | [
"MIT"
] | 4 | 2019-11-06T08:02:21.000Z | 2021-01-13T20:34:23.000Z | import copy
import pickle
def create_list_of_dictionaries(num_items):
    """Return a list of *num_items* independent (non-aliased) empty dicts."""
    return [dict() for _ in range(num_items)]
def clone(obj):
    """Deep-copy *obj* so mutations on the copy never touch the original."""
    return copy.deepcopy(obj)
def save(file_name, **kwargs):
    """Pickle *kwargs* to *file_name* in the format understood by load().

    On-disk layout: first the number of pickled objects that follow
    (len(kwargs) + 1), then the list of keyword names, then each value in
    the same order.  (Removed the long-dead commented-out h5py variant.)
    """
    with open(file_name, 'wb') as fp:
        pickle.dump(len(kwargs) + 1, fp)
        keys = list(kwargs.keys())
        pickle.dump(keys, fp, protocol=pickle.HIGHEST_PROTOCOL)
        for value in kwargs.values():
            pickle.dump(value, fp, protocol=pickle.HIGHEST_PROTOCOL)
def load(file_name):
    """Load a file written by save() and return its {name: value} dict.

    Reads the object count, then the key list, then each value, and zips
    keys with values.  (Removed the long-dead commented-out h5py variant.)
    """
    with open(file_name, 'rb') as f:
        count = pickle.load(f)  # number of pickled objects that follow
        data = [pickle.load(f) for _ in range(count)]
    keys = data[0]
    return dict(zip(keys, data[1:]))
| 24.5 | 64 | 0.589893 | import copy
import pickle
def create_list_of_dictionaries(num_items):
return [{} for _ in range(num_items)]
def clone(obj):
return copy.deepcopy(obj)
def save(file_name, **kwargs):
with open(file_name, 'wb') as fp:
pickle.dump(len(kwargs)+1, fp)
keys = list(kwargs.keys())
pickle.dump(keys, fp, protocol=pickle.HIGHEST_PROTOCOL)
for k, v in kwargs.items():
pickle.dump(v, fp, protocol=pickle.HIGHEST_PROTOCOL)
def load(file_name):
data = []
with open(file_name, 'rb') as f:
for _ in range(pickle.load(f)):
data.append(pickle.load(f))
keys = data[0]
kw_data = {}
for k, v in zip(keys, data[1:]):
kw_data[k] = v
return kw_data
| true | true |
1c3a9c1696c04d95ff2bc346a005dbf72e3c517c | 8,992 | py | Python | examples/seq2seq/utils.py | anonSub2/bem | 2791575213ce2d64381ee3f5dd2e5428be23b960 | [
"MIT"
] | 2 | 2021-02-14T10:27:31.000Z | 2022-01-03T06:53:49.000Z | examples/seq2seq/utils.py | anonSub2/bem | 2791575213ce2d64381ee3f5dd2e5428be23b960 | [
"MIT"
] | 9 | 2020-11-13T17:51:46.000Z | 2022-03-12T00:46:15.000Z | examples/seq2seq/utils.py | anonSub2/bem | 2791575213ce2d64381ee3f5dd2e5428be23b960 | [
"MIT"
] | 1 | 2021-02-12T16:31:47.000Z | 2021-02-12T16:31:47.000Z | import itertools
import json
import os
import pickle
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import numpy as np
import torch
from rouge_score import rouge_scorer, scoring
from sacrebleu import corpus_bleu
from torch import nn
from torch.utils.data import Dataset, Sampler
from tqdm import tqdm
from transformers import BartTokenizer
def encode_file(
    tokenizer,
    data_path,
    max_length,
    pad_to_max_length=True,
    return_tensors="pt",
    overwrite_cache=False,
    prefix="",
    tok_name="",
):
    """Tokenize every line of *data_path* to fixed-length tensors, with a disk cache.

    Returns a list with one tokenizer output (batch of size 1) per line.
    Results are cached at ``<data_path>_<tok_name><max_length>.pt``; an
    unreadable cache silently falls back to re-tokenizing.

    Args:
        tokenizer: HuggingFace tokenizer (BART additionally gets
            ``add_prefix_space=True``).
        data_path: text file with one example per line.
        max_length: pad/truncate length enforced on every example.
        pad_to_max_length: pad each example to exactly ``max_length``.
        return_tensors: framework flag forwarded to the tokenizer ("pt").
        overwrite_cache: ignore any existing cache file.
        prefix: string prepended to every line (e.g. a T5 task prefix).
        tok_name: tokenizer tag mixed into the cache file name.
    """
    # BART needs add_prefix_space so that mid-sentence tokens match training.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) else {}
    cache_path = Path(f"{data_path}_{tok_name}{max_length}.pt")
    if not overwrite_cache and cache_path.exists():
        try:
            examples = torch.load(cache_path)
            assert isinstance(examples, list)
            return examples
        except Exception:
            # Any cache problem (corruption, format drift) -> re-tokenize.
            print(f"failed to load from {cache_path}, retokenizing {data_path}")
    data_path = Path(data_path)
    lns = lmap(str.strip, data_path.open().readlines())
    lns = [prefix + text for text in lns]
    assert lns, f"found empty file at {data_path}"
    examples = []
    for text in tqdm(lns, desc=f"Tokenizing {data_path.name}"):
        tokenized = tokenizer(
            [text],
            max_length=max_length,
            padding="max_length" if pad_to_max_length else None,
            truncation=True,
            return_tensors=return_tensors,
            **extra_kw,
        )
        # Every cached example must be exactly max_length wide.
        assert tokenized.input_ids.shape[1] == max_length
        examples.append(tokenized)
    # NOTE(review): the handles from data_path.open() and cache_path.open("wb")
    # are never explicitly closed -- relies on GC; consider `with` blocks.
    torch.save(lmap(dict, examples), cache_path.open("wb"))
    return examples
def lmap(f: Callable, x: Iterable) -> List:
    """Eagerly apply *f* to every element of *x*; equivalent to list(map(f, x))."""
    return [f(item) for item in x]
def calculate_bleu_score(output_lns, refs_lns, **kwargs) -> dict:
    """Corpus-level BLEU (sacrebleu) of *output_lns* against *refs_lns*."""
    score = corpus_bleu(output_lns, [refs_lns], **kwargs).score
    return {"bleu": score}
def trim_batch(
    input_ids, pad_token_id, attention_mask=None,
):
    """Drop the columns of *input_ids* that contain only *pad_token_id*.

    When *attention_mask* is given, the same columns are dropped from it
    and an (ids, mask) tuple is returned.
    """
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    trimmed_ids = input_ids[:, keep_column_mask]
    if attention_mask is None:
        return trimmed_ids
    return (trimmed_ids, attention_mask[:, keep_column_mask])
class SummarizationDataset(Dataset):
    """Tokenized seq2seq dataset backed by `<type_path>.source` / `.target` files.

    Each split is tokenized once via encode_file() (disk-cached) and served
    as dicts of input_ids / attention_mask / decoder_input_ids tensors.

    Args:
        tokenizer: HuggingFace tokenizer used for both sides.
        data_dir: directory containing ``<type_path>.source`` and ``.target``.
        type_path: split name ("train", "val", ...).
        max_source_length / max_target_length: pad/truncate lengths.
        n_obs: optional cap on the number of examples.
        overwrite_cache: retokenize even if a cache file exists.
        prefix: string prepended to every source line (e.g. T5 task prefix).
        src_lang / tgt_lang: language codes for multilingual tokenizers
            (only used when the tokenizer exposes ``set_lang``, e.g. mBART).
    """

    def __init__(
        self,
        tokenizer,
        data_dir,
        type_path="train",
        max_source_length=1024,
        max_target_length=56,
        n_obs=None,
        overwrite_cache=False,
        prefix="",
        src_lang=None,
        tgt_lang=None,
    ):
        super().__init__()
        # Cache-name prefix derived from the tokenizer class, e.g.
        # "BartTokenizer" -> "bart".  Fixes the FIXME'd `.rstrip("tokenizer")`,
        # which strips a *character set* and mangled names
        # ("barttokenizer" -> "ba"); stale caches are simply re-tokenized.
        tok_name = tokenizer.__class__.__name__.lower()
        if tok_name.endswith("tokenizer"):
            tok_name = tok_name[: -len("tokenizer")]
        if hasattr(tokenizer, "set_lang") and src_lang is not None:
            tokenizer.set_lang(src_lang)  # HACK: only applies to mbart
        self.source = encode_file(
            tokenizer,
            os.path.join(data_dir, type_path + ".source"),
            max_source_length,
            overwrite_cache=overwrite_cache,
            prefix=prefix,
            tok_name=tok_name,
        )
        tgt_path = os.path.join(data_dir, type_path + ".target")
        if hasattr(tokenizer, "set_lang"):
            assert tgt_lang is not None, "--tgt_lang must be passed to build a translation"
            tokenizer.set_lang(tgt_lang)  # HACK: only applies to mbart
        self.target = encode_file(
            tokenizer, tgt_path, max_target_length, overwrite_cache=overwrite_cache, tok_name=tok_name
        )
        if n_obs is not None:
            self.source = self.source[:n_obs]
            self.target = self.target[:n_obs]
        self.pad_token_id = tokenizer.pad_token_id

    def __len__(self):
        return len(self.source)

    def __getitem__(self, index):
        # encode_file() returns batches of size 1; squeeze off the batch dim.
        source_ids = self.source[index]["input_ids"].squeeze()
        target_ids = self.target[index]["input_ids"].squeeze()
        src_mask = self.source[index]["attention_mask"].squeeze()
        return {"input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids}

    @staticmethod
    def trim_seq2seq_batch(batch, pad_token_id):
        """Strip all-padding columns from an already-collated batch."""
        y = trim_batch(batch["decoder_input_ids"], pad_token_id)
        source_ids, source_mask = trim_batch(batch["input_ids"], pad_token_id, attention_mask=batch["attention_mask"])
        return source_ids, source_mask, y

    def collate_fn(self, batch) -> dict:
        """Stack examples, then drop columns that are padding in every row."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        pad_token_id = self.pad_token_id
        y = trim_batch(target_ids, pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks)
        return {"input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y}

    @property
    def src_lens(self):
        # NOTE(review): len() of a tokenizer-output dict counts its *keys*,
        # not token lengths; the original carried a "Can delete?" comment.
        return lmap(len, self.source)

    @property
    def tgt_lens(self):
        # NOTE(review): same caveat as src_lens.
        return lmap(len, self.target)

    def make_sortish_sampler(self, batch_size):
        """Sampler grouping similarly-long sources (see SortishSampler)."""
        return SortishSampler(self.source, batch_size)
class SortishSampler(Sampler):
"Go through the text data by order of src length with a bit of randomness. From fastai repo."
def __init__(self, data, batch_size):
self.data, self.bs = data, batch_size
def key(self, i):
return len(self.data[i])
def __len__(self) -> int:
return len(self.data)
def __iter__(self):
idxs = np.random.permutation(len(self.data))
sz = self.bs * 50
ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)]
sort_idx = np.concatenate([sorted(s, key=self.key, reverse=True) for s in ck_idx])
sz = self.bs
ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)]
max_ck = np.argmax([self.key(ck[0]) for ck in ck_idx]) # find the chunk with the largest key,
ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first.
sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=np.int)
sort_idx = np.concatenate((ck_idx[0], sort_idx))
return iter(sort_idx)
def use_task_specific_params(model, task):
    """Merge ``config.task_specific_params[task]`` (if present) into model.config."""
    task_params = model.config.task_specific_params
    if task_params is None:
        return
    model.config.update(task_params.get(task, {}))
def pickle_load(path):
    """Unpickle and return the object stored at *path*."""
    with open(path, "rb") as handle:
        loaded = pickle.load(handle)
    return loaded
def pickle_save(obj, path):
    """Pickle *obj* to *path* (returns None, mirroring pickle.dump)."""
    with open(path, "wb") as handle:
        result = pickle.dump(obj, handle)
    return result
def flatten_list(summary_ids: List[List]):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [item for sub in summary_ids for item in sub]
def save_git_info(folder_path: str) -> None:
    """Write the current repo's git metadata to ``<folder_path>/git_log.json``."""
    git_infos = get_git_info()
    save_json(git_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path):
    """Write *content* to *path* as pretty-printed (indent=4) JSON."""
    with open(path, "w") as handle:
        json.dump(content, handle, indent=4)
def load_json(path):
    """Read *path* and return the parsed JSON value."""
    with open(path) as handle:
        parsed = json.load(handle)
    return parsed
def get_git_info():
    """Return id, commit sha, and branch of the repo enclosing the CWD."""
    repo = git.Repo(search_parent_directories=True)
    return {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
ROUGE_KEYS = ["rouge1", "rouge2", "rougeL"]
def calculate_rouge(output_lns: List[str], reference_lns: List[str], use_stemmer=True) -> Dict:
scorer = rouge_scorer.RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer)
aggregator = scoring.BootstrapAggregator()
for reference_ln, output_ln in zip(reference_lns, output_lns):
scores = scorer.score(reference_ln, output_ln)
aggregator.add_scores(scores)
result = aggregator.aggregate()
return {k: v.mid.fmeasure for k, v in result.items()}
def freeze_params(model: nn.Module):
    """Disable gradients for every parameter of *model*, in place."""
    for param in model.parameters():
        param.requires_grad = False
def grad_status(model: nn.Module) -> Iterable:
    """Lazily yield requires_grad for each parameter of *model*, in order."""
    return (param.requires_grad for param in model.parameters())
def any_requires_grad(model: nn.Module) -> bool:
    """True if at least one parameter of *model* is trainable."""
    return any(grad_status(model))
def assert_all_frozen(model):
    """Raise AssertionError if any parameter of *model* still requires grad."""
    statuses: List[bool] = list(grad_status(model))
    trainable = sum(lmap(int, statuses))
    total = len(statuses)
    assert not any(statuses), f"{trainable/total:.1%} of {total} weights require grad"
def assert_not_all_frozen(model):
    """Raise AssertionError if *model* has no trainable parameters at all."""
    statuses: List[bool] = list(grad_status(model))
    total = len(statuses)
    assert any(statuses), f"none of {total} weights require grad"
| 33.058824 | 119 | 0.663479 | import itertools
import json
import os
import pickle
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import numpy as np
import torch
from rouge_score import rouge_scorer, scoring
from sacrebleu import corpus_bleu
from torch import nn
from torch.utils.data import Dataset, Sampler
from tqdm import tqdm
from transformers import BartTokenizer
def encode_file(
tokenizer,
data_path,
max_length,
pad_to_max_length=True,
return_tensors="pt",
overwrite_cache=False,
prefix="",
tok_name="",
):
extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) else {}
cache_path = Path(f"{data_path}_{tok_name}{max_length}.pt")
if not overwrite_cache and cache_path.exists():
try:
examples = torch.load(cache_path)
assert isinstance(examples, list)
return examples
except Exception:
print(f"failed to load from {cache_path}, retokenizing {data_path}")
data_path = Path(data_path)
lns = lmap(str.strip, data_path.open().readlines())
lns = [prefix + text for text in lns]
assert lns, f"found empty file at {data_path}"
examples = []
for text in tqdm(lns, desc=f"Tokenizing {data_path.name}"):
tokenized = tokenizer(
[text],
max_length=max_length,
padding="max_length" if pad_to_max_length else None,
truncation=True,
return_tensors=return_tensors,
**extra_kw,
)
assert tokenized.input_ids.shape[1] == max_length
examples.append(tokenized)
torch.save(lmap(dict, examples), cache_path.open("wb"))
return examples
def lmap(f: Callable, x: Iterable) -> List:
return list(map(f, x))
def calculate_bleu_score(output_lns, refs_lns, **kwargs) -> dict:
return {"bleu": corpus_bleu(output_lns, [refs_lns], **kwargs).score}
def trim_batch(
input_ids, pad_token_id, attention_mask=None,
):
keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class SummarizationDataset(Dataset):
def __init__(
self,
tokenizer,
data_dir,
type_path="train",
max_source_length=1024,
max_target_length=56,
n_obs=None,
overwrite_cache=False,
prefix="",
src_lang=None,
tgt_lang=None,
):
super().__init__()
tok_name = tokenizer.__class__.__name__.lower().rstrip("tokenizer")
if hasattr(tokenizer, "set_lang") and src_lang is not None:
tokenizer.set_lang(src_lang)
self.source = encode_file(
tokenizer,
os.path.join(data_dir, type_path + ".source"),
max_source_length,
overwrite_cache=overwrite_cache,
prefix=prefix,
tok_name=tok_name,
)
tgt_path = os.path.join(data_dir, type_path + ".target")
if hasattr(tokenizer, "set_lang"):
assert tgt_lang is not None, "--tgt_lang must be passed to build a translation"
tokenizer.set_lang(tgt_lang)
self.target = encode_file(
tokenizer, tgt_path, max_target_length, overwrite_cache=overwrite_cache, tok_name=tok_name
)
if n_obs is not None:
self.source = self.source[:n_obs]
self.target = self.target[:n_obs]
self.pad_token_id = tokenizer.pad_token_id
def __len__(self):
return len(self.source)
def __getitem__(self, index):
source_ids = self.source[index]["input_ids"].squeeze()
target_ids = self.target[index]["input_ids"].squeeze()
src_mask = self.source[index]["attention_mask"].squeeze()
return {"input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids}
@staticmethod
def trim_seq2seq_batch(batch, pad_token_id):
y = trim_batch(batch["decoder_input_ids"], pad_token_id)
source_ids, source_mask = trim_batch(batch["input_ids"], pad_token_id, attention_mask=batch["attention_mask"])
return source_ids, source_mask, y
def collate_fn(self, batch) -> dict:
input_ids = torch.stack([x["input_ids"] for x in batch])
masks = torch.stack([x["attention_mask"] for x in batch])
target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
pad_token_id = self.pad_token_id
y = trim_batch(target_ids, pad_token_id)
source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks)
batch = {"input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y}
return batch
@property
def src_lens(self):
return lmap(len, self.source)
@property
def tgt_lens(self):
return lmap(len, self.target)
def make_sortish_sampler(self, batch_size):
return SortishSampler(self.source, batch_size)
class SortishSampler(Sampler):
def __init__(self, data, batch_size):
self.data, self.bs = data, batch_size
def key(self, i):
return len(self.data[i])
def __len__(self) -> int:
return len(self.data)
def __iter__(self):
idxs = np.random.permutation(len(self.data))
sz = self.bs * 50
ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)]
sort_idx = np.concatenate([sorted(s, key=self.key, reverse=True) for s in ck_idx])
sz = self.bs
ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)]
max_ck = np.argmax([self.key(ck[0]) for ck in ck_idx])
ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0]
sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=np.int)
sort_idx = np.concatenate((ck_idx[0], sort_idx))
return iter(sort_idx)
def use_task_specific_params(model, task):
task_specific_params = model.config.task_specific_params
if task_specific_params is not None:
model.config.update(task_specific_params.get(task, {}))
def pickle_load(path):
with open(path, "rb") as f:
return pickle.load(f)
def pickle_save(obj, path):
with open(path, "wb") as f:
return pickle.dump(obj, f)
def flatten_list(summary_ids: List[List]):
return [x for x in itertools.chain.from_iterable(summary_ids)]
def save_git_info(folder_path: str) -> None:
repo_infos = get_git_info()
save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path):
with open(path, "w") as f:
json.dump(content, f, indent=4)
def load_json(path):
with open(path) as f:
return json.load(f)
def get_git_info():
repo = git.Repo(search_parent_directories=True)
repo_infos = {
"repo_id": str(repo),
"repo_sha": str(repo.head.object.hexsha),
"repo_branch": str(repo.active_branch),
}
return repo_infos
ROUGE_KEYS = ["rouge1", "rouge2", "rougeL"]
def calculate_rouge(output_lns: List[str], reference_lns: List[str], use_stemmer=True) -> Dict:
scorer = rouge_scorer.RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer)
aggregator = scoring.BootstrapAggregator()
for reference_ln, output_ln in zip(reference_lns, output_lns):
scores = scorer.score(reference_ln, output_ln)
aggregator.add_scores(scores)
result = aggregator.aggregate()
return {k: v.mid.fmeasure for k, v in result.items()}
def freeze_params(model: nn.Module):
for par in model.parameters():
par.requires_grad = False
def grad_status(model: nn.Module) -> Iterable:
return (par.requires_grad for par in model.parameters())
def any_requires_grad(model: nn.Module) -> bool:
return any(grad_status(model))
def assert_all_frozen(model):
model_grads: List[bool] = list(grad_status(model))
n_require_grad = sum(lmap(int, model_grads))
npars = len(model_grads)
assert not any(model_grads), f"{n_require_grad/npars:.1%} of {npars} weights require grad"
def assert_not_all_frozen(model):
model_grads: List[bool] = list(grad_status(model))
npars = len(model_grads)
assert any(model_grads), f"none of {npars} weights require grad"
| true | true |
1c3a9cc108eeeaa60a56ed7406883c6368b6ea5c | 7,821 | py | Python | sdks/python/http_client/v1/polyaxon_sdk/models/v1_run_artifact.py | gregmbi/polyaxon | 8f24089fa9cb5df28fc7b70aec27d6d23ee81e8d | [
"Apache-2.0"
] | null | null | null | sdks/python/http_client/v1/polyaxon_sdk/models/v1_run_artifact.py | gregmbi/polyaxon | 8f24089fa9cb5df28fc7b70aec27d6d23ee81e8d | [
"Apache-2.0"
] | null | null | null | sdks/python/http_client/v1/polyaxon_sdk/models/v1_run_artifact.py | gregmbi/polyaxon | 8f24089fa9cb5df28fc7b70aec27d6d23ee81e8d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.0.79
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1RunArtifact(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"name": "str",
"state": "str",
"kind": "V1ArtifactKind",
"path": "str",
"connection": "str",
"summary": "object",
"is_input": "bool",
}
attribute_map = {
"name": "name",
"state": "state",
"kind": "kind",
"path": "path",
"connection": "connection",
"summary": "summary",
"is_input": "is_input",
}
def __init__(
self,
name=None,
state=None,
kind=None,
path=None,
connection=None,
summary=None,
is_input=None,
local_vars_configuration=None,
): # noqa: E501
"""V1RunArtifact - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._state = None
self._kind = None
self._path = None
self._connection = None
self._summary = None
self._is_input = None
self.discriminator = None
if name is not None:
self.name = name
if state is not None:
self.state = state
if kind is not None:
self.kind = kind
if path is not None:
self.path = path
if connection is not None:
self.connection = connection
if summary is not None:
self.summary = summary
if is_input is not None:
self.is_input = is_input
@property
def name(self):
"""Gets the name of this V1RunArtifact. # noqa: E501
:return: The name of this V1RunArtifact. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1RunArtifact.
:param name: The name of this V1RunArtifact. # noqa: E501
:type: str
"""
self._name = name
@property
def state(self):
"""Gets the state of this V1RunArtifact. # noqa: E501
:return: The state of this V1RunArtifact. # noqa: E501
:rtype: str
"""
return self._state
    # NOTE(review): accessor boilerplate in the usual openapi-generator layout;
    # every setter stores its value verbatim -- no validation or coercion.
    @state.setter
    def state(self, state):
        """Sets the state of this V1RunArtifact.
        :param state: The state of this V1RunArtifact.  # noqa: E501
        :type: str
        """
        self._state = state
    @property
    def kind(self):
        """Gets the kind of this V1RunArtifact.  # noqa: E501
        :return: The kind of this V1RunArtifact.  # noqa: E501
        :rtype: V1ArtifactKind
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1RunArtifact.
        :param kind: The kind of this V1RunArtifact.  # noqa: E501
        :type: V1ArtifactKind
        """
        self._kind = kind
    @property
    def path(self):
        """Gets the path of this V1RunArtifact.  # noqa: E501
        :return: The path of this V1RunArtifact.  # noqa: E501
        :rtype: str
        """
        return self._path
    @path.setter
    def path(self, path):
        """Sets the path of this V1RunArtifact.
        :param path: The path of this V1RunArtifact.  # noqa: E501
        :type: str
        """
        self._path = path
    @property
    def connection(self):
        """Gets the connection of this V1RunArtifact.  # noqa: E501
        :return: The connection of this V1RunArtifact.  # noqa: E501
        :rtype: str
        """
        return self._connection
    @connection.setter
    def connection(self, connection):
        """Sets the connection of this V1RunArtifact.
        :param connection: The connection of this V1RunArtifact.  # noqa: E501
        :type: str
        """
        self._connection = connection
    @property
    def summary(self):
        """Gets the summary of this V1RunArtifact.  # noqa: E501
        :return: The summary of this V1RunArtifact.  # noqa: E501
        :rtype: object
        """
        return self._summary
    @summary.setter
    def summary(self, summary):
        """Sets the summary of this V1RunArtifact.
        :param summary: The summary of this V1RunArtifact.  # noqa: E501
        :type: object
        """
        self._summary = summary
    @property
    def is_input(self):
        """Gets the is_input of this V1RunArtifact.  # noqa: E501
        :return: The is_input of this V1RunArtifact.  # noqa: E501
        :rtype: bool
        """
        return self._is_input
    @is_input.setter
    def is_input(self, is_input):
        """Sets the is_input of this V1RunArtifact.
        :param is_input: The is_input of this V1RunArtifact.  # noqa: E501
        :type: bool
        """
        self._is_input = is_input
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
    def to_str(self):
        """Returns the string representation of the model (pretty-printed dict)."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint` -- delegates to :meth:`to_str`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal (field-wise, via to_dict)."""
        if not isinstance(other, V1RunArtifact):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # NOTE: defining __eq__ without __hash__ makes instances unhashable
        # (Python sets __hash__ to None) -- relevant if models end up in sets.
        if not isinstance(other, V1RunArtifact):
            return True
        return self.to_dict() != other.to_dict()
| 25.558824 | 85 | 0.567319 |
import pprint
import re
import six
from polyaxon_sdk.configuration import Configuration
class V1RunArtifact(object):
    """Lineage record linking a run to an artifact (name, kind, path, ...).

    NOTE(review): the layout (openapi_types / attribute_map / to_dict
    boilerplate) matches openapi-generator Python client output --
    presumably auto-generated; confirm before hand-editing.
    """
    # attribute name -> OpenAPI type string; drives to_dict() iteration.
    openapi_types = {
        "name": "str",
        "state": "str",
        "kind": "V1ArtifactKind",
        "path": "str",
        "connection": "str",
        "summary": "object",
        "is_input": "bool",
    }
    # python attribute name -> JSON key name.
    attribute_map = {
        "name": "name",
        "state": "state",
        "kind": "kind",
        "path": "path",
        "connection": "connection",
        "summary": "summary",
        "is_input": "is_input",
    }
    def __init__(
        self,
        name=None,
        state=None,
        kind=None,
        path=None,
        connection=None,
        summary=None,
        is_input=None,
        local_vars_configuration=None,
    ):
        """Instantiate the model; every field is optional and defaults to None."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields for the @property accessors below.
        self._name = None
        self._state = None
        self._kind = None
        self._path = None
        self._connection = None
        self._summary = None
        self._is_input = None
        self.discriminator = None
        # Only invoke the property setters for explicitly supplied values,
        # so unset fields stay None.
        if name is not None:
            self.name = name
        if state is not None:
            self.state = state
        if kind is not None:
            self.kind = kind
        if path is not None:
            self.path = path
        if connection is not None:
            self.connection = connection
        if summary is not None:
            self.summary = summary
        if is_input is not None:
            self.is_input = is_input
    @property
    def name(self):
        """Gets ``name`` (str)."""
        return self._name
    @name.setter
    def name(self, name):
        """Sets ``name``; stored as-is, no validation."""
        self._name = name
    @property
    def state(self):
        """Gets ``state`` (str)."""
        return self._state
    @state.setter
    def state(self, state):
        """Sets ``state``; stored as-is, no validation."""
        self._state = state
    @property
    def kind(self):
        """Gets ``kind`` (V1ArtifactKind)."""
        return self._kind
    @kind.setter
    def kind(self, kind):
        """Sets ``kind``; stored as-is, no validation."""
        self._kind = kind
    @property
    def path(self):
        """Gets ``path`` (str)."""
        return self._path
    @path.setter
    def path(self, path):
        """Sets ``path``; stored as-is, no validation."""
        self._path = path
    @property
    def connection(self):
        """Gets ``connection`` (str)."""
        return self._connection
    @connection.setter
    def connection(self, connection):
        """Sets ``connection``; stored as-is, no validation."""
        self._connection = connection
    @property
    def summary(self):
        """Gets ``summary`` (object)."""
        return self._summary
    @summary.setter
    def summary(self, summary):
        """Sets ``summary``; stored as-is, no validation."""
        self._summary = summary
    @property
    def is_input(self):
        """Gets ``is_input`` (bool)."""
        return self._is_input
    @is_input.setter
    def is_input(self, is_input):
        """Sets ``is_input``; stored as-is, no validation."""
        self._is_input = is_input
    def to_dict(self):
        """Return the model's properties as a dict, recursing into any
        value (or list/dict element) that itself exposes ``to_dict``."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
                )
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict")
                        else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint` -- delegates to :meth:`to_str`."""
        return self.to_str()
    def __eq__(self, other):
        """Field-wise equality via :meth:`to_dict`."""
        if not isinstance(other, V1RunArtifact):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if not isinstance(other, V1RunArtifact):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
1c3a9cd61084c77c0da42925d3d0f03f40572a9e | 6,298 | py | Python | dojango/data/__init__.py | google-code-export/dojango | d350a0042635fcdb1a57b413a9836a728bc13d48 | [
"BSD-3-Clause"
] | 12 | 2015-06-07T23:14:36.000Z | 2018-05-14T11:03:41.000Z | dojango/data/__init__.py | thedrow/dojango | 9a6193153598ca22a088031c903930c8944f612b | [
"BSD-3-Clause"
] | 2 | 2015-04-03T09:16:37.000Z | 2015-11-01T01:39:49.000Z | dojango/data/__init__.py | thedrow/dojango | 9a6193153598ca22a088031c903930c8944f612b | [
"BSD-3-Clause"
] | 4 | 2017-02-19T03:15:45.000Z | 2019-12-27T15:01:50.000Z | import re
# Names exported via ``from dojango.data import *``.
# NOTE(review): QueryInfoFeatures (defined below) is deliberately omitted? confirm.
__all__ = ('QueryInfo', 'QueryReadStoreInfo',
           'JsonRestStoreInfo', 'JsonQueryRestStoreInfo',)
class QueryInfoFeatures(object):
    # Capability flags for QueryInfo implementations.
    # NOTE(review): nothing in this module reads these flags -- confirm callers.
    sorting = True
    paging = False
class QueryInfo(object):
    '''Extracts paging/sorting/filter information from a request and applies
    it to a queryset.

    Usage (is that the right solution?):

        info = QueryInfo(request)
        info.extract()
        queryset = extract.process(Object.objects.all())
    '''
    # Class-level defaults. The mutable ones are shadowed with fresh
    # per-instance copies in __init__: previously ``filters``/``sorting``
    # were shared class attributes, so ``self.sorting.append(...)`` leaked
    # state across every QueryInfo instance/request (BUG FIX).
    start = 0
    end = 25
    filters = {}
    sorting = []  # entries like "field" (ascending) / "-field" (descending)
    request = None
    max_count = 25

    def __init__(self, request, max_count=None, **kwargs):
        """Store the request; optionally override the page-size cap."""
        self.request = request
        # Fresh per-instance containers -- never mutate the class defaults.
        self.filters = {}
        self.sorting = []
        if max_count is not None:
            self.max_count = max_count

    def extract(self):
        """Populate paging, sorting and filters from the request."""
        self.set_paging()
        self.set_sorting()
        self.set_filters()

    def set_paging(self):
        """Needs to be implemented in a subclass"""
        pass

    def set_sorting(self):
        """Needs to be implemented in a subclass"""
        pass

    def set_filters(self):
        """Needs to be implemented in a subclass"""
        pass

    def process(self, queryset):
        """Apply extracted filters, ordering and slice to ``queryset``."""
        # maybe using Django's paginator
        return queryset.filter(**self.filters).order_by(*self.sorting)[self.start:self.end]
class QueryReadStoreInfo(QueryInfo):
    """
    A helper to evaluate a request from a dojox.data.QueryReadStore
    and extracting the following information from it:

    - paging
    - sorting
    - filters

    Parameters could be passed within GET or POST.
    """
    def set_paging(self):
        """Read ``start``/``count`` parameters, clamping count to max_count."""
        start = self.request[self.request.method].pop('start', 0)
        # TODO: start = 1???
        count = self.request[self.request.method].pop('count', 25)
        # The dojo combobox may return "Infinity" (or other junk) as count.
        # BUG FIX: the original called an undefined ``is_number()`` helper
        # (NameError at runtime); validate by attempting the conversion.
        try:
            count = int(count)
        except (TypeError, ValueError):
            count = self.max_count
        if count > self.max_count:
            count = self.max_count
        self.start = int(start)
        self.end = self.start + count

    def set_sorting(self):
        """Read the ``sort`` parameter: "-field" descending, "field" ascending."""
        sort_attr = self.request[self.request.method].pop('sort', None)
        if sort_attr:
            self.sorting.append(sort_attr)

    def set_filters(self):
        """Treat every remaining request parameter as an exact-match filter."""
        query_dict = {}
        for k, v in self.request[self.request.method].items():
            query_dict[k] = v
        # BUG FIX: the collected filters were previously built and discarded;
        # they must be stored so process() can apply them.
        self.filters = query_dict
class JsonRestStoreInfo(QueryReadStoreInfo):
    """
    A helper to evaluate a request from a dojox.data.JsonRestStoreInfo
    and extracting the following information:

    - paging
    - filters

    The paging parameter is passed within the request header "Range".
    Filters are passed via GET (equal to QueryReadStoreInfo).
    Sorting is just possible with JsonQueryRestStoreInfo.
    """
    def set_paging(self):
        """Parse the ``Range: items=start-end`` header into start/end."""
        # Receiving the following header:
        #   Range: items=0-24
        # Returning: Content-Range: items 0-24/66
        # BUG FIX: the header lives on the request, not on self
        # (``self.META`` raised AttributeError).
        # NOTE(review): Django normally exposes this header as
        # META['HTTP_RANGE'] -- confirm which key callers actually see.
        if 'RANGE' in self.request.META:
            regexp = re.compile(r"^\s*items=(\d+)-(\d+)", re.I)
            match = regexp.match(self.request.META['RANGE'])
            if match:
                start, end = match.groups()
                start, end = int(start), int(end) + 1  # range-end is inclusive
                self.start = start
                count = self.max_count
                if end - start < self.max_count:
                    count = end - start
                self.end = start + count

    def set_sorting(self):
        # sorting is not available in the normal JsonRestStore
        pass
class JsonQueryRestStoreInfo(QueryInfo):
    """Extracts filter/sort/paging sections from a JSONPath-style URL tail."""
    jsonpath = None
    jsonpath_filters = None
    jsonpath_sorting = None
    jsonpath_paging = None

    def __init__(self, request, **kwargs):
        """
        Matching the following example jsonpath:

            /path/[?(@.field1='searchterm*'&@.field2='*search*')][/@['field1'],/@['field2']][0:24]

        The last part of the URL will contain a JSONPath-query:

            [filter][sort][start:end:step]
        """
        path = request.path
        if not path.endswith("/"):
            path = path + "/"
        # assuming that at least one /path/ will be before the jsonpath query
        # and that the character [ initiates and ] ends the jsonpath
        match = re.match(r'^/.*/(\[.*\])/$', path)
        if match:
            self.jsonpath = match.groups()[0]
        if self.jsonpath:
            # strip the outer [ ] and split on ][ to get the three sections
            parts = self.jsonpath[1:-1].split("][")
            for part in parts:
                if part.startswith("?"):
                    self.jsonpath_filters = part
                elif re.match(r'^[/\\].*$', part):
                    self.jsonpath_sorting = part
                # [start:end:step]
                elif re.match(r'^\d*:\d*:{0,1}\d*$', part):
                    self.jsonpath_paging = part
        super(JsonQueryRestStoreInfo, self).__init__(request, **kwargs)

    def set_paging(self):
        """Parse the ``start:end[:step]`` section into start/end indices."""
        # BUG FIX: guard against a missing paging section -- re.match(None)
        # raised TypeError.
        if not self.jsonpath_paging:
            return
        # handling 0:24
        match = re.match(r'^(\d*):(\d*):{0,1}\d*$', self.jsonpath_paging)
        if match:
            start, end = match.groups()
            # BUG FIX: the original tested ``start.length`` / ``end.length``,
            # which does not exist on Python strings (AttributeError).
            if len(start) == 0:
                start = 0
            if len(end) == 0:
                end = int(start) + self.max_count
            start, end = int(start), int(end) + 1  # range-end is inclusive
            self.start = start
            count = self.max_count
            if end - start < self.max_count:
                count = end - start
            self.end = start + count

    def set_sorting(self):
        """Parse ``/@['field']`` (ascending) / ``\\@['field']`` (descending)."""
        if not self.jsonpath_sorting:
            return
        # handling /@['field1'],/@['field2']
        for f in self.jsonpath_sorting.split(",/"):
            m = re.match(r"([\\/])@\['(.*)'\]", f)
            if m:
                direction, field = m.groups()
                # BUG FIX: the ascending branch used to assign an unused
                # variable ("descending") instead of clearing the "-" prefix,
                # so every field sorted descending.
                sort_prefix = "" if direction == "/" else "-"
                self.sorting.append(sort_prefix + field)

    def set_filters(self):
        # handling ?(@.field1='searchterm*'&@.field2~'*search*')
        pass
__all__ = ('QueryInfo', 'QueryReadStoreInfo',
'JsonRestStoreInfo', 'JsonQueryRestStoreInfo',)
class QueryInfoFeatures(object):
    # Capability flags for QueryInfo implementations.
    # NOTE(review): nothing in this module reads these flags -- confirm callers.
    sorting = True
    paging = False
class QueryInfo(object):
    """Extracts paging/sorting/filter information from a request and applies
    it to a queryset (subclasses implement the set_* hooks)."""
    # Class-level defaults. The mutable ones are shadowed with fresh
    # per-instance copies in __init__: previously ``filters``/``sorting``
    # were shared class attributes, so ``self.sorting.append(...)`` leaked
    # state across every instance (BUG FIX).
    start = 0
    end = 25
    filters = {}
    sorting = []  # entries like "field" (ascending) / "-field" (descending)
    request = None
    max_count = 25
    def __init__(self, request, max_count=None, **kwargs):
        """Store the request; optionally override the page-size cap."""
        self.request = request
        # Fresh per-instance containers -- never mutate the class defaults.
        self.filters = {}
        self.sorting = []
        if max_count is not None:
            self.max_count = max_count
    def extract(self):
        """Populate paging, sorting and filters from the request."""
        self.set_paging()
        self.set_sorting()
        self.set_filters()
    def set_paging(self):
        """Hook: implemented by subclasses."""
        pass
    def set_sorting(self):
        """Hook: implemented by subclasses."""
        pass
    def set_filters(self):
        """Hook: implemented by subclasses."""
        pass
    def process(self, queryset):
        """Apply extracted filters, ordering and slice to ``queryset``."""
        return queryset.filter(**self.filters).order_by(*self.sorting)[self.start:self.end]
class QueryReadStoreInfo(QueryInfo):
    """Evaluates a dojox.data.QueryReadStore request: paging, sorting, filters
    (parameters may arrive via GET or POST)."""
    def set_paging(self):
        """Read ``start``/``count`` parameters, clamping count to max_count."""
        start = self.request[self.request.method].pop('start', 0)
        # TODO: start = 1???
        count = self.request[self.request.method].pop('count', 25)
        # The dojo combobox may return "Infinity" (or other junk) as count.
        # BUG FIX: the original called an undefined ``is_number()`` helper
        # (NameError at runtime); validate by attempting the conversion.
        try:
            count = int(count)
        except (TypeError, ValueError):
            count = self.max_count
        if count > self.max_count:
            count = self.max_count
        self.start = int(start)
        self.end = self.start + count
    def set_sorting(self):
        """Read the ``sort`` parameter: "-field" descending, "field" ascending."""
        sort_attr = self.request[self.request.method].pop('sort', None)
        if sort_attr:
            self.sorting.append(sort_attr)
    def set_filters(self):
        """Treat every remaining request parameter as an exact-match filter."""
        query_dict = {}
        for k, v in self.request[self.request.method].items():
            query_dict[k] = v
        # BUG FIX: the collected filters were previously built and discarded.
        self.filters = query_dict
class JsonRestStoreInfo(QueryReadStoreInfo):
    """Evaluates a dojox.data.JsonRestStore request: paging comes from the
    ``Range`` header, filters via GET; sorting is not supported here."""
    def set_paging(self):
        """Parse the ``Range: items=start-end`` header into start/end."""
        # Receiving: Range: items=0-24  ->  Content-Range: items 0-24/66
        # BUG FIX: the header lives on the request, not on self
        # (``self.META`` raised AttributeError).
        # NOTE(review): Django normally exposes this header as
        # META['HTTP_RANGE'] -- confirm which key callers actually see.
        if 'RANGE' in self.request.META:
            regexp = re.compile(r"^\s*items=(\d+)-(\d+)", re.I)
            match = regexp.match(self.request.META['RANGE'])
            if match:
                start, end = match.groups()
                start, end = int(start), int(end) + 1  # range-end is inclusive
                self.start = start
                count = self.max_count
                if end - start < self.max_count:
                    count = end - start
                self.end = start + count
    def set_sorting(self):
        # sorting is not available in the normal JsonRestStore
        pass
class JsonQueryRestStoreInfo(QueryInfo):
    """Extracts [filter][sort][start:end:step] sections from a JSONPath-style
    URL tail, e.g. /path/[?(@.f1='x*')][/@['f1']][0:24]."""
    jsonpath = None
    jsonpath_filters = None
    jsonpath_sorting = None
    jsonpath_paging = None
    def __init__(self, request, **kwargs):
        """Split the trailing [filter][sort][paging] query off request.path."""
        path = request.path
        if not path.endswith("/"):
            path = path + "/"
        # at least one /path/ precedes the jsonpath; [ starts it, ] ends it
        match = re.match(r'^/.*/(\[.*\])/$', path)
        if match:
            self.jsonpath = match.groups()[0]
        if self.jsonpath:
            # strip the outer [ ] and split on ][ to get the sections
            parts = self.jsonpath[1:-1].split("][")
            for part in parts:
                if part.startswith("?"):
                    self.jsonpath_filters = part
                elif re.match(r'^[/\\].*$', part):
                    self.jsonpath_sorting = part
                # [start:end:step]
                elif re.match(r'^\d*:\d*:{0,1}\d*$', part):
                    self.jsonpath_paging = part
        super(JsonQueryRestStoreInfo, self).__init__(request, **kwargs)
    def set_paging(self):
        """Parse the ``start:end[:step]`` section into start/end indices."""
        # BUG FIX: guard against a missing paging section -- re.match(None)
        # raised TypeError.
        if not self.jsonpath_paging:
            return
        # handling 0:24
        match = re.match(r'^(\d*):(\d*):{0,1}\d*$', self.jsonpath_paging)
        if match:
            start, end = match.groups()
            # BUG FIX: the original tested ``start.length`` / ``end.length``,
            # which does not exist on Python strings (AttributeError).
            if len(start) == 0:
                start = 0
            if len(end) == 0:
                end = int(start) + self.max_count
            start, end = int(start), int(end) + 1  # range-end is inclusive
            self.start = start
            count = self.max_count
            if end - start < self.max_count:
                count = end - start
            self.end = start + count
    def set_sorting(self):
        """Parse ``/@['field']`` (ascending) / ``\\@['field']`` (descending)."""
        if not self.jsonpath_sorting:
            return
        # handling /@['field1'],/@['field2']
        for f in self.jsonpath_sorting.split(",/"):
            m = re.match(r"([\\/])@\['(.*)'\]", f)
            if m:
                direction, field = m.groups()
                # BUG FIX: the ascending branch used to assign an unused
                # variable ("descending") instead of clearing the "-" prefix.
                sort_prefix = "" if direction == "/" else "-"
                self.sorting.append(sort_prefix + field)
    def set_filters(self):
        # handling ?(@.field1='searchterm*'&@.field2~'*search*')
        pass
1c3a9d06ab53caee2453bd894a5e26b2ee1e502f | 16,846 | py | Python | Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/Initialization.py | ghbtest/deep-learning-coursera | 95d343f2136e20f285963a2605739dc966d82b09 | [
"MIT"
] | null | null | null | Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/Initialization.py | ghbtest/deep-learning-coursera | 95d343f2136e20f285963a2605739dc966d82b09 | [
"MIT"
] | null | null | null | Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/Initialization.py | ghbtest/deep-learning-coursera | 95d343f2136e20f285963a2605739dc966d82b09 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Initialization
#
# Welcome to the first assignment of "Improving Deep Neural Networks".
#
# Training your neural network requires specifying an initial value of the weights. A well chosen initialization method will help learning.
#
# If you completed the previous course of this specialization, you probably followed our instructions for weight initialization, and it has worked out so far. But how do you choose the initialization for a new neural network? In this notebook, you will see how different initializations lead to different results.
#
# A well chosen initialization can:
# - Speed up the convergence of gradient descent
# - Increase the odds of gradient descent converging to a lower training (and generalization) error
#
# To get started, run the following cell to load the packages and the planar dataset you will try to classify.
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
get_ipython().run_line_magic('matplotlib', 'inline')
# Notebook-wide matplotlib defaults (applied to every figure below).
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
# You would like a classifier to separate the blue dots from the red dots.
# ## 1 - Neural Network model
# You will use a 3-layer neural network (already implemented for you). Here are the initialization methods you will experiment with:
# - *Zeros initialization* -- setting `initialization = "zeros"` in the input argument.
# - *Random initialization* -- setting `initialization = "random"` in the input argument. This initializes the weights to large random values.
# - *He initialization* -- setting `initialization = "he"` in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015.
#
# **Instructions**: Please quickly read over the code below, and run it. In the next part you will implement the three initialization methods that this `model()` calls.
# In[2]:
def model(X, Y, learning_rate=0.01, num_iterations=15000, print_cost=True, initialization="he"):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
    learning_rate -- learning rate for gradient descent
    num_iterations -- number of iterations to run gradient descent
    print_cost -- if True, print the cost every 1000 iterations
    initialization -- flag to choose which initialization to use ("zeros","random" or "he")

    Returns:
    parameters -- parameters learnt by the model
    """
    costs = []  # cost recorded every 1000 iterations, for the plot below
    layers_dims = [X.shape[0], 10, 5, 1]

    # Pick the requested initialization scheme for the parameter dictionary.
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)

    # Gradient-descent loop: forward pass, loss, backward pass, update.
    for iteration in range(num_iterations):
        a3, cache = forward_propagation(X, parameters)
        cost = compute_loss(a3, Y)
        grads = backward_propagation(X, Y, cache)
        parameters = update_parameters(parameters, grads, learning_rate)

        if print_cost and iteration % 1000 == 0:
            print("Cost after iteration {}: {}".format(iteration, cost))
            costs.append(cost)

    # Plot the learning curve.
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
# ## 2 - Zero initialization
#
# There are two types of parameters to initialize in a neural network:
# - the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$
# - the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$
#
# **Exercise**: Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to "break symmetry", but lets try it anyway and see what happens. Use np.zeros((..,..)) with the correct shapes.
# In[3]:
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
    """
    All-zero initialization (fails to break symmetry -- for demonstration).

    Arguments:
    layer_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                  Wl -- weight matrix of shape (layers_dims[l], layers_dims[l-1])
                  bl -- bias vector of shape (layers_dims[l], 1)
    """
    parameters = {}
    num_layers = len(layers_dims)
    for layer in range(1, num_layers):
        rows, cols = layers_dims[layer], layers_dims[layer - 1]
        parameters["W" + str(layer)] = np.zeros((rows, cols))
        parameters["b" + str(layer)] = np.zeros((rows, 1))
    return parameters
# In[4]:
# Sanity-check the zero initializer on a tiny 3 -> 2 -> 1 layout.
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# Run the following code to train your model on 15,000 iterations using zeros initialization.
# In[5]:
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# The performance is really bad, and the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Lets look at the details of the predictions and the decision boundary:
# In[6]:
print("predictions_train = " + str(predictions_train))
print("predictions_test = " + str(predictions_test))
# In[7]:
# Visualize the (degenerate) decision boundary learned with zero init.
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5, 1.5])
axes.set_ylim([-1.5, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# The model is predicting 0 for every example.
#
# In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, and you might as well be training a neural network with $n^{[l]}=1$ for every layer, and the network is no more powerful than a linear classifier such as logistic regression.
# <font color='blue'>
# **What you should remember**:
# - The weights $W^{[l]}$ should be initialized randomly to break symmetry.
# - It is however okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly.
#
# ## 3 - Random initialization
#
# To break symmetry, lets intialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you will see what happens if the weights are intialized randomly, but to very large values.
#
# **Exercise**: Implement the following function to initialize your weights to large random values (scaled by \*10) and your biases to zeros. Use `np.random.randn(..,..) * 10` for weights and `np.zeros((.., ..))` for biases. We are using a fixed `np.random.seed(..)` to make sure your "random" weights match ours, so don't worry if running several times your code gives you always the same initial values for the parameters.
# In[59]:
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
    """
    Large random initialization: weights ~ N(0, 1) scaled by *10, biases zero.

    Arguments:
    layer_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                  Wl -- weight matrix of shape (layers_dims[l], layers_dims[l-1])
                  bl -- bias vector of shape (layers_dims[l], 1)
    """
    np.random.seed(3)  # This seed makes sure your "random" numbers will be the as ours
    parameters = {}
    L = len(layers_dims)  # integer representing the number of layers

    for l in range(1, L):
        # BUG FIX: the exercise specifies weights "scaled by *10"; the code
        # scaled by *3, which neither matches the expected outputs nor
        # demonstrates the exploding initial-cost behaviour discussed below.
        parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) * 10
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))

    return parameters
# In[51]:
# Sanity-check the random initializer on a tiny 3 -> 2 -> 1 layout.
parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# Run the following code to train your model on 15,000 iterations using random initialization.
# In[52]:
parameters = model(train_X, train_Y, initialization = "random")
print("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# If you see "inf" as the cost after the iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes.
#
# Anyway, it looks like you have broken symmetry, and this gives better results. than before. The model is no longer outputting all 0s.
# In[21]:
print(predictions_train)
print(predictions_test)
# In[22]:
# Visualize the decision boundary learned with large random init.
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5, 1.5])
axes.set_ylim([-1.5, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# **Observations**:
# - The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\log(a^{[3]}) = \log(0)$, the loss goes to infinity.
# - Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm.
# - If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization.
#
# <font color='blue'>
# **In summary**:
# - Initializing weights to very large random values does not work well.
# - Hopefully intializing with small random values does better. The important question is: how small should be these random values be? Lets find out in the next part!
# In[31]:
#compare X and parameter values
# Ad-hoc inspection of input/weight distributions. NOTE(review): outside a
# notebook cell the bare stats.describe(...) results are computed and
# discarded (only the last expression of a cell is echoed).
from scipy import stats
stats.describe(train_X.flatten())
# xmean=np.mean(train_X)
# print (xmean)
# In[28]:
print (parameters)
# In[57]:
# Flatten every weight matrix of the trained 3-layer net into one vector.
allw=np.concatenate((parameters['W1'].flatten(), parameters['W2'].flatten(), parameters['W3'].flatten()))
# In[58]:
stats.describe(allw)
# ## 4 - He initialization
#
# Finally, try "He Initialization"; this is named for the first author of He et al., 2015. (If you have heard of "Xavier initialization", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of `sqrt(1./layers_dims[l-1])` where He initialization would use `sqrt(2./layers_dims[l-1])`.)
#
# **Exercise**: Implement the following function to initialize your parameters with He initialization.
#
# **Hint**: This function is similar to the previous `initialize_parameters_random(...)`. The only difference is that instead of multiplying `np.random.randn(..,..)` by 10, you will multiply it by $\sqrt{\frac{2}{\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation.
# In[53]:
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
    """
    He initialization: weights ~ N(0, 1) * sqrt(2 / fan_in), biases zero
    (recommended for layers with ReLU activations).

    Arguments:
    layer_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                  Wl -- weight matrix of shape (layers_dims[l], layers_dims[l-1])
                  bl -- bias vector of shape (layers_dims[l], 1)
    """
    np.random.seed(3)
    parameters = {}
    num_layers = len(layers_dims) - 1
    for layer in range(1, num_layers + 1):
        fan_in, fan_out = layers_dims[layer - 1], layers_dims[layer]
        scale = np.sqrt(2 / fan_in)
        parameters["W" + str(layer)] = np.random.randn(fan_out, fan_in) * scale
        parameters["b" + str(layer)] = np.zeros((fan_out, 1))
    return parameters
# In[54]:
# Sanity-check He initialization on a tiny 2 -> 4 -> 1 layout (expected
# values for this seed are listed in the markdown table below).
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# [[ 1.78862847 0.43650985]
# [ 0.09649747 -1.8634927 ]
# [-0.2773882 -0.35475898]
# [-0.08274148 -0.62700068]]
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# [[ 0.]
# [ 0.]
# [ 0.]
# [ 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# [[-0.03098412 -0.33744411 -0.92904268 0.62552248]]
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# [[ 0.]]
# </td>
# </tr>
#
# </table>
# Run the following code to train your model on 15,000 iterations using He initialization.
# In[55]:
# Train with He initialization and report train/test accuracy.
parameters = model(train_X, train_Y, initialization = "he")
print("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# In[56]:
# Visualize the decision boundary learned with He init.
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5, 1.5])
axes.set_ylim([-1.5, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# **Observations**:
# - The model with He initialization separates the blue and the red dots very well in a small number of iterations.
#
# ## 5 - Conclusions
# You have seen three different types of initializations. For the same number of iterations and same hyperparameters the comparison is:
#
# <table>
# <tr>
# <td>
# **Model**
# </td>
# <td>
# **Train accuracy**
# </td>
# <td>
# **Problem/Comment**
# </td>
#
# </tr>
# <td>
# 3-layer NN with zeros initialization
# </td>
# <td>
# 50%
# </td>
# <td>
# fails to break symmetry
# </td>
# <tr>
# <td>
# 3-layer NN with large random initialization
# </td>
# <td>
# 83%
# </td>
# <td>
# too large weights
# </td>
# </tr>
# <tr>
# <td>
# 3-layer NN with He initialization
# </td>
# <td>
# 99%
# </td>
# <td>
# recommended method
# </td>
# </tr>
# </table>
# <font color='blue'>
# **What you should remember from this notebook**:
# - Different initializations lead to different results
# - Random initialization is used to break symmetry and make sure different hidden units can learn different things
# - Don't intialize to values that are too large
# - He initialization works well for networks with ReLU activations.
| 33.692 | 426 | 0.649056 |
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
get_ipython().run_line_magic('matplotlib', 'inline')
# Notebook-wide matplotlib defaults, then load the blue/red circles dataset.
plt.rcParams['figure.figsize'] = (7.0, 4.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
train_X, train_Y, test_X, test_Y = load_dataset()
rations=15000, print_cost=True, initialization="he"):
grads = {}
costs = []
m = X.shape[1]
layers_dims = [X.shape[0], 10, 5, 1]
if initialization == "zeros":
parameters = initialize_parameters_zeros(layers_dims)
elif initialization == "random":
parameters = initialize_parameters_random(layers_dims)
elif initialization == "he":
parameters = initialize_parameters_he(layers_dims)
for i in range(0, num_iterations):
a3, cache = forward_propagation(X, parameters)
cost = compute_loss(a3, Y)
grads = backward_propagation(X, Y, cache)
parameters = update_parameters(parameters, grads, learning_rate)
if print_cost and i % 1000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
costs.append(cost)
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
rameters_zeros
def initialize_parameters_zeros(layers_dims):
    """All-zero initialization: Wl has shape (layers_dims[l], layers_dims[l-1]),
    bl has shape (layers_dims[l], 1). Fails to break symmetry (demo)."""
    parameters = {}
    num_layers = len(layers_dims)
    for layer in range(1, num_layers):
        rows, cols = layers_dims[layer], layers_dims[layer - 1]
        parameters["W" + str(layer)] = np.zeros((rows, cols))
        parameters["b" + str(layer)] = np.zeros((rows, 1))
    return parameters
return parameters
# In[4]:
# Sanity-check the zero initializer, then train/evaluate/plot with it.
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# Run the following code to train your model on 15,000 iterations using zeros initialization.
# In[5]:
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# The performance is really bad, and the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Lets look at the details of the predictions and the decision boundary:
# In[6]:
print("predictions_train = " + str(predictions_train))
print("predictions_test = " + str(predictions_test))
# In[7]:
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5, 1.5])
axes.set_ylim([-1.5, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# The model is predicting 0 for every example.
#
# In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, and you might as well be training a neural network with $n^{[l]}=1$ for every layer, and the network is no more powerful than a linear classifier such as logistic regression.
# <font color='blue'>
# **What you should remember**:
# - The weights $W^{[l]}$ should be initialized randomly to break symmetry.
# - It is however okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly.
#
# ## 3 - Random initialization
#
# To break symmetry, lets intialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you will see what happens if the weights are intialized randomly, but to very large values.
#
# **Exercise**: Implement the following function to initialize your weights to large random values (scaled by \*10) and your biases to zeros. Use `np.random.randn(..,..) * 10` for weights and `np.zeros((.., ..))` for biases. We are using a fixed `np.random.seed(..)` to make sure your "random" weights match ours, so don't worry if running several times your code gives you always the same initial values for the parameters.
def initialize_parameters_random(layers_dims):
    """Initialize weights with large random values (scaled by 10), biases with zeros.

    Reconstructed: the original lines were garbled by extraction; the body
    follows the in-file instructions (np.random.randn(..,..) * 10 for weights,
    np.zeros((..,..)) for biases, fixed np.random.seed).

    Arguments:
    layers_dims -- list containing the size of each layer.

    Returns:
    parameters -- dict mapping "W1", "b1", ..., "WL-1", "bL-1" to arrays,
                  where Wl has shape (layers_dims[l], layers_dims[l-1]) and
                  bl has shape (layers_dims[l], 1).
    """
    np.random.seed(3)  # fixed seed so the "random" weights are reproducible
    parameters = {}
    L = len(layers_dims)
    for l in range(1, L):
        # Large (*10) random weights break symmetry but slow down learning.
        parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) * 10
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
    return parameters


parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
parameters = model(train_X, train_Y, initialization = "random")
print("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
#
# Anyway, it looks like you have broken symmetry, and this gives better results. than before. The model is no longer outputting all 0s.
# In[21]:
print(predictions_train)
print(predictions_test)
# In[22]:
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5, 1.5])
axes.set_ylim([-1.5, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# **Observations**:
# - The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\log(a^{[3]}) = \log(0)$, the loss goes to infinity.
# - Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm.
# - If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization.
#
# <font color='blue'>
# **In summary**:
# - Initializing weights to very large random values does not work well.
# - Hopefully initializing with small random values does better. The important question is: how small should these random values be? Let's find out in the next part!
# In[31]:
#compare X and parameter values
from scipy import stats
stats.describe(train_X.flatten())
# xmean=np.mean(train_X)
# print (xmean)
# In[28]:
print (parameters)
# In[57]:
allw=np.concatenate((parameters['W1'].flatten(), parameters['W2'].flatten(), parameters['W3'].flatten()))
# In[58]:
stats.describe(allw)
# ## 4 - He initialization
#
# Finally, try "He Initialization"; this is named for the first author of He et al., 2015. (If you have heard of "Xavier initialization", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of `sqrt(1./layers_dims[l-1])` where He initialization would use `sqrt(2./layers_dims[l-1])`.)
#
# **Exercise**: Implement the following function to initialize your parameters with He initialization.
#
# **Hint**: This function is similar to the previous `initialize_parameters_random(...)`. The only difference is that instead of multiplying `np.random.randn(..,..)` by 10, you will multiply it by $\sqrt{\frac{2}{\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation.
# In[53]:
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
    """He initialization: weights ~ N(0, 2 / fan_in), biases zero.

    Arguments:
    layers_dims -- list containing the size of each layer.

    Returns:
    parameters -- dict mapping "Wl"/"bl" to the initialized arrays, where
                  Wl has shape (layers_dims[l], layers_dims[l-1]).
    """
    np.random.seed(3)
    parameters = {}
    n_weight_layers = len(layers_dims) - 1  # integer number of weight layers

    for layer in range(1, n_weight_layers + 1):
        fan_in = layers_dims[layer - 1]
        # He et al. (2015) scaling for ReLU layers: sqrt(2 / fan_in).
        he_scale = np.sqrt(2 / fan_in)
        parameters['W' + str(layer)] = np.random.randn(layers_dims[layer], fan_in) * he_scale
        parameters['b' + str(layer)] = np.zeros((layers_dims[layer], 1))

    return parameters
# In[54]:
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# [[ 1.78862847 0.43650985]
# [ 0.09649747 -1.8634927 ]
# [-0.2773882 -0.35475898]
# [-0.08274148 -0.62700068]]
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# [[ 0.]
# [ 0.]
# [ 0.]
# [ 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# [[-0.03098412 -0.33744411 -0.92904268 0.62552248]]
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# [[ 0.]]
# </td>
# </tr>
#
# </table>
# Run the following code to train your model on 15,000 iterations using He initialization.
# In[55]:
parameters = model(train_X, train_Y, initialization = "he")
print("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# In[56]:
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5, 1.5])
axes.set_ylim([-1.5, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# **Observations**:
# - The model with He initialization separates the blue and the red dots very well in a small number of iterations.
#
# ## 5 - Conclusions
# You have seen three different types of initializations. For the same number of iterations and same hyperparameters the comparison is:
#
# <table>
# <tr>
# <td>
# **Model**
# </td>
# <td>
# **Train accuracy**
# </td>
# <td>
# **Problem/Comment**
# </td>
#
# </tr>
# <td>
# 3-layer NN with zeros initialization
# </td>
# <td>
# 50%
# </td>
# <td>
# fails to break symmetry
# </td>
# <tr>
# <td>
# 3-layer NN with large random initialization
# </td>
# <td>
# 83%
# </td>
# <td>
# too large weights
# </td>
# </tr>
# <tr>
# <td>
# 3-layer NN with He initialization
# </td>
# <td>
# 99%
# </td>
# <td>
# recommended method
# </td>
# </tr>
# </table>
# <font color='blue'>
# **What you should remember from this notebook**:
# - Different initializations lead to different results
# - Random initialization is used to break symmetry and make sure different hidden units can learn different things
# - Don't initialize to values that are too large
| true | true |
1c3a9d8f7b6e6385b2b168615513a966bcde5285 | 3,042 | py | Python | basmati/basmati_cmd.py | markmuetz/basmati | cb6fc0e1959501f7ea136c5184f897557459bc54 | [
"Apache-2.0"
] | 4 | 2020-02-21T02:42:37.000Z | 2021-09-08T17:10:38.000Z | basmati/basmati_cmd.py | markmuetz/basmati | cb6fc0e1959501f7ea136c5184f897557459bc54 | [
"Apache-2.0"
] | null | null | null | basmati/basmati_cmd.py | markmuetz/basmati | cb6fc0e1959501f7ea136c5184f897557459bc54 | [
"Apache-2.0"
] | 1 | 2021-09-08T09:15:32.000Z | 2021-09-08T09:15:32.000Z | import argparse
import sys
from typing import List
from basmati.basmati_demo import demo_main
from basmati.basmati_errors import BasmatiError
from basmati.downloader import download_main, DATASETS, HYDROBASINS_REGIONS
from basmati.setup_logging import setup_logger
from basmati.version import get_version
def _build_parser() -> argparse.ArgumentParser:
    """Construct the argument parser for the BASMATI command line tool."""
    parser = argparse.ArgumentParser(description='BASMATI command line tool')

    # Global flags shared by every subcommand.
    parser.add_argument('--debug', '-D', help='Enable debug logging', action='store_true')
    if not sys.platform.startswith('win'):
        # Colour logging is only offered on non-Windows platforms.
        parser.add_argument('--bw', '-B', help='Disable colour logging', action='store_true')
    parser.add_argument('--warn', '-W', help='Warn on stderr', action='store_true')

    # Subcommand dispatch: the chosen name lands in args.subcmd_name.
    subparsers = parser.add_subparsers(dest='subcmd_name', required=True)

    # demo: no extra options.
    subparsers.add_parser('demo', help='Run through BASMATI demo')

    # download (alias: dl).
    dl_parser = subparsers.add_parser('download', aliases=['dl'],
                                      help='Download HydroSHEDS datasets')
    dl_parser.add_argument('--dataset', '-d', required=True, choices=DATASETS,
                           help='Dataset to download')
    dl_parser.add_argument('--region', '-r', required=True,
                           choices=HYDROBASINS_REGIONS,
                           help='Region to download')
    dl_parser.add_argument('--delete-zip', action='store_true',
                           help='Delete zipfile after unpacking')

    # version.
    version_parser = subparsers.add_parser('version', help='Print BASMATI version')
    version_parser.add_argument('--long', '-l', action='store_true', help='long version')

    return parser
def _parse_args(argv: List[str]) -> argparse.Namespace:
    """Parse ``argv`` (argv[0] is the program name and is dropped)."""
    return _build_parser().parse_args(argv[1:])
def basmati_cmd(argv: List[str] = sys.argv) -> None:
    """Entry point for the BASMATI CLI.

    Parses ``argv``, configures logging, then dispatches to the chosen
    subcommand (demo / download / version). Errors are logged and re-raised
    so the process exits non-zero.
    """
    args = _parse_args(argv)
    loglevel = 'DEBUG' if args.debug else 'INFO'
    if sys.platform.startswith('win'):
        # --bw is not defined on Windows (see _build_parser), so force
        # black-and-white logging before args.bw is read below.
        args.bw = True
    logger = setup_logger(loglevel, not args.bw, args.warn)
    logger.debug(argv)
    logger.debug(args)
    try:
        # Dispatch command.
        # N.B. args should always be dereferenced at this point,
        # not passed into any subsequent functions.
        if args.subcmd_name == 'demo':
            demo_main()
        elif args.subcmd_name in ['download', 'dl']:
            download_main(args.dataset, args.region, args.delete_zip)
        elif args.subcmd_name == 'version':
            print(get_version(form='long' if args.long else 'short'))
    except BasmatiError as be:
        # Known, user-facing errors: log, then propagate.
        logger.error(be)
        raise
    except Exception as e:
        # Unexpected errors: log before re-raising with full traceback.
        logger.error(e)
        raise
| 37.097561 | 93 | 0.624918 | import argparse
import sys
from typing import List
from basmati.basmati_demo import demo_main
from basmati.basmati_errors import BasmatiError
from basmati.downloader import download_main, DATASETS, HYDROBASINS_REGIONS
from basmati.setup_logging import setup_logger
from basmati.version import get_version
def _build_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description='BASMATI command line tool')
parser.add_argument('--debug', '-D', help='Enable debug logging', action='store_true')
if not sys.platform.startswith('win'):
parser.add_argument('--bw', '-B', help='Disable colour logging', action='store_true')
parser.add_argument('--warn', '-W', help='Warn on stderr', action='store_true')
subparsers = parser.add_subparsers(dest='subcmd_name', required=True)
demo_parser = subparsers.add_parser('demo', help='Run through BASMATI demo')
download_parser = subparsers.add_parser('download', aliases=['dl'],
help='Download HydroSHEDS datasets')
download_parser.add_argument('--dataset', '-d',
required=True,
choices=DATASETS,
help='Dataset to download')
download_parser.add_argument('--region', '-r',
required=True,
choices=HYDROBASINS_REGIONS,
help='Region to download')
download_parser.add_argument('--delete-zip',
action='store_true',
help='Delete zipfile after unpacking')
version_parser = subparsers.add_parser('version', help='Print BASMATI version')
version_parser.add_argument('--long', '-l', action='store_true', help='long version')
return parser
def _parse_args(argv: List[str]) -> argparse.Namespace:
parser = _build_parser()
args = parser.parse_args(argv[1:])
return args
def basmati_cmd(argv: List[str] = sys.argv) -> None:
args = _parse_args(argv)
loglevel = 'DEBUG' if args.debug else 'INFO'
if sys.platform.startswith('win'):
args.bw = True
logger = setup_logger(loglevel, not args.bw, args.warn)
logger.debug(argv)
logger.debug(args)
try:
if args.subcmd_name == 'demo':
demo_main()
elif args.subcmd_name in ['download', 'dl']:
download_main(args.dataset, args.region, args.delete_zip)
elif args.subcmd_name == 'version':
print(get_version(form='long' if args.long else 'short'))
except BasmatiError as be:
logger.error(be)
raise
except Exception as e:
logger.error(e)
raise
| true | true |
1c3a9dd8d891bfd4b9d4f475db13dadf84fa110b | 1,243 | py | Python | allennlp/modules/seq2vec_encoders/seq2vec_encoder.py | nadgeri14/allennlp | 2eefffaf71612263a1c20e8ce4107849cfd5efe3 | [
"Apache-2.0"
] | null | null | null | allennlp/modules/seq2vec_encoders/seq2vec_encoder.py | nadgeri14/allennlp | 2eefffaf71612263a1c20e8ce4107849cfd5efe3 | [
"Apache-2.0"
] | null | null | null | allennlp/modules/seq2vec_encoders/seq2vec_encoder.py | nadgeri14/allennlp | 2eefffaf71612263a1c20e8ce4107849cfd5efe3 | [
"Apache-2.0"
] | null | null | null | from allennlp.modules.encoder_base import _EncoderBase
from allennlp.common import Registrable
class Seq2VecEncoder(_EncoderBase, Registrable):
"""
A ``Seq2VecEncoder`` is a ``Module`` that takes as input a sequence of vectors and returns a
single vector. Input shape : ``(batch_size, sequence_length, input_dim)``; output shape:
``(batch_size, output_dim)``.
We add two methods to the basic ``Module`` API: :func:`get_input_dim()` and :func:`get_output_dim()`.
You might need this if you want to construct a ``Linear`` layer using the output of this encoder,
or to raise sensible errors for mis-matching input dimensions.
"""
def get_input_dim(self) -> int:
"""
Returns the dimension of the vector input for each element in the sequence input
to a ``Seq2VecEncoder``. This is `not` the shape of the input tensor, but the
last element of that shape.
"""
raise NotImplementedError
def get_output_dim(self) -> int:
"""
Returns the dimension of the final vector output by this ``Seq2VecEncoder``. This is `not`
the shape of the returned tensor, but the last element of that shape.
"""
raise NotImplementedError
| 41.433333 | 105 | 0.680611 | from allennlp.modules.encoder_base import _EncoderBase
from allennlp.common import Registrable
class Seq2VecEncoder(_EncoderBase, Registrable):
def get_input_dim(self) -> int:
raise NotImplementedError
def get_output_dim(self) -> int:
raise NotImplementedError
| true | true |
1c3a9e2d07809822b70de13a2212e36d403b00b3 | 149 | py | Python | Muta3DMaps/core/AsyncV/__init__.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | [
"MIT"
] | null | null | null | Muta3DMaps/core/AsyncV/__init__.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | [
"MIT"
] | null | null | null | Muta3DMaps/core/AsyncV/__init__.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | [
"MIT"
] | null | null | null | # @Date: 2019-11-20T22:46:50+08:00
# @Email: 1730416009@stu.suda.edu.cn
# @Filename: __init__.py
# @Last modified time: 2019-11-24T23:13:42+08:00
| 29.8 | 48 | 0.691275 | true | true | |
1c3a9e8a268974d656e0c50418552adb90db0085 | 1,799 | py | Python | test/functional/sapling_malleable_sigs.py | ISLAMIC-DIGITAL-COIN/IDC | f78f9f0aa065698b4f826ed7765e9591e7ca855d | [
"MIT"
] | 1 | 2021-12-30T23:56:46.000Z | 2021-12-30T23:56:46.000Z | test/functional/sapling_malleable_sigs.py | martin-braun/IDC | 96af3558b4e2ae127082856ac9ed134d04878e69 | [
"MIT"
] | null | null | null | test/functional/sapling_malleable_sigs.py | martin-braun/IDC | 96af3558b4e2ae127082856ac9ed134d04878e69 | [
"MIT"
] | 1 | 2022-01-10T22:15:21.000Z | 2022-01-10T22:15:21.000Z | #!/usr/bin/env python3
# Copyright (c) 2018 The Zcash developers
# Copyright (c) 2020 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.test_framework import islamic_digital_coinTestFramework
from test_framework.messages import (
CTransaction,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
bytes_to_hex_str,
hex_str_to_bytes,
)
from decimal import Decimal
from io import BytesIO
class MalleableSigsTest(islamic_digital_coinTestFramework):
    """Check that stripping sapling data from a shield tx invalidates it.

    A raw shield transaction whose sapling payload has been blanked must be
    rejected by the mempool with 'mandatory-script-verify-flag-failed'.
    """

    def set_test_params(self):
        # Single node; activate the v5_shield upgrade at block 201.
        self.num_nodes = 1
        saplingUpgrade = ['-nuparams=v5_shield:201']
        self.extra_args = [saplingUpgrade]

    def run_test(self):
        node = self.nodes[0]
        # Mine up to block 202, past the v5_shield activation height.
        node.generate(2)
        assert_equal(node.getblockcount(), 202)
        z_addr = node.getnewshieldaddress()
        shield_to = [{"address": z_addr, "amount": Decimal('10')}]
        # Create rawtx shielding 10 IDC
        self.log.info("Shielding 10 IDC...")
        rawtx_hex = node.rawshieldsendmany("from_transparent", shield_to)
        self.log.info("Raw tx created")
        # Malleate the tx by blanking its sapling data, then resubmit it.
        self.log.info("Removing sapling data...")
        new_tx = CTransaction()
        new_tx.deserialize(BytesIO(hex_str_to_bytes(rawtx_hex)))
        new_tx.sapData = b""
        new_rawtx = bytes_to_hex_str(new_tx.serialize())
        self.log.info("Sending malleated tx...")
        # Mempool must reject the tx: the signature no longer covers the data.
        assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed",
                                node.sendrawtransaction, new_rawtx, True)
        self.log.info("Good. Tx NOT accepted in mempool")
if __name__ == '__main__':
MalleableSigsTest().main()
| 31.561404 | 75 | 0.683157 |
from test_framework.test_framework import islamic_digital_coinTestFramework
from test_framework.messages import (
CTransaction,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
bytes_to_hex_str,
hex_str_to_bytes,
)
from decimal import Decimal
from io import BytesIO
class MalleableSigsTest(islamic_digital_coinTestFramework):
def set_test_params(self):
self.num_nodes = 1
saplingUpgrade = ['-nuparams=v5_shield:201']
self.extra_args = [saplingUpgrade]
def run_test(self):
node = self.nodes[0]
node.generate(2)
assert_equal(node.getblockcount(), 202)
z_addr = node.getnewshieldaddress()
shield_to = [{"address": z_addr, "amount": Decimal('10')}]
self.log.info("Shielding 10 IDC...")
rawtx_hex = node.rawshieldsendmany("from_transparent", shield_to)
self.log.info("Raw tx created")
self.log.info("Removing sapling data...")
new_tx = CTransaction()
new_tx.deserialize(BytesIO(hex_str_to_bytes(rawtx_hex)))
new_tx.sapData = b""
new_rawtx = bytes_to_hex_str(new_tx.serialize())
self.log.info("Sending malleated tx...")
assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed",
node.sendrawtransaction, new_rawtx, True)
self.log.info("Good. Tx NOT accepted in mempool")
if __name__ == '__main__':
MalleableSigsTest().main()
| true | true |
1c3a9e997419f1c66cb061c2fa8a7c938ae57ecc | 1,929 | py | Python | panel/models/tabulator.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | [
"BSD-3-Clause"
] | 1 | 2021-03-09T04:46:05.000Z | 2021-03-09T04:46:05.000Z | panel/models/tabulator.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | [
"BSD-3-Clause"
] | 2 | 2022-01-13T03:54:51.000Z | 2022-03-12T01:01:00.000Z | panel/models/tabulator.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | [
"BSD-3-Clause"
] | null | null | null | """
Implementation of the Tabulator model.
See http://tabulator.info/
"""
from bokeh.core.properties import (
Any, Bool, Dict, Enum, Instance, Int, List, Nullable, String
)
from bokeh.models import ColumnDataSource
from bokeh.models.layouts import HTMLBox
from bokeh.models.widgets.tables import TableColumn
JS_SRC = "https://unpkg.com/tabulator-tables@4.9.3/dist/js/tabulator.js"
MOMENT_SRC = "https://unpkg.com/moment@2.27.0/moment.js"
THEME_URL = "https://unpkg.com/tabulator-tables@4.9.3/dist/css/"
TABULATOR_THEMES = [
'default', 'site', 'simple', 'midnight', 'modern', 'bootstrap',
'bootstrap4', 'materialize', 'bulma', 'semantic-ui'
]
class DataTabulator(HTMLBox):
    """A Bokeh Model that enables easy use of Tabulator tables
    See http://tabulator.info/
    """

    # Arbitrary Tabulator configuration options, synced to the front end.
    configuration = Dict(String, Any)

    columns = List(Instance(TableColumn), help="""
    The list of child column widgets.
    """)

    # Set to True to request a download of the table (filename below).
    download = Bool(default=False)

    # Whether table cells are editable.
    editable = Bool(default=True)

    # File name used when downloading the table.
    filename = String(default="table.csv")

    # presumably: scroll to follow newly added rows — confirm against JS view
    follow = Bool(True)

    # Indices of rows to freeze in place.
    frozen_rows = List(Int)

    # Column names to group rows by.
    groupby = List(String)

    # Column names hidden from display.
    hidden_columns = List(String)

    # Tabulator layout mode controlling column sizing.
    layout = Enum('fit_data', 'fit_data_fill', 'fit_data_stretch', 'fit_data_table', 'fit_columns', default="fit_data")

    # Data source backing the table rows.
    source = Instance(ColumnDataSource)

    # Per-cell style strings keyed by row index, then column index.
    styles = Dict(Int, Dict(Int, List(String)))

    # Pagination mode, or None to disable pagination.
    pagination = Nullable(String)

    # Current page number (when paginated).
    page = Nullable(Int)

    # Rows per page.
    page_size = Int()

    # Highest available page number.
    max_page = Int()

    # Active sorters as a list of string->string dicts.
    sorters = List(Dict(String, String))

    # Tabulator CSS theme name; resolved against theme_url.
    theme = Enum(*TABULATOR_THEMES, default="simple")

    # Base URL for the theme CSS files.
    theme_url = String(default=THEME_URL)

    __css__ = [THEME_URL+'tabulator_simple.min.css']

    __javascript__ = [
        JS_SRC,
        MOMENT_SRC
    ]

    __js_require__ = {
        'paths': {
            'tabulator': JS_SRC[:-3]
        },
        'exports': {'tabulator': 'Tabulator'}
    }

    __js_skip__ = {'tabulator': __javascript__}
| 23.240964 | 119 | 0.658891 | from bokeh.core.properties import (
Any, Bool, Dict, Enum, Instance, Int, List, Nullable, String
)
from bokeh.models import ColumnDataSource
from bokeh.models.layouts import HTMLBox
from bokeh.models.widgets.tables import TableColumn
JS_SRC = "https://unpkg.com/tabulator-tables@4.9.3/dist/js/tabulator.js"
MOMENT_SRC = "https://unpkg.com/moment@2.27.0/moment.js"
THEME_URL = "https://unpkg.com/tabulator-tables@4.9.3/dist/css/"
TABULATOR_THEMES = [
'default', 'site', 'simple', 'midnight', 'modern', 'bootstrap',
'bootstrap4', 'materialize', 'bulma', 'semantic-ui'
]
class DataTabulator(HTMLBox):
configuration = Dict(String, Any)
columns = List(Instance(TableColumn), help="""
The list of child column widgets.
""")
download = Bool(default=False)
editable = Bool(default=True)
filename = String(default="table.csv")
follow = Bool(True)
frozen_rows = List(Int)
groupby = List(String)
hidden_columns = List(String)
layout = Enum('fit_data', 'fit_data_fill', 'fit_data_stretch', 'fit_data_table', 'fit_columns', default="fit_data")
source = Instance(ColumnDataSource)
styles = Dict(Int, Dict(Int, List(String)))
pagination = Nullable(String)
page = Nullable(Int)
page_size = Int()
max_page = Int()
sorters = List(Dict(String, String))
theme = Enum(*TABULATOR_THEMES, default="simple")
theme_url = String(default=THEME_URL)
__css__ = [THEME_URL+'tabulator_simple.min.css']
__javascript__ = [
JS_SRC,
MOMENT_SRC
]
__js_require__ = {
'paths': {
'tabulator': JS_SRC[:-3]
},
'exports': {'tabulator': 'Tabulator'}
}
__js_skip__ = {'tabulator': __javascript__}
| true | true |
1c3a9f36db55aa34814abb37e951ccba7dc54305 | 11,362 | py | Python | neutron/tests/common/l3_test_common.py | glove747/liberty-neutron | 35a4c85e781d10da4521565c3a367e4ecb50739d | [
"Apache-2.0"
] | null | null | null | neutron/tests/common/l3_test_common.py | glove747/liberty-neutron | 35a4c85e781d10da4521565c3a367e4ecb50739d | [
"Apache-2.0"
] | null | null | null | neutron/tests/common/l3_test_common.py | glove747/liberty-neutron | 35a4c85e781d10da4521565c3a367e4ecb50739d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import netaddr
from oslo_utils import uuidutils
from six import moves
from neutron.common import constants as l3_constants
_uuid = uuidutils.generate_uuid
class FakeDev(object):
    """Minimal stand-in for a network device: carries only a name."""

    def __init__(self, name):
        self.name = name
def get_ha_interface(ip='169.254.192.1', mac='12:34:56:78:2b:5d'):
    """Return a fake L3-HA admin port dict, shaped like the agent-side port."""
    subnet_id = _uuid()
    ha_fixed_ip = {'ip_address': ip,
                   'prefixlen': 18,
                   'subnet_id': subnet_id}
    ha_subnet = {'cidr': '169.254.192.0/18',
                 'gateway_ip': '169.254.255.254',
                 'id': subnet_id}
    port = {
        'admin_state_up': True,
        'device_id': _uuid(),
        'device_owner': 'network:router_ha_interface',
        'fixed_ips': [ha_fixed_ip],
        'id': _uuid(),
        'mac_address': mac,
        'name': u'L3 HA Admin port 0',
        'network_id': _uuid(),
        'status': u'ACTIVE',
        'subnets': [ha_subnet],
        'tenant_id': '',
        'agent_id': _uuid(),
        'agent_host': 'aaa',
        'priority': 1,
    }
    return port
def prepare_router_data(ip_version=4, enable_snat=None, num_internal_ports=1,
                        enable_floating_ip=False, enable_ha=False,
                        extra_routes=False, dual_stack=False,
                        v6_ext_gw_with_sub=True, **kwargs):
    """Build a fake router dict like the one the L3 agent processes.

    :param ip_version: address family of the external gateway (4 or 6).
    :param enable_snat: if not None, sets the router's 'enable_snat' flag.
    :param num_internal_ports: number of internal interfaces to attach.
    :param enable_floating_ip: add one floating IP (19.4.4.2 -> 10.0.0.1).
    :param enable_ha: mark the router HA (vr_id 1) with a fake HA interface.
    :param extra_routes: add one static route (8.8.8.0/24 via 19.4.4.4).
    :param dual_stack: give the gateway both an IPv4 and an IPv6 address.
    :param v6_ext_gw_with_sub: whether the v6 gateway has a subnet; when
        False the v6 branch is skipped entirely.
    :param kwargs: optional overrides gateway_mac, ip_address, subnet_cidr,
        gateway_ip — note the same override applies to both families when
        dual_stack is True.
    :raises ValueError: when no fixed IPs were built and v6_ext_gw_with_sub.
    """
    fixed_ips = []
    subnets = []
    gateway_mac = kwargs.get('gateway_mac', 'ca:fe:de:ad:be:ee')
    extra_subnets = []
    # Build one fixed IP + subnet per requested address family.
    for loop_version in (4, 6):
        if loop_version == 4 and (ip_version == 4 or dual_stack):
            ip_address = kwargs.get('ip_address', '19.4.4.4')
            prefixlen = 24
            subnet_cidr = kwargs.get('subnet_cidr', '19.4.4.0/24')
            gateway_ip = kwargs.get('gateway_ip', '19.4.4.1')
            _extra_subnet = {'cidr': '9.4.5.0/24'}
        elif (loop_version == 6 and (ip_version == 6 or dual_stack) and
              v6_ext_gw_with_sub):
            ip_address = kwargs.get('ip_address', 'fd00::4')
            prefixlen = 64
            subnet_cidr = kwargs.get('subnet_cidr', 'fd00::/64')
            gateway_ip = kwargs.get('gateway_ip', 'fd00::1')
            _extra_subnet = {'cidr': 'fd01::/64'}
        else:
            continue
        subnet_id = _uuid()
        fixed_ips.append({'ip_address': ip_address,
                          'subnet_id': subnet_id,
                          'prefixlen': prefixlen})
        subnets.append({'id': subnet_id,
                        'cidr': subnet_cidr,
                        'gateway_ip': gateway_ip})
        extra_subnets.append(_extra_subnet)
    if not fixed_ips and v6_ext_gw_with_sub:
        raise ValueError("Invalid ip_version: %s" % ip_version)
    router_id = _uuid()
    ex_gw_port = {'id': _uuid(),
                  'mac_address': gateway_mac,
                  'network_id': _uuid(),
                  'fixed_ips': fixed_ips,
                  'subnets': subnets,
                  'extra_subnets': extra_subnets}
    routes = []
    if extra_routes:
        routes = [{'destination': '8.8.8.0/24', 'nexthop': '19.4.4.4'}]
    router = {
        'id': router_id,
        'distributed': False,
        l3_constants.INTERFACE_KEY: [],
        'routes': routes,
        'gw_port': ex_gw_port}
    if enable_floating_ip:
        router[l3_constants.FLOATINGIP_KEY] = [{
            'id': _uuid(),
            'port_id': _uuid(),
            'status': 'DOWN',
            'floating_ip_address': '19.4.4.2',
            'fixed_ip_address': '10.0.0.1'}]
    # Attach the requested number of internal interfaces in place.
    router_append_interface(router, count=num_internal_ports,
                            ip_version=ip_version, dual_stack=dual_stack)
    if enable_ha:
        router['ha'] = True
        router['ha_vr_id'] = 1
        router[l3_constants.HA_INTERFACE_KEY] = (get_ha_interface())
    if enable_snat is not None:
        router['enable_snat'] = enable_snat
    return router
def get_subnet_id(port):
    """Return the subnet id of the port's first fixed IP."""
    first_fixed_ip = port['fixed_ips'][0]
    return first_fixed_ip['subnet_id']
def router_append_interface(router, count=1, ip_version=4, ra_mode=None,
                            addr_mode=None, dual_stack=False):
    """Append ``count`` fake internal interfaces (ports) to ``router`` in place.

    Each port gets one fixed IP per requested family, drawn from the
    35.4.<i>.0/24 (IPv4) and fd01:<i>:1::/64 (IPv6) pools, plus a unique,
    sequentially incremented MAC address.

    :param router: router dict whose INTERFACE_KEY list is extended in place.
    :param count: number of ports to append.
    :param ip_version: 4 or 6 (applied per family; see dual_stack).
    :param ra_mode: value stored as 'ipv6_ra_mode' on every new subnet.
    :param addr_mode: value stored as 'ipv6_address_mode' on every new subnet.
    :param dual_stack: give every port both an IPv4 and an IPv6 address.
    :raises ValueError: when no fixed IPs could be built for ip_version.
    """
    interfaces = router[l3_constants.INTERFACE_KEY]
    # Count existing subnets of this family so new pool indices don't collide.
    current = sum(
        [netaddr.IPNetwork(subnet['cidr']).version == ip_version
         for p in interfaces for subnet in p['subnets']])
    # NOTE(review): the MAC counter restarts at this base on every call, so
    # repeated calls will reuse MAC addresses — confirm callers only rely on
    # uniqueness within a single call.
    mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
    mac_address.dialect = netaddr.mac_unix
    for i in range(current, current + count):
        fixed_ips = []
        subnets = []
        for loop_version in (4, 6):
            if loop_version == 4 and (ip_version == 4 or dual_stack):
                ip_pool = '35.4.%i.4'
                cidr_pool = '35.4.%i.0/24'
                prefixlen = 24
                gw_pool = '35.4.%i.1'
            elif loop_version == 6 and (ip_version == 6 or dual_stack):
                ip_pool = 'fd01:%x:1::6'
                cidr_pool = 'fd01:%x:1::/64'
                prefixlen = 64
                gw_pool = 'fd01:%x:1::1'
            else:
                continue
            subnet_id = _uuid()
            fixed_ips.append({'ip_address': ip_pool % i,
                              'subnet_id': subnet_id,
                              'prefixlen': prefixlen})
            subnets.append({'id': subnet_id,
                            'cidr': cidr_pool % i,
                            'gateway_ip': gw_pool % i,
                            'ipv6_ra_mode': ra_mode,
                            'ipv6_address_mode': addr_mode})
        if not fixed_ips:
            raise ValueError("Invalid ip_version: %s" % ip_version)
        interfaces.append(
            {'id': _uuid(),
             'network_id': _uuid(),
             'admin_state_up': True,
             'fixed_ips': fixed_ips,
             'mac_address': str(mac_address),
             'subnets': subnets})
        mac_address.value += 1
def router_append_subnet(router, count=1, ip_version=4,
                         ipv6_subnet_modes=None, interface_id=None):
    """Add ``count`` subnets to ``router``, on a new or an existing port.

    The router's INTERFACE_KEY list is replaced with an updated deep copy,
    so previously-held references to the old port dicts are not mutated.

    :param router: router dict to update.
    :param count: number of subnets to append.
    :param ip_version: 4 or 6; selects the address pools used.
    :param ipv6_subnet_modes: optional list of {'ra_mode', 'address_mode'}
        dicts, one per new subnet; padded with None-modes up to ``count``.
    :param interface_id: id of an existing port to extend; if None, a new
        port is appended.
    :raises ValueError: for an unknown ip_version or unknown interface_id.
    """
    # Bug fix: normalize the subnet modes for every address family. This was
    # previously done only for ip_version == 6, so calling with the default
    # ipv6_subnet_modes=None and ip_version == 4 raised TypeError when the
    # modes list was indexed in the loop below.
    subnet_mode_none = {'ra_mode': None, 'address_mode': None}
    if not ipv6_subnet_modes:
        ipv6_subnet_modes = [subnet_mode_none] * count
    elif len(ipv6_subnet_modes) != count:
        # Pad a short caller-supplied list with "no mode" entries.
        ipv6_subnet_modes.extend([subnet_mode_none for i in
                                  moves.range(len(ipv6_subnet_modes),
                                              count)])

    if ip_version == 4:
        ip_pool = '35.4.%i.4'
        cidr_pool = '35.4.%i.0/24'
        prefixlen = 24
        gw_pool = '35.4.%i.1'
    elif ip_version == 6:
        ip_pool = 'fd01:%x::6'
        cidr_pool = 'fd01:%x::/64'
        prefixlen = 64
        gw_pool = 'fd01:%x::1'
    else:
        raise ValueError("Invalid ip_version: %s" % ip_version)

    interfaces = copy.deepcopy(router.get(l3_constants.INTERFACE_KEY, []))
    if interface_id:
        # Extend the named existing port.
        try:
            interface = next(i for i in interfaces
                             if i['id'] == interface_id)
        except StopIteration:
            raise ValueError("interface_id not found")
        fixed_ips, subnets = interface['fixed_ips'], interface['subnets']
    else:
        interface = None
        fixed_ips, subnets = [], []

    # Pool indices continue after any subnets the port already has.
    num_existing_subnets = len(subnets)
    for i in moves.range(count):
        subnet_id = _uuid()
        fixed_ips.append(
            {'ip_address': ip_pool % (i + num_existing_subnets),
             'subnet_id': subnet_id,
             'prefixlen': prefixlen})
        subnets.append(
            {'id': subnet_id,
             'cidr': cidr_pool % (i + num_existing_subnets),
             'gateway_ip': gw_pool % (i + num_existing_subnets),
             'ipv6_ra_mode': ipv6_subnet_modes[i]['ra_mode'],
             'ipv6_address_mode': ipv6_subnet_modes[i]['address_mode']})

    if interface:
        # Update old interface
        index = interfaces.index(interface)
        interfaces[index].update({'fixed_ips': fixed_ips, 'subnets': subnets})
    else:
        # New interface appended to interfaces list
        mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
        mac_address.dialect = netaddr.mac_unix
        interfaces.append(
            {'id': _uuid(),
             'network_id': _uuid(),
             'admin_state_up': True,
             'mac_address': str(mac_address),
             'fixed_ips': fixed_ips,
             'subnets': subnets})

    router[l3_constants.INTERFACE_KEY] = interfaces
def router_append_pd_enabled_subnet(router, count=1):
    """Append ``count`` IPv6 prefix-delegation (PD) interfaces to ``router``.

    Each interface carries the provisional PD prefix as its subnet cidr, a
    SLAAC ra_mode, and the IPv6 PD subnet pool id. The router's interface
    list is extended in place.

    :param router: router dict whose INTERFACE_KEY list is extended.
    :param count: number of PD interfaces to append.
    :returns: the list of interface dicts that were added.
    """
    interfaces = router[l3_constants.INTERFACE_KEY]
    # Number of IPv6 subnets already present; used only as a starting index.
    current = sum(netaddr.IPNetwork(subnet['cidr']).version == 6
                  for p in interfaces for subnet in p['subnets'])
    mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
    mac_address.dialect = netaddr.mac_unix
    pd_intfs = []
    for i in range(current, current + count):
        subnet_id = _uuid()
        intf = {'id': _uuid(),
                'network_id': _uuid(),
                'admin_state_up': True,
                'fixed_ips': [{'ip_address': '::1',
                               'prefixlen': 64,
                               'subnet_id': subnet_id}],
                'mac_address': str(mac_address),
                'subnets': [{'id': subnet_id,
                             'cidr': l3_constants.PROVISIONAL_IPV6_PD_PREFIX,
                             'gateway_ip': '::1',
                             'ipv6_ra_mode': l3_constants.IPV6_SLAAC,
                             'subnetpool_id': l3_constants.IPV6_PD_POOL_ID}]}
        interfaces.append(intf)
        pd_intfs.append(intf)
        mac_address.value += 1
    return pd_intfs
def prepare_ext_gw_test(context, ri, dual_stack=False):
    """Build a fake external gateway port and prime ``context`` for gw tests.

    :param context: test object; its ``device_exists`` mock is forced to
        return True so the gateway device appears already plugged.
    :param ri: router-info object; used to derive the external device name.
    :param dual_stack: also give the port an IPv6 fixed IP and subnet.
    :returns: (interface_name, ex_gw_port) tuple.
    """
    subnet_id = _uuid()
    fixed_ips = [{'subnet_id': subnet_id,
                  'ip_address': '20.0.0.30',
                  'prefixlen': 24}]
    subnets = [{'id': subnet_id,
                'cidr': '20.0.0.0/24',
                'gateway_ip': '20.0.0.1'}]
    if dual_stack:
        subnet_id_v6 = _uuid()
        fixed_ips.append({'subnet_id': subnet_id_v6,
                          'ip_address': '2001:192:168:100::2',
                          'prefixlen': 64})
        subnets.append({'id': subnet_id_v6,
                        'cidr': '2001:192:168:100::/64',
                        'gateway_ip': '2001:192:168:100::1'})
    ex_gw_port = {'fixed_ips': fixed_ips,
                  'subnets': subnets,
                  'extra_subnets': [{'cidr': '172.16.0.0/24'}],
                  'id': _uuid(),
                  'network_id': _uuid(),
                  'mac_address': 'ca:fe:de:ad:be:ef'}
    interface_name = ri.get_external_device_name(ex_gw_port['id'])
    context.device_exists.return_value = True
    return interface_name, ex_gw_port
| 37.622517 | 78 | 0.539606 |
import copy
import netaddr
from oslo_utils import uuidutils
from six import moves
from neutron.common import constants as l3_constants
_uuid = uuidutils.generate_uuid
class FakeDev(object):
def __init__(self, name):
self.name = name
def get_ha_interface(ip='169.254.192.1', mac='12:34:56:78:2b:5d'):
subnet_id = _uuid()
return {'admin_state_up': True,
'device_id': _uuid(),
'device_owner': 'network:router_ha_interface',
'fixed_ips': [{'ip_address': ip,
'prefixlen': 18,
'subnet_id': subnet_id}],
'id': _uuid(),
'mac_address': mac,
'name': u'L3 HA Admin port 0',
'network_id': _uuid(),
'status': u'ACTIVE',
'subnets': [{'cidr': '169.254.192.0/18',
'gateway_ip': '169.254.255.254',
'id': subnet_id}],
'tenant_id': '',
'agent_id': _uuid(),
'agent_host': 'aaa',
'priority': 1}
def prepare_router_data(ip_version=4, enable_snat=None, num_internal_ports=1,
enable_floating_ip=False, enable_ha=False,
extra_routes=False, dual_stack=False,
v6_ext_gw_with_sub=True, **kwargs):
fixed_ips = []
subnets = []
gateway_mac = kwargs.get('gateway_mac', 'ca:fe:de:ad:be:ee')
extra_subnets = []
for loop_version in (4, 6):
if loop_version == 4 and (ip_version == 4 or dual_stack):
ip_address = kwargs.get('ip_address', '19.4.4.4')
prefixlen = 24
subnet_cidr = kwargs.get('subnet_cidr', '19.4.4.0/24')
gateway_ip = kwargs.get('gateway_ip', '19.4.4.1')
_extra_subnet = {'cidr': '9.4.5.0/24'}
elif (loop_version == 6 and (ip_version == 6 or dual_stack) and
v6_ext_gw_with_sub):
ip_address = kwargs.get('ip_address', 'fd00::4')
prefixlen = 64
subnet_cidr = kwargs.get('subnet_cidr', 'fd00::/64')
gateway_ip = kwargs.get('gateway_ip', 'fd00::1')
_extra_subnet = {'cidr': 'fd01::/64'}
else:
continue
subnet_id = _uuid()
fixed_ips.append({'ip_address': ip_address,
'subnet_id': subnet_id,
'prefixlen': prefixlen})
subnets.append({'id': subnet_id,
'cidr': subnet_cidr,
'gateway_ip': gateway_ip})
extra_subnets.append(_extra_subnet)
if not fixed_ips and v6_ext_gw_with_sub:
raise ValueError("Invalid ip_version: %s" % ip_version)
router_id = _uuid()
ex_gw_port = {'id': _uuid(),
'mac_address': gateway_mac,
'network_id': _uuid(),
'fixed_ips': fixed_ips,
'subnets': subnets,
'extra_subnets': extra_subnets}
routes = []
if extra_routes:
routes = [{'destination': '8.8.8.0/24', 'nexthop': '19.4.4.4'}]
router = {
'id': router_id,
'distributed': False,
l3_constants.INTERFACE_KEY: [],
'routes': routes,
'gw_port': ex_gw_port}
if enable_floating_ip:
router[l3_constants.FLOATINGIP_KEY] = [{
'id': _uuid(),
'port_id': _uuid(),
'status': 'DOWN',
'floating_ip_address': '19.4.4.2',
'fixed_ip_address': '10.0.0.1'}]
router_append_interface(router, count=num_internal_ports,
ip_version=ip_version, dual_stack=dual_stack)
if enable_ha:
router['ha'] = True
router['ha_vr_id'] = 1
router[l3_constants.HA_INTERFACE_KEY] = (get_ha_interface())
if enable_snat is not None:
router['enable_snat'] = enable_snat
return router
def get_subnet_id(port):
    """Return the subnet id of the first fixed IP configured on *port*."""
    first_fixed_ip = port['fixed_ips'][0]
    return first_fixed_ip['subnet_id']
def router_append_interface(router, count=1, ip_version=4, ra_mode=None,
                            addr_mode=None, dual_stack=False):
    """Append *count* fake internal interfaces to *router* in place.

    Each new interface gets a v4 and/or v6 subnet depending on
    *ip_version*/*dual_stack*; *ra_mode* and *addr_mode* are copied into
    every generated subnet dict.  Raises ValueError when neither address
    family matches *ip_version*.
    """
    interfaces = router[l3_constants.INTERFACE_KEY]
    # Number the new subnets after the ones already present for this
    # address family, so the generated 35.4.%i / fd01:%x pools don't collide.
    # (Generator expression instead of a throwaway list, matching
    # router_append_pd_enabled_subnet below.)
    current = sum(
        netaddr.IPNetwork(subnet['cidr']).version == ip_version
        for p in interfaces for subnet in p['subnets'])
    mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
    mac_address.dialect = netaddr.mac_unix
    for i in range(current, current + count):
        fixed_ips = []
        subnets = []
        for loop_version in (4, 6):
            if loop_version == 4 and (ip_version == 4 or dual_stack):
                ip_pool = '35.4.%i.4'
                cidr_pool = '35.4.%i.0/24'
                prefixlen = 24
                gw_pool = '35.4.%i.1'
            elif loop_version == 6 and (ip_version == 6 or dual_stack):
                ip_pool = 'fd01:%x:1::6'
                cidr_pool = 'fd01:%x:1::/64'
                prefixlen = 64
                gw_pool = 'fd01:%x:1::1'
            else:
                continue
            subnet_id = _uuid()
            fixed_ips.append({'ip_address': ip_pool % i,
                              'subnet_id': subnet_id,
                              'prefixlen': prefixlen})
            subnets.append({'id': subnet_id,
                            'cidr': cidr_pool % i,
                            'gateway_ip': gw_pool % i,
                            'ipv6_ra_mode': ra_mode,
                            'ipv6_address_mode': addr_mode})
        if not fixed_ips:
            raise ValueError("Invalid ip_version: %s" % ip_version)
        interfaces.append(
            {'id': _uuid(),
             'network_id': _uuid(),
             'admin_state_up': True,
             'fixed_ips': fixed_ips,
             'mac_address': str(mac_address),
             'subnets': subnets})
        # Give every generated interface a distinct MAC address.
        mac_address.value += 1
def router_append_subnet(router, count=1, ip_version=4,
                         ipv6_subnet_modes=None, interface_id=None):
    """Append *count* subnets to *router*, either onto the existing
    internal interface identified by *interface_id* or as one new
    interface carrying all of them.

    Raises ValueError for an unknown *ip_version* or *interface_id*.
    """
    # Normalize ipv6_subnet_modes to one {'ra_mode', 'address_mode'} entry
    # per new subnet.  This must happen for *both* address families: the
    # subnet dicts built below always carry the 'ipv6_ra_mode' and
    # 'ipv6_address_mode' keys, so leaving the list as None for
    # ip_version == 4 crashed when it was indexed.
    subnet_mode_none = {'ra_mode': None, 'address_mode': None}
    if not ipv6_subnet_modes:
        ipv6_subnet_modes = [subnet_mode_none] * count
    elif len(ipv6_subnet_modes) != count:
        ipv6_subnet_modes.extend([subnet_mode_none for i in
                                  moves.range(len(ipv6_subnet_modes),
                                              count)])
    if ip_version == 4:
        ip_pool = '35.4.%i.4'
        cidr_pool = '35.4.%i.0/24'
        prefixlen = 24
        gw_pool = '35.4.%i.1'
    elif ip_version == 6:
        ip_pool = 'fd01:%x::6'
        cidr_pool = 'fd01:%x::/64'
        prefixlen = 64
        gw_pool = 'fd01:%x::1'
    else:
        raise ValueError("Invalid ip_version: %s" % ip_version)
    # Work on a deep copy so a failure can't leave the router half-updated.
    interfaces = copy.deepcopy(router.get(l3_constants.INTERFACE_KEY, []))
    if interface_id:
        try:
            interface = next(i for i in interfaces
                             if i['id'] == interface_id)
        except StopIteration:
            raise ValueError("interface_id not found")
        fixed_ips, subnets = interface['fixed_ips'], interface['subnets']
    else:
        interface = None
        fixed_ips, subnets = [], []
    # Continue numbering after the subnets already on the interface.
    num_existing_subnets = len(subnets)
    for i in moves.range(count):
        subnet_id = _uuid()
        fixed_ips.append(
            {'ip_address': ip_pool % (i + num_existing_subnets),
             'subnet_id': subnet_id,
             'prefixlen': prefixlen})
        subnets.append(
            {'id': subnet_id,
             'cidr': cidr_pool % (i + num_existing_subnets),
             'gateway_ip': gw_pool % (i + num_existing_subnets),
             'ipv6_ra_mode': ipv6_subnet_modes[i]['ra_mode'],
             'ipv6_address_mode': ipv6_subnet_modes[i]['address_mode']})
    if interface:
        index = interfaces.index(interface)
        interfaces[index].update({'fixed_ips': fixed_ips, 'subnets': subnets})
    else:
        mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
        mac_address.dialect = netaddr.mac_unix
        interfaces.append(
            {'id': _uuid(),
             'network_id': _uuid(),
             'admin_state_up': True,
             'mac_address': str(mac_address),
             'fixed_ips': fixed_ips,
             'subnets': subnets})
    router[l3_constants.INTERFACE_KEY] = interfaces
def router_append_pd_enabled_subnet(router, count=1):
    """Append *count* prefix-delegation (PD) enabled interfaces to *router*.

    Each interface carries one IPv6 subnet with the provisional PD prefix
    and the PD subnet pool.  Returns the list of interfaces that were added.
    """
    interfaces = router[l3_constants.INTERFACE_KEY]
    # Count existing IPv6 subnets only to keep the loop numbering stable;
    # the loop index itself is not used inside the body.
    current = sum(netaddr.IPNetwork(subnet['cidr']).version == 6
                  for p in interfaces for subnet in p['subnets'])
    mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
    mac_address.dialect = netaddr.mac_unix
    pd_intfs = []
    for i in range(current, current + count):
        subnet_id = _uuid()
        intf = {'id': _uuid(),
                'network_id': _uuid(),
                'admin_state_up': True,
                'fixed_ips': [{'ip_address': '::1',
                               'prefixlen': 64,
                               'subnet_id': subnet_id}],
                'mac_address': str(mac_address),
                'subnets': [{'id': subnet_id,
                             'cidr': l3_constants.PROVISIONAL_IPV6_PD_PREFIX,
                             'gateway_ip': '::1',
                             'ipv6_ra_mode': l3_constants.IPV6_SLAAC,
                             'subnetpool_id': l3_constants.IPV6_PD_POOL_ID}]}
        interfaces.append(intf)
        pd_intfs.append(intf)
        # Give every generated interface a distinct MAC address.
        mac_address.value += 1
    return pd_intfs
def prepare_ext_gw_test(context, ri, dual_stack=False):
    """Build a fake external gateway port for router-info *ri*.

    Also primes *context* so device_exists reports True.  Returns the
    external device name together with the generated port dict.
    """
    v4_subnet_id = _uuid()
    fixed_ips = [{'subnet_id': v4_subnet_id,
                  'ip_address': '20.0.0.30',
                  'prefixlen': 24}]
    subnets = [{'id': v4_subnet_id,
                'cidr': '20.0.0.0/24',
                'gateway_ip': '20.0.0.1'}]
    if dual_stack:
        # Add a second, IPv6, address/subnet pair on the same port.
        v6_subnet_id = _uuid()
        fixed_ips.append({'subnet_id': v6_subnet_id,
                          'ip_address': '2001:192:168:100::2',
                          'prefixlen': 64})
        subnets.append({'id': v6_subnet_id,
                        'cidr': '2001:192:168:100::/64',
                        'gateway_ip': '2001:192:168:100::1'})
    ex_gw_port = {
        'fixed_ips': fixed_ips,
        'subnets': subnets,
        'extra_subnets': [{'cidr': '172.16.0.0/24'}],
        'id': _uuid(),
        'network_id': _uuid(),
        'mac_address': 'ca:fe:de:ad:be:ef',
    }
    interface_name = ri.get_external_device_name(ex_gw_port['id'])
    context.device_exists.return_value = True
    return interface_name, ex_gw_port
| true | true |
1c3aa08c26ba618b852d49dd757979604b6e5f00 | 135 | py | Python | fed_distill/resnet/__init__.py | Ahmedjjj/dataset-distillation | f2e4267d070c7fb8e50476297e95638f351b76d6 | [
"MIT"
] | null | null | null | fed_distill/resnet/__init__.py | Ahmedjjj/dataset-distillation | f2e4267d070c7fb8e50476297e95638f351b76d6 | [
"MIT"
] | null | null | null | fed_distill/resnet/__init__.py | Ahmedjjj/dataset-distillation | f2e4267d070c7fb8e50476297e95638f351b76d6 | [
"MIT"
] | null | null | null | from fed_distill.resnet.deep_inv import get_resnet_cifar_adi, get_resnet_cifar_di
from fed_distill.resnet.resnet_cifar import ResNet18
| 45 | 81 | 0.896296 | from fed_distill.resnet.deep_inv import get_resnet_cifar_adi, get_resnet_cifar_di
from fed_distill.resnet.resnet_cifar import ResNet18
| true | true |
1c3aa1c173c1c552d607933ad7f23096267b65be | 939 | py | Python | 1101-1200/1152-Erect the Fence/1152-Erect the Fence.py | jiadaizhao/LintCode | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 77 | 2017-12-30T13:33:37.000Z | 2022-01-16T23:47:08.000Z | 1101-1200/1152-Erect the Fence/1152-Erect the Fence.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 1 | 2018-05-14T14:15:40.000Z | 2018-05-14T14:15:40.000Z | 1101-1200/1152-Erect the Fence/1152-Erect the Fence.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 39 | 2017-12-07T14:36:25.000Z | 2022-03-10T23:05:37.000Z | """
Definition for a point.
class Point:
def __init__(self, a=0, b=0):
self.x = a
self.y = b
"""
class Solution:
    """
    @param points: List[point]
    @return: return List[point]
    """
    def outerTrees(self, points):
        """Return the trees lying on the fence (convex hull).

        Andrew's monotone chain, keeping collinear boundary points
        (only strict clockwise turns are popped).  The input list is
        sorted in place.
        """
        def cross(o, a, b):
            # z-component of (a - o) x (b - a); negative means a right turn.
            return (a.x - o.x) * (b.y - a.y) - (a.y - o.y) * (b.x - a.x)
        points.sort(key=lambda p: (p.x, p.y))
        fence = []
        # Lower chain.
        for p in points:
            while len(fence) > 1 and cross(fence[-2], fence[-1], p) < 0:
                fence.pop()
            fence.append(p)
        # All points collinear: the chain already holds every point.
        if len(fence) == len(points):
            return fence
        # Upper chain, walking back (last point already ends the lower chain).
        for p in reversed(points[:-1]):
            while len(fence) > 1 and cross(fence[-2], fence[-1], p) < 0:
                fence.pop()
            fence.append(p)
        fence.pop()  # the first point was appended twice
        return sorted(fence, key=lambda p: (p.x, p.y))
| 28.454545 | 80 | 0.479233 |
class Solution:
    def outerTrees(self, points):
        """Return the trees on the fence (convex hull, collinear points kept).

        Andrew's monotone chain; the input list is sorted in place.
        """
        def orientation(p, q, r):
            # Cross product of pq x qr; negative means a clockwise turn.
            return (q.x - p.x) * (r.y - q.y) - (q.y - p.y) * (r.x - q.x)
        points.sort(key=lambda p: (p.x, p.y))
        hull = []
        # Lower chain: pop only strict clockwise turns, so collinear
        # boundary points stay on the fence.
        for point in points:
            while len(hull) >= 2 and orientation(hull[-2], hull[-1], point) < 0:
                hull.pop()
            hull.append(point)
        # All points collinear: the chain already holds every point.
        if len(hull) == len(points):
            return hull
        # Upper chain, walking back (skip the last point, it already ends
        # the lower chain).
        for point in points[:-1][::-1]:
            while len(hull) >= 2 and orientation(hull[-2], hull[-1], point) < 0:
                hull.pop()
            hull.append(point)
        hull.pop()  # the first point was appended twice
        return sorted(hull, key=lambda p: (p.x, p.y))
| true | true |
1c3aa23cdce53bafdf329c20bc2930e04167cc41 | 3,034 | py | Python | nova/tests/unit/virt/libvirt/volume/test_scaleio.py | lixiaoy1/nova | 357b8b38e88300948bb2e07d1bbaabd1e9d7b60e | [
"Apache-2.0"
] | 1 | 2018-12-28T06:47:39.000Z | 2018-12-28T06:47:39.000Z | nova/tests/unit/virt/libvirt/volume/test_scaleio.py | lixiaoy1/nova | 357b8b38e88300948bb2e07d1bbaabd1e9d7b60e | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/virt/libvirt/volume/test_scaleio.py | lixiaoy1/nova | 357b8b38e88300948bb2e07d1bbaabd1e9d7b60e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_brick.initiator import connector
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import scaleio
class LibvirtScaleIOVolumeDriverTestCase(
        test_volume.LibvirtVolumeBaseTestCase):
    """Tests for the libvirt ScaleIO volume driver."""
    def test_libvirt_scaleio_driver(self):
        # The driver must build an os-brick ScaleIO connector.
        libvirt_driver = scaleio.LibvirtScaleIOVolumeDriver(
            self.fake_host)
        self.assertIsInstance(libvirt_driver.connector,
                              connector.ScaleIOConnector)
    def test_libvirt_scaleio_driver_connect(self):
        # connect_volume must copy the path returned by the brick
        # connector into the connection info as 'device_path'.
        def brick_conn_vol(data):
            return {'path': '/dev/vol01'}
        sio = scaleio.LibvirtScaleIOVolumeDriver(self.fake_host)
        sio.connector.connect_volume = brick_conn_vol
        disk_info = {'path': '/dev/vol01', 'name': 'vol01'}
        conn = {'data': disk_info}
        sio.connect_volume(conn, mock.sentinel.instance)
        self.assertEqual('/dev/vol01',
                         conn['data']['device_path'])
    def test_libvirt_scaleio_driver_get_config(self):
        # get_config must expose the attached volume as a block device.
        sio = scaleio.LibvirtScaleIOVolumeDriver(self.fake_host)
        conn = {'data': {'device_path': '/dev/vol01'}}
        conf = sio.get_config(conn, self.disk_info)
        self.assertEqual('block', conf.source_type)
        self.assertEqual('/dev/vol01', conf.source_path)
    def test_libvirt_scaleio_driver_disconnect(self):
        # disconnect_volume must delegate to the brick connector.
        sio = scaleio.LibvirtScaleIOVolumeDriver(self.fake_host)
        sio.connector.disconnect_volume = mock.MagicMock()
        conn = {'data': mock.sentinel.conn_data}
        sio.disconnect_volume(conn, mock.sentinel.instance)
        sio.connector.disconnect_volume.assert_called_once_with(
            mock.sentinel.conn_data, None)
    def test_libvirt_scaleio_driver_extend_volume(self):
        # extend_volume must return the new size reported by the connector.
        def brick_extend_vol(data):
            return data['size']
        extended_vol_size = 8
        sio = scaleio.LibvirtScaleIOVolumeDriver(self.fake_host)
        disk_info = {'size': extended_vol_size,
                     'name': 'vol01',
                     'device_path': '/dev/vol01'}
        conn = {'data': disk_info}
        with mock.patch.object(sio.connector,
                               'extend_volume',
                               side_effect=brick_extend_vol):
            self.assertEqual(extended_vol_size,
                             sio.extend_volume(conn, mock.sentinel.instance))
| 40.453333 | 78 | 0.664799 |
import mock
from os_brick.initiator import connector
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import scaleio
class LibvirtScaleIOVolumeDriverTestCase(
        test_volume.LibvirtVolumeBaseTestCase):
    """Unit tests for the libvirt ScaleIO volume driver."""
    def test_libvirt_scaleio_driver(self):
        # The driver is expected to build an os-brick ScaleIO connector.
        driver = scaleio.LibvirtScaleIOVolumeDriver(self.fake_host)
        self.assertIsInstance(driver.connector, connector.ScaleIOConnector)
    def test_libvirt_scaleio_driver_connect(self):
        # connect_volume copies the connector's path into the conn info.
        driver = scaleio.LibvirtScaleIOVolumeDriver(self.fake_host)
        driver.connector.connect_volume = lambda data: {'path': '/dev/vol01'}
        connection_info = {'data': {'path': '/dev/vol01', 'name': 'vol01'}}
        driver.connect_volume(connection_info, mock.sentinel.instance)
        self.assertEqual('/dev/vol01',
                         connection_info['data']['device_path'])
    def test_libvirt_scaleio_driver_get_config(self):
        # The attached volume is exposed as a block device.
        driver = scaleio.LibvirtScaleIOVolumeDriver(self.fake_host)
        connection_info = {'data': {'device_path': '/dev/vol01'}}
        conf = driver.get_config(connection_info, self.disk_info)
        self.assertEqual('block', conf.source_type)
        self.assertEqual('/dev/vol01', conf.source_path)
    def test_libvirt_scaleio_driver_disconnect(self):
        # disconnect_volume delegates straight to the brick connector.
        driver = scaleio.LibvirtScaleIOVolumeDriver(self.fake_host)
        driver.connector.disconnect_volume = mock.MagicMock()
        connection_info = {'data': mock.sentinel.conn_data}
        driver.disconnect_volume(connection_info, mock.sentinel.instance)
        driver.connector.disconnect_volume.assert_called_once_with(
            mock.sentinel.conn_data, None)
    def test_libvirt_scaleio_driver_extend_volume(self):
        # extend_volume returns whatever size the connector reports.
        new_size = 8
        driver = scaleio.LibvirtScaleIOVolumeDriver(self.fake_host)
        connection_info = {'data': {'size': new_size,
                                    'name': 'vol01',
                                    'device_path': '/dev/vol01'}}
        with mock.patch.object(driver.connector, 'extend_volume',
                               side_effect=lambda data: data['size']):
            self.assertEqual(
                new_size,
                driver.extend_volume(connection_info, mock.sentinel.instance))
| true | true |
1c3aa2b66d2aadf5d9b79addb89a1d275b4cfe78 | 1,036 | py | Python | pgroonga/migrations/0003_v2_api_upgrade.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | 1 | 2020-03-19T00:52:48.000Z | 2020-03-19T00:52:48.000Z | pgroonga/migrations/0003_v2_api_upgrade.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | null | null | null | pgroonga/migrations/0003_v2_api_upgrade.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | 1 | 2020-07-06T11:43:28.000Z | 2020-07-06T11:43:28.000Z | from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Rebuild the message search index with the PGroonga v2 API.

    Forward SQL drops the old index and recreates it with the
    pgroonga_text_full_text_search_ops_v2 operator class; reverse SQL
    restores the v1 pgroonga.text_full_text_search_ops index.
    """
    # CREATE INDEX CONCURRENTLY cannot run inside a transaction block.
    atomic = False
    dependencies = [
        ('pgroonga', '0002_html_escape_subject'),
    ]
    # Used to interpolate %(USER)s / %(SCHEMA)s into the SQL below.
    database_setting = settings.DATABASES["default"]
    operations = [
        # First list: forward SQL; second list: reverse SQL.
        migrations.RunSQL(["""
ALTER ROLE %(USER)s SET search_path TO %(SCHEMA)s,public;
SET search_path = %(SCHEMA)s,public;
DROP INDEX zerver_message_search_pgroonga;
""" % database_setting, """
CREATE INDEX CONCURRENTLY zerver_message_search_pgroonga ON zerver_message
USING pgroonga(search_pgroonga pgroonga_text_full_text_search_ops_v2);
"""],
        ["""
ALTER ROLE %(USER)s SET search_path TO %(SCHEMA)s,public,pgroonga,pg_catalog;
SET search_path = %(SCHEMA)s,public,pgroonga,pg_catalog;
DROP INDEX zerver_message_search_pgroonga;
""" % database_setting, """
CREATE INDEX CONCURRENTLY zerver_message_search_pgroonga ON zerver_message
USING pgroonga(search_pgroonga pgroonga.text_full_text_search_ops);
"""])
    ]
| 28 | 77 | 0.733591 | from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Rebuild the message search index with the PGroonga v2 API
    (reverse SQL restores the v1 operator class)."""
    # CREATE INDEX CONCURRENTLY cannot run inside a transaction block.
    atomic = False
    dependencies = [
        ('pgroonga', '0002_html_escape_subject'),
    ]
    # Used to interpolate %(USER)s / %(SCHEMA)s into the SQL below.
    database_setting = settings.DATABASES["default"]
    operations = [
        # First list: forward SQL; second list: reverse SQL.
        migrations.RunSQL(["""
ALTER ROLE %(USER)s SET search_path TO %(SCHEMA)s,public;
SET search_path = %(SCHEMA)s,public;
DROP INDEX zerver_message_search_pgroonga;
""" % database_setting, """
CREATE INDEX CONCURRENTLY zerver_message_search_pgroonga ON zerver_message
USING pgroonga(search_pgroonga pgroonga_text_full_text_search_ops_v2);
"""],
        ["""
ALTER ROLE %(USER)s SET search_path TO %(SCHEMA)s,public,pgroonga,pg_catalog;
SET search_path = %(SCHEMA)s,public,pgroonga,pg_catalog;
DROP INDEX zerver_message_search_pgroonga;
""" % database_setting, """
CREATE INDEX CONCURRENTLY zerver_message_search_pgroonga ON zerver_message
USING pgroonga(search_pgroonga pgroonga.text_full_text_search_ops);
"""])
    ]
| true | true |
1c3aa448b5cb1afcb70e5b7f5dc5aa3936e5f3a4 | 973 | py | Python | String/415. Add Strings.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | 138 | 2020-02-08T05:25:26.000Z | 2021-11-04T11:59:28.000Z | String/415. Add Strings.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | null | null | null | String/415. Add Strings.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | 24 | 2021-01-02T07:18:43.000Z | 2022-03-20T08:17:54.000Z | """
415. Add Strings
"""
class Solution:
    def addStrings(self, num1, num2):
        """
        :type num1: str
        :type num2: str
        :rtype: str
        """
        # Walk both strings from their least-significant digits, carrying.
        pos1, pos2 = len(num1) - 1, len(num2) - 1
        digits = []
        carry = 0
        while pos1 >= 0 or pos2 >= 0 or carry > 0:
            total = carry
            if pos1 >= 0:
                total += ord(num1[pos1]) - ord('0')
                pos1 -= 1
            if pos2 >= 0:
                total += ord(num2[pos2]) - ord('0')
                pos2 -= 1
            carry, digit = divmod(total, 10)
            digits.append(str(digit))
        # Digits were collected least-significant first.
        return ''.join(reversed(digits))
import itertools
class Solution:
    def addStrings(self, num1, num2):
        """Return the decimal sum of two non-negative integer strings."""
        # Pair digits right-to-left, padding the shorter number with '0'.
        pairs = itertools.zip_longest(reversed(num1), reversed(num2),
                                      fillvalue='0')
        zero = ord('0')
        out = []
        carry = 0
        for a, b in pairs:
            carry, digit = divmod((ord(a) - zero) + (ord(b) - zero) + carry, 10)
            out.append(str(digit))
        # A leftover carry can only ever be 1 (9 + 9 + 1 = 19 at most).
        if carry:
            out.append('1')
        return ''.join(reversed(out))
class Solution:
    def addStrings(self, num1, num2):
        """Return the decimal sum of *num1* and *num2* (digit strings)."""
        # i/j walk the strings from the least-significant digit.
        i = len(num1)-1; j = len(num2)-1
        carry = 0
        res = ""
        while i >= 0 or j >= 0 or carry > 0:
            carry += ord(num1[i]) - ord('0') if i >= 0 else 0
            carry += ord(num2[j]) - ord('0') if j >= 0 else 0
            # Emit the low digit, keep the remainder as carry.
            res+=str(carry%10)
            carry //= 10
            i-=1
            j-=1
        # Digits were appended least-significant first.
        return res[::-1]
import itertools
class Solution:
    def addStrings(self, num1, num2):
        """Return the decimal sum of *num1* and *num2* (digit strings)."""
        # Pair up digits right-to-left, padding the shorter number with '0'.
        z = itertools.zip_longest(num1[::-1], num2[::-1], fillvalue='0')
        res, carry, zero2 = [], 0, 2*ord('0')
        for i in z:
            cur_sum = ord(i[0]) + ord(i[1]) - zero2 + carry
            res.append(str(cur_sum % 10))
            carry = cur_sum // 10
        # A leftover carry can only ever be 1 (9 + 9 + 1 = 19 at most).
        return ('1' if carry else '') + ''.join(res[::-1])
1c3aa727a7efd294cba76ebcbd5e4e98734126c2 | 8,378 | py | Python | src/callbacks/wandb_callbacks.py | Unity-Technologies/lightning-hydra-template | 4bdf4e62c6f93021d7fae86a51c5d706990a933d | [
"MIT"
] | 3 | 2021-04-30T20:50:19.000Z | 2021-09-25T09:39:46.000Z | src/callbacks/wandb_callbacks.py | Unity-Technologies/lightning-hydra-template | 4bdf4e62c6f93021d7fae86a51c5d706990a933d | [
"MIT"
] | null | null | null | src/callbacks/wandb_callbacks.py | Unity-Technologies/lightning-hydra-template | 4bdf4e62c6f93021d7fae86a51c5d706990a933d | [
"MIT"
] | 1 | 2022-02-10T18:20:20.000Z | 2022-02-10T18:20:20.000Z | import glob
import os
from typing import List
import matplotlib.pyplot as plt
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from sklearn import metrics
from sklearn.metrics import f1_score, precision_score, recall_score
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
    """Return the WandbLogger attached to *trainer*.

    Checks the trainer's logger directly, then inside a LoggerCollection.
    Raises when no WandbLogger can be found.
    """
    attached = trainer.logger
    if isinstance(attached, WandbLogger):
        return attached
    if isinstance(attached, LoggerCollection):
        for sub_logger in attached:
            if isinstance(sub_logger, WandbLogger):
                return sub_logger
    raise Exception(
        "You are using wandb related callback, but WandbLogger was not found for some reason..."
    )
class WatchModelWithWandb(Callback):
    """Make WandbLogger watch model at the beginning of the run."""
    def __init__(self, log: str = "gradients", log_freq: int = 100):
        # Forwarded verbatim to wandb's watch(): what to log and how often.
        self.log = log
        self.log_freq = log_freq
    def on_train_start(self, trainer, pl_module):
        logger = get_wandb_logger(trainer=trainer)
        logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)
class UploadCodeToWandbAsArtifact(Callback):
    """Upload all *.py files to wandb as an artifact, at the beginning of the run."""
    def __init__(self, code_dir: str):
        self.code_dir = code_dir
    def on_train_start(self, trainer, pl_module):
        experiment = get_wandb_logger(trainer=trainer).experiment
        artifact = wandb.Artifact("project-source", type="code")
        # Every python file below code_dir, searched recursively.
        pattern = os.path.join(self.code_dir, "**/*.py")
        for source_path in glob.glob(pattern, recursive=True):
            artifact.add_file(source_path)
        experiment.use_artifact(artifact)
class UploadCheckpointsToWandbAsArtifact(Callback):
    """Upload checkpoints to wandb as an artifact, at the end of run."""
    def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False):
        self.ckpt_dir = ckpt_dir
        self.upload_best_only = upload_best_only
    def on_train_end(self, trainer, pl_module):
        logger = get_wandb_logger(trainer=trainer)
        experiment = logger.experiment
        ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints")
        if self.upload_best_only:
            # Only the checkpoint ranked best by the checkpoint callback.
            ckpts.add_file(trainer.checkpoint_callback.best_model_path)
        else:
            # Every *.ckpt below ckpt_dir, searched recursively.
            for path in glob.glob(os.path.join(self.ckpt_dir, "**/*.ckpt"), recursive=True):
                ckpts.add_file(path)
        experiment.use_artifact(ckpts)
class LogConfusionMatrixToWandb(Callback):
    """Generate confusion matrix every epoch and send it to wandb.
    Expects validation step to return predictions and targets.
    """
    def __init__(self):
        # Per-epoch buffers, filled batch by batch during validation.
        self.preds = []
        self.targets = []
        self.ready = True
    def on_sanity_check_start(self, trainer, pl_module) -> None:
        # Ignore batches produced by the validation sanity check.
        self.ready = False
    def on_sanity_check_end(self, trainer, pl_module):
        """Start executing this callback only after all validation sanity checks end."""
        self.ready = True
    def on_validation_batch_end(
        self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
    ):
        """Gather data from single batch."""
        if self.ready:
            self.preds.append(outputs["preds"])
            self.targets.append(outputs["targets"])
    def on_validation_epoch_end(self, trainer, pl_module):
        """Generate confusion matrix."""
        if self.ready:
            logger = get_wandb_logger(trainer)
            experiment = logger.experiment
            preds = torch.cat(self.preds).cpu().numpy()
            targets = torch.cat(self.targets).cpu().numpy()
            confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
            # set figure size
            plt.figure(figsize=(14, 8))
            # set labels size
            sn.set(font_scale=1.4)
            # set font size
            sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 8}, fmt="g")
            # names should be unique or else charts from different experiments in wandb will overlap
            experiment.log({f"confusion_matrix/{experiment.name}": wandb.Image(plt)}, commit=False)
            # according to wandb docs this should also work but it crashes
            # experiment.log(f{"confusion_matrix/{experiment.name}": plt})
            # reset plot
            plt.clf()
            # Clear buffers for the next epoch.
            self.preds.clear()
            self.targets.clear()
class LogF1PrecRecHeatmapToWandb(Callback):
    """Generate f1, precision, recall heatmap every epoch and send it to wandb.
    Expects validation step to return predictions and targets.
    """
    def __init__(self, class_names: List[str] = None):
        # Keep the optional class names instead of silently dropping them.
        self.class_names = class_names
        self.preds = []
        self.targets = []
        self.ready = True
    def on_sanity_check_start(self, trainer, pl_module):
        # Ignore batches produced by the validation sanity check.
        self.ready = False
    def on_sanity_check_end(self, trainer, pl_module):
        """Start executing this callback only after all validation sanity checks end."""
        self.ready = True
    def on_validation_batch_end(
        self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
    ):
        """Gather data from single batch."""
        if self.ready:
            self.preds.append(outputs["preds"])
            self.targets.append(outputs["targets"])
    def on_validation_epoch_end(self, trainer, pl_module):
        """Generate f1, precision and recall heatmap."""
        if self.ready:
            logger = get_wandb_logger(trainer=trainer)
            experiment = logger.experiment
            preds = torch.cat(self.preds).cpu().numpy()
            targets = torch.cat(self.targets).cpu().numpy()
            # sklearn metric signatures are (y_true, y_pred): ground truth
            # must come first, otherwise precision and recall are swapped.
            f1 = f1_score(targets, preds, average=None)
            r = recall_score(targets, preds, average=None)
            p = precision_score(targets, preds, average=None)
            data = [f1, p, r]
            # set figure size
            plt.figure(figsize=(14, 3))
            # set labels size
            sn.set(font_scale=1.2)
            # set font size
            sn.heatmap(
                data,
                annot=True,
                annot_kws={"size": 10},
                fmt=".3f",
                yticklabels=["F1", "Precision", "Recall"],
            )
            # names should be unique or else charts from different experiments in wandb will overlap
            experiment.log({f"f1_p_r_heatmap/{experiment.name}": wandb.Image(plt)}, commit=False)
            # reset plot
            plt.clf()
            self.preds.clear()
            self.targets.clear()
class ImagePredictionLogger(Callback):
    """Logs a validation batch and their predictions to wandb.
    Example adapted from:
        https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY
    """
    def __init__(self, num_samples: int = 8):
        super().__init__()
        # Number of images from the batch to log each epoch.
        self.num_samples = num_samples
        self.ready = True
    def on_sanity_check_start(self, trainer, pl_module):
        # Ignore the validation sanity-check pass.
        self.ready = False
    def on_sanity_check_end(self, trainer, pl_module):
        """Start executing this callback only after all validation sanity checks end."""
        self.ready = True
    def on_validation_epoch_end(self, trainer, pl_module):
        if self.ready:
            logger = get_wandb_logger(trainer=trainer)
            experiment = logger.experiment
            # get a validation batch from the validation data loader
            val_samples = next(iter(trainer.datamodule.val_dataloader()))
            val_imgs, val_labels = val_samples
            # run the batch through the network
            val_imgs = val_imgs.to(device=pl_module.device)
            logits = pl_module(val_imgs)
            preds = torch.argmax(logits, axis=-1)
            # log the images as wandb Image
            experiment.log(
                {
                    f"Images/{experiment.name}": [
                        wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
                        for x, pred, y in zip(
                            val_imgs[: self.num_samples],
                            preds[: self.num_samples],
                            val_labels[: self.num_samples],
                        )
                    ]
                }
            )
| 34.056911 | 116 | 0.620196 | import glob
import os
from typing import List
import matplotlib.pyplot as plt
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from sklearn import metrics
from sklearn.metrics import f1_score, precision_score, recall_score
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
    """Return the WandbLogger used by *trainer* (searches LoggerCollection)."""
    if isinstance(trainer.logger, WandbLogger):
        return trainer.logger
    # The trainer may hold several loggers; pick the wandb one.
    if isinstance(trainer.logger, LoggerCollection):
        for logger in trainer.logger:
            if isinstance(logger, WandbLogger):
                return logger
    raise Exception(
        "You are using wandb related callback, but WandbLogger was not found for some reason..."
    )
class WatchModelWithWandb(Callback):
    """Make the WandbLogger watch the model when training starts."""
    def __init__(self, log: str = "gradients", log_freq: int = 100):
        # Forwarded verbatim to wandb's watch().
        self.log = log
        self.log_freq = log_freq
    def on_train_start(self, trainer, pl_module):
        logger = get_wandb_logger(trainer=trainer)
        logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)
class UploadCodeToWandbAsArtifact(Callback):
    """Upload all *.py files under *code_dir* to wandb when training starts."""
    def __init__(self, code_dir: str):
        self.code_dir = code_dir
    def on_train_start(self, trainer, pl_module):
        logger = get_wandb_logger(trainer=trainer)
        experiment = logger.experiment
        code = wandb.Artifact("project-source", type="code")
        # Every python file below code_dir, searched recursively.
        for path in glob.glob(os.path.join(self.code_dir, "**/*.py"), recursive=True):
            code.add_file(path)
        experiment.use_artifact(code)
class UploadCheckpointsToWandbAsArtifact(Callback):
    """Upload checkpoints to wandb as an artifact when training ends."""
    def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False):
        self.ckpt_dir = ckpt_dir
        self.upload_best_only = upload_best_only
    def on_train_end(self, trainer, pl_module):
        logger = get_wandb_logger(trainer=trainer)
        experiment = logger.experiment
        ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints")
        if self.upload_best_only:
            # Only the checkpoint ranked best by the checkpoint callback.
            ckpts.add_file(trainer.checkpoint_callback.best_model_path)
        else:
            # Every *.ckpt below ckpt_dir, searched recursively.
            for path in glob.glob(os.path.join(self.ckpt_dir, "**/*.ckpt"), recursive=True):
                ckpts.add_file(path)
        experiment.use_artifact(ckpts)
class LogConfusionMatrixToWandb(Callback):
    """Log a confusion matrix to wandb after each validation epoch.
    Expects the validation step to put "preds" and "targets" in its outputs.
    """
    def __init__(self):
        # Per-epoch buffers, filled batch by batch during validation.
        self.preds = []
        self.targets = []
        self.ready = True
    def on_sanity_check_start(self, trainer, pl_module) -> None:
        # Skip batches coming from the sanity-check pass.
        self.ready = False
    def on_sanity_check_end(self, trainer, pl_module):
        self.ready = True
    def on_validation_batch_end(
        self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
    ):
        if self.ready:
            self.preds.append(outputs["preds"])
            self.targets.append(outputs["targets"])
    def on_validation_epoch_end(self, trainer, pl_module):
        if self.ready:
            logger = get_wandb_logger(trainer)
            experiment = logger.experiment
            preds = torch.cat(self.preds).cpu().numpy()
            targets = torch.cat(self.targets).cpu().numpy()
            confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
            plt.figure(figsize=(14, 8))
            sn.set(font_scale=1.4)
            sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 8}, fmt="g")
            # Chart key includes the run name so parallel runs do not overlap.
            experiment.log({f"confusion_matrix/{experiment.name}": wandb.Image(plt)}, commit=False)
            plt.clf()
            # Reset buffers for the next epoch.
            self.preds.clear()
            self.targets.clear()
class LogF1PrecRecHeatmapToWandb(Callback):
    """Log an F1/precision/recall heatmap to wandb after each validation epoch.
    Expects the validation step to put "preds" and "targets" in its outputs.
    """
    def __init__(self, class_names: List[str] = None):
        # Keep the optional class names instead of silently dropping them.
        self.class_names = class_names
        self.preds = []
        self.targets = []
        self.ready = True
    def on_sanity_check_start(self, trainer, pl_module):
        # Skip batches coming from the sanity-check pass.
        self.ready = False
    def on_sanity_check_end(self, trainer, pl_module):
        self.ready = True
    def on_validation_batch_end(
        self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
    ):
        if self.ready:
            self.preds.append(outputs["preds"])
            self.targets.append(outputs["targets"])
    def on_validation_epoch_end(self, trainer, pl_module):
        if self.ready:
            logger = get_wandb_logger(trainer=trainer)
            experiment = logger.experiment
            preds = torch.cat(self.preds).cpu().numpy()
            targets = torch.cat(self.targets).cpu().numpy()
            # sklearn metric signatures are (y_true, y_pred): ground truth
            # must come first, otherwise precision and recall are swapped.
            f1 = f1_score(targets, preds, average=None)
            r = recall_score(targets, preds, average=None)
            p = precision_score(targets, preds, average=None)
            data = [f1, p, r]
            plt.figure(figsize=(14, 3))
            sn.set(font_scale=1.2)
            sn.heatmap(
                data,
                annot=True,
                annot_kws={"size": 10},
                fmt=".3f",
                yticklabels=["F1", "Precision", "Recall"],
            )
            experiment.log({f"f1_p_r_heatmap/{experiment.name}": wandb.Image(plt)}, commit=False)
            plt.clf()
            self.preds.clear()
            self.targets.clear()
class ImagePredictionLogger(Callback):
    """Log a batch of validation images and their predictions to wandb."""
    def __init__(self, num_samples: int = 8):
        super().__init__()
        # Number of images from the batch to log each epoch.
        self.num_samples = num_samples
        self.ready = True
    def on_sanity_check_start(self, trainer, pl_module):
        # Skip the validation sanity-check pass.
        self.ready = False
    def on_sanity_check_end(self, trainer, pl_module):
        self.ready = True
    def on_validation_epoch_end(self, trainer, pl_module):
        if self.ready:
            logger = get_wandb_logger(trainer=trainer)
            experiment = logger.experiment
            # Grab one batch from the validation dataloader.
            val_samples = next(iter(trainer.datamodule.val_dataloader()))
            val_imgs, val_labels = val_samples
            # Run the batch through the network.
            val_imgs = val_imgs.to(device=pl_module.device)
            logits = pl_module(val_imgs)
            preds = torch.argmax(logits, axis=-1)
            # Log the first num_samples images with predicted/true labels.
            experiment.log(
                {
                    f"Images/{experiment.name}": [
                        wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
                        for x, pred, y in zip(
                            val_imgs[: self.num_samples],
                            preds[: self.num_samples],
                            val_labels[: self.num_samples],
                        )
                    ]
                }
            )
| true | true |
1c3aa7498c859db5061e314b79119fd96c0a1892 | 1,054 | py | Python | tests/test_perspective.py | gregunz/invertransforms | 3b0621c567d309ee054115cc90cb188808bd63b2 | [
"BSD-3-Clause"
] | 5 | 2019-10-14T18:56:08.000Z | 2019-10-17T13:50:11.000Z | tests/test_perspective.py | gregunz/invertransforms | 3b0621c567d309ee054115cc90cb188808bd63b2 | [
"BSD-3-Clause"
] | 1 | 2021-04-06T18:05:19.000Z | 2021-04-06T18:05:19.000Z | tests/test_perspective.py | gregunz/invertransforms | 3b0621c567d309ee054115cc90cb188808bd63b2 | [
"BSD-3-Clause"
] | null | null | null | import invertransforms as T
from invertransforms.lib import InvertibleError
from tests.invertible_test_case import InvertibleTestCase
class TestPerspective(InvertibleTestCase):
def test_invert_before_apply(self):
with self.assertRaises(InvertibleError):
T.RandomPerspective().inverse()
def test_invert(self):
tf_random = T.RandomPerspective(p=1)
img_inv = tf_random(self.img_pil)
tf_inv = tf_random.inverse()
# inversion is not pixel perfect so we are only comparing size for now
self.assertEqual(tf_inv(img_inv).size, self.img_pil.size)
self.assertIn('Perspective', repr(tf_inv))
self.assertIn('startpoints=', repr(tf_inv))
self.assertIn('endpoints=', repr(tf_inv))
self.assertIsInstance(tf_inv, T.Perspective)
self.assertIsInstance(tf_inv.inverse(), T.Perspective)
def test_identity(self):
tf_id = T.RandomPerspective(p=0)
self.assertEqual(self.n, tf_id(self.n))
self.assertEqual(self.n, tf_id.invert(self.n))
| 36.344828 | 78 | 0.701139 | import invertransforms as T
from invertransforms.lib import InvertibleError
from tests.invertible_test_case import InvertibleTestCase
class TestPerspective(InvertibleTestCase):
def test_invert_before_apply(self):
with self.assertRaises(InvertibleError):
T.RandomPerspective().inverse()
def test_invert(self):
tf_random = T.RandomPerspective(p=1)
img_inv = tf_random(self.img_pil)
tf_inv = tf_random.inverse()
self.assertEqual(tf_inv(img_inv).size, self.img_pil.size)
self.assertIn('Perspective', repr(tf_inv))
self.assertIn('startpoints=', repr(tf_inv))
self.assertIn('endpoints=', repr(tf_inv))
self.assertIsInstance(tf_inv, T.Perspective)
self.assertIsInstance(tf_inv.inverse(), T.Perspective)
def test_identity(self):
tf_id = T.RandomPerspective(p=0)
self.assertEqual(self.n, tf_id(self.n))
self.assertEqual(self.n, tf_id.invert(self.n))
| true | true |
1c3aa8e1d7c7b7e9cb0b5311c357596b0f060e61 | 305 | py | Python | careless/models/likelihoods/base.py | JBGreisman/careless | 8f6c0859973757d11b26b65d9dc51d443030aa70 | [
"MIT"
] | 5 | 2021-02-08T16:34:38.000Z | 2022-03-25T19:16:09.000Z | careless/models/likelihoods/base.py | JBGreisman/careless | 8f6c0859973757d11b26b65d9dc51d443030aa70 | [
"MIT"
] | 28 | 2021-01-15T21:31:40.000Z | 2022-03-30T21:06:54.000Z | careless/models/likelihoods/base.py | JBGreisman/careless | 8f6c0859973757d11b26b65d9dc51d443030aa70 | [
"MIT"
] | 5 | 2021-02-12T18:43:58.000Z | 2022-02-02T21:38:56.000Z | from careless.models.base import BaseModel
class Likelihood(BaseModel):
    """Abstract base class for observation likelihood models.

    Subclasses must override `call` to return a
    `tfp.distributions.Distribution` (or any object exposing `log_prob`).
    """
    def call(self, inputs):
        """Return the likelihood distribution for `inputs`.

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        # Fix: the original signature was `call(inputs)` with no `self`,
        # so invoking it as an instance method raised TypeError instead of
        # the intended NotImplementedError.
        raise NotImplementedError(
            "Likelihoods must implement a call method that returns a `tfp.distribution.Distribution` "
            "or a similar object with a `log_prob` method."
        )
| 27.727273 | 102 | 0.678689 | from careless.models.base import BaseModel
class Likelihood(BaseModel):
def call(inputs):
raise NotImplementedError(
"Likelihoods must implement a call method that returns a `tfp.distribution.Distribution` "
"or a similar object with a `log_prob` method."
)
| true | true |
1c3aa996949639478b54efba258f458d4268313e | 888 | py | Python | petroflow/src/base_delegator.py | atwahsz/petroflow | 089ca9559a94a7b597cde94601999489ac43d5ec | [
"Apache-2.0"
] | 44 | 2019-10-15T07:03:14.000Z | 2022-01-11T09:09:24.000Z | petroflow/src/base_delegator.py | atwahsz/petroflow | 089ca9559a94a7b597cde94601999489ac43d5ec | [
"Apache-2.0"
] | 15 | 2019-10-18T07:51:45.000Z | 2022-03-10T21:17:16.000Z | petroflow/src/base_delegator.py | atwahsz/petroflow | 089ca9559a94a7b597cde94601999489ac43d5ec | [
"Apache-2.0"
] | 21 | 2019-10-30T14:21:54.000Z | 2022-03-23T16:19:34.000Z | """Implements base delegator - a metaclass that creates absent abstract
methods of `WellBatch` and `Well` classes."""
from abc import ABCMeta
class BaseDelegator(ABCMeta):
"""Base metaclass that searches for absent abstract methods and creates
them."""
def __new__(mcls, name, bases, namespace):
abstract_methods = [base.__abstractmethods__ for base in bases if hasattr(base, "__abstractmethods__")]
abstract_methods = frozenset().union(*abstract_methods)
for method in abstract_methods:
if method not in namespace:
mcls._create_method(method, namespace)
return super().__new__(mcls, name, bases, namespace)
@classmethod
def _create_method(mcls, method, namespace):
"""Create a method, absent in the `namespace`. Must be overridden in
child classes."""
raise NotImplementedError
| 37 | 111 | 0.69482 |
from abc import ABCMeta
class BaseDelegator(ABCMeta):
def __new__(mcls, name, bases, namespace):
abstract_methods = [base.__abstractmethods__ for base in bases if hasattr(base, "__abstractmethods__")]
abstract_methods = frozenset().union(*abstract_methods)
for method in abstract_methods:
if method not in namespace:
mcls._create_method(method, namespace)
return super().__new__(mcls, name, bases, namespace)
@classmethod
def _create_method(mcls, method, namespace):
raise NotImplementedError
| true | true |
1c3aa9b9d9095564eae877e1175073726df84e60 | 5,695 | py | Python | pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py | bouffalolab/pigweed | 455f460f729591c22c4581a962431c3712fe7ea6 | [
"Apache-2.0"
] | 1 | 2022-01-13T10:01:05.000Z | 2022-01-13T10:01:05.000Z | pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py | bouffalolab/pigweed | 455f460f729591c22c4581a962431c3712fe7ea6 | [
"Apache-2.0"
] | null | null | null | pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py | bouffalolab/pigweed | 455f460f729591c22c4581a962431c3712fe7ea6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Generates json trace files viewable using chrome://tracing using RPCs from a
connected HdlcRpcClient.
Example usage:
python pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py -s localhost:33000
-o trace.json
-t
out/pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_rpc
pw_trace_tokenized/pw_trace_protos/trace_rpc.proto
""" # pylint: disable=line-too-long
# pylint: enable=line-too-long
import argparse
import logging
import glob
from pathlib import Path
import sys
from typing import Collection, Iterable, Iterator
import serial # type: ignore
from pw_tokenizer import database
from pw_trace import trace
from pw_hdlc.rpc import HdlcRpcClient, default_channels
from pw_hdlc.rpc_console import SocketClientImpl
from pw_trace_tokenized import trace_tokenized
_LOG = logging.getLogger('pw_trace_tokenizer')
PW_RPC_MAX_PACKET_SIZE = 256
SOCKET_SERVER = 'localhost'
SOCKET_PORT = 33000
MKFIFO_MODE = 0o666
def _expand_globs(globs: Iterable[str]) -> Iterator[Path]:
    """Yield a Path for every file matched by any of the glob patterns."""
    for pattern in globs:
        matches = glob.glob(pattern, recursive=True)
        yield from map(Path, matches)
def get_hdlc_rpc_client(device: str, baudrate: int,
                        proto_globs: Collection[str], socket_addr: str,
                        **kwargs):
    """Get the HdlcRpcClient based on arguments.

    Connects over a socket when ``socket_addr`` is given, otherwise over the
    serial ``device`` at ``baudrate``. ``proto_globs`` selects the .proto
    files used to build the RPC client (defaults to '**/*.proto').

    Returns:
        An HdlcRpcClient on success, or the int ``1`` on failure.
        NOTE(review): returning an exit-code-style 1 means callers that use
        the result directly (e.g. _main) fail later with AttributeError
        instead of exiting cleanly — confirm intent.
    """
    del kwargs  # extra argparse namespace entries are intentionally ignored
    if not proto_globs:
        proto_globs = ['**/*.proto']
    protos = list(_expand_globs(proto_globs))
    if not protos:
        _LOG.critical('No .proto files were found with %s',
                      ', '.join(proto_globs))
        _LOG.critical('At least one .proto file is required')
        return 1
    _LOG.debug('Found %d .proto files found with %s', len(protos),
               ', '.join(proto_globs))
    # TODO(rgoliver): When pw has a generalized transport for RPC this should
    # use it so it isn't specific to HDLC
    if socket_addr is None:
        serial_device = serial.Serial(device, baudrate, timeout=1)
        read = lambda: serial_device.read(8192)
        write = serial_device.write
    else:
        try:
            socket_device = SocketClientImpl(socket_addr)
            read = socket_device.read
            write = socket_device.write
        except ValueError:
            _LOG.exception('Failed to initialize socket at %s', socket_addr)
            return 1
    return HdlcRpcClient(read, protos, default_channels(write))
def get_trace_data_from_device(client):
    """Fetch the raw trace buffer from the device over RPC.

    Each streamed response chunk is framed as a single length byte followed
    by the chunk payload (the format later consumed by
    trace_tokenized.get_trace_events).

    Args:
        client: An HdlcRpcClient whose channel 1 exposes
            pw.trace.TraceService.

    Returns:
        The framed trace data as bytes.
    """
    # Use a bytearray instead of repeated bytes concatenation, which is
    # quadratic in the number of chunks.
    buffer = bytearray()
    service = client.client.channel(1).rpcs.pw.trace.TraceService
    for streamed_data in service.GetTraceData().responses:
        chunk = streamed_data.data
        # Single-byte length prefix: append() raises ValueError for chunks
        # over 255 bytes, same as the original bytes([len(...)]) framing —
        # presumably chunks are bounded by the RPC packet size; confirm.
        buffer.append(len(chunk))
        buffer += chunk
        _LOG.debug(''.join(format(x, '02x') for x in chunk))
    return bytes(buffer)
def _parse_args():
    """Parse and return command line arguments."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Exactly one transport must be chosen: serial device or socket.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-d', '--device', help='the serial port to use')
    parser.add_argument('-b',
                        '--baudrate',
                        type=int,
                        default=115200,
                        help='the baud rate to use')
    # NOTE(review): the trailing backslash continues the string literal, so
    # the help text embeds the next line's leading spaces — confirm intended.
    group.add_argument('-s',
                       '--socket-addr',
                       type=str,
                       help='use socket to connect to server, type default for\
        localhost:33000, or manually input the server address:port')
    parser.add_argument('-o',
                        '--trace_output',
                        dest='trace_output_file',
                        help=('The json file to which to write the output.'))
    parser.add_argument(
        '-t',
        '--trace_token_database',
        help='Databases (ELF, binary, or CSV) to use to lookup trace tokens.')
    parser.add_argument('proto_globs',
                        nargs='+',
                        help='glob pattern for .proto files')
    parser.add_argument(
        '-f',
        '--ticks_per_second',
        type=int,
        dest='ticks_per_second',
        default=1000,
        help=('The clock rate of the trace events (Default 1000).'))
    return parser.parse_args()
def _main(args):
    """Load the token database, fetch trace data over RPC, and write the JSON file."""
    token_database = \
        database.load_token_database(args.trace_token_database, domain="trace")
    _LOG.info(database.database_summary(token_database))
    client = get_hdlc_rpc_client(**vars(args))
    # NOTE(review): get_hdlc_rpc_client returns the int 1 on failure, which
    # would make the next call raise AttributeError — confirm error handling.
    data = get_trace_data_from_device(client)
    events = trace_tokenized.get_trace_events([token_database], data,
                                              args.ticks_per_second)
    json_lines = trace.generate_trace_json(events)
    trace_tokenized.save_trace_file(json_lines, args.trace_output_file)
if __name__ == '__main__':
    # Fail fast with a clear message when invoked under Python 2.
    if sys.version_info[0] < 3:
        sys.exit('ERROR: The detokenizer command line tools require Python 3.')
    _main(_parse_args())
| 35.81761 | 87 | 0.659526 |
import argparse
import logging
import glob
from pathlib import Path
import sys
from typing import Collection, Iterable, Iterator
import serial
from pw_tokenizer import database
from pw_trace import trace
from pw_hdlc.rpc import HdlcRpcClient, default_channels
from pw_hdlc.rpc_console import SocketClientImpl
from pw_trace_tokenized import trace_tokenized
_LOG = logging.getLogger('pw_trace_tokenizer')
PW_RPC_MAX_PACKET_SIZE = 256
SOCKET_SERVER = 'localhost'
SOCKET_PORT = 33000
MKFIFO_MODE = 0o666
def _expand_globs(globs: Iterable[str]) -> Iterator[Path]:
for pattern in globs:
for file in glob.glob(pattern, recursive=True):
yield Path(file)
def get_hdlc_rpc_client(device: str, baudrate: int,
proto_globs: Collection[str], socket_addr: str,
**kwargs):
del kwargs
if not proto_globs:
proto_globs = ['**/*.proto']
protos = list(_expand_globs(proto_globs))
if not protos:
_LOG.critical('No .proto files were found with %s',
', '.join(proto_globs))
_LOG.critical('At least one .proto file is required')
return 1
_LOG.debug('Found %d .proto files found with %s', len(protos),
', '.join(proto_globs))
if socket_addr is None:
serial_device = serial.Serial(device, baudrate, timeout=1)
read = lambda: serial_device.read(8192)
write = serial_device.write
else:
try:
socket_device = SocketClientImpl(socket_addr)
read = socket_device.read
write = socket_device.write
except ValueError:
_LOG.exception('Failed to initialize socket at %s', socket_addr)
return 1
return HdlcRpcClient(read, protos, default_channels(write))
def get_trace_data_from_device(client):
data = b''
service = client.client.channel(1).rpcs.pw.trace.TraceService
result = service.GetTraceData().responses
for streamed_data in result:
data = data + bytes([len(streamed_data.data)])
data = data + streamed_data.data
_LOG.debug(''.join(format(x, '02x') for x in streamed_data.data))
return data
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-d', '--device', help='the serial port to use')
parser.add_argument('-b',
'--baudrate',
type=int,
default=115200,
help='the baud rate to use')
group.add_argument('-s',
'--socket-addr',
type=str,
help='use socket to connect to server, type default for\
localhost:33000, or manually input the server address:port')
parser.add_argument('-o',
'--trace_output',
dest='trace_output_file',
help=('The json file to which to write the output.'))
parser.add_argument(
'-t',
'--trace_token_database',
help='Databases (ELF, binary, or CSV) to use to lookup trace tokens.')
parser.add_argument('proto_globs',
nargs='+',
help='glob pattern for .proto files')
parser.add_argument(
'-f',
'--ticks_per_second',
type=int,
dest='ticks_per_second',
default=1000,
help=('The clock rate of the trace events (Default 1000).'))
return parser.parse_args()
def _main(args):
token_database = \
database.load_token_database(args.trace_token_database, domain="trace")
_LOG.info(database.database_summary(token_database))
client = get_hdlc_rpc_client(**vars(args))
data = get_trace_data_from_device(client)
events = trace_tokenized.get_trace_events([token_database], data,
args.ticks_per_second)
json_lines = trace.generate_trace_json(events)
trace_tokenized.save_trace_file(json_lines, args.trace_output_file)
if __name__ == '__main__':
if sys.version_info[0] < 3:
sys.exit('ERROR: The detokenizer command line tools require Python 3.')
_main(_parse_args())
| true | true |
1c3aabb2158795ae2257c0d560f1b866e0ca84a7 | 30,099 | py | Python | qq/http.py | foxwhite25/qq.py | 92e744205e57b4c8922aa5843095ae900b3c1d84 | [
"MIT"
] | 40 | 2021-12-07T02:18:14.000Z | 2022-03-28T13:14:16.000Z | qq/http.py | foxwhite25/qq.py | 92e744205e57b4c8922aa5843095ae900b3c1d84 | [
"MIT"
] | 2 | 2021-12-12T17:34:29.000Z | 2021-12-17T04:43:03.000Z | qq/http.py | foxwhite25/qq.py | 92e744205e57b4c8922aa5843095ae900b3c1d84 | [
"MIT"
] | 5 | 2021-12-10T11:17:41.000Z | 2022-03-05T13:53:50.000Z | # The MIT License (MIT)
# Copyright (c) 2021-present foxwhite25
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import annotations
import asyncio
import datetime
import logging
import sys
import weakref
from types import TracebackType
from typing import (
ClassVar, Any, Optional, Dict, Union, TypeVar, Type,
Coroutine, List, Tuple, TYPE_CHECKING, NamedTuple
)
from urllib.parse import quote as _uriquote
import aiohttp
from . import __version__, utils
from .embeds import Ark, Embed
from .error import HTTPException, Forbidden, NotFound, QQServerError, LoginFailure, GatewayNotFound
from .gateway import QQClientWebSocketResponse
from .types import user, guild, message, channel, member
from .types.message import Message
from .types.permission import (
Permission as PermissionPayload,
PermissionDemand as PermissionDemandPayload
)
from .types.role import (
WrappedRole as WrappedRolePayload,
Role as RolePayload
)
from .types.schedule import Schedule as SchedulePayload
from .utils import MISSING
if TYPE_CHECKING:
from .message import MessageReference
from .file import File
T = TypeVar('T')
BE = TypeVar('BE', bound=BaseException)
MU = TypeVar('MU', bound='MaybeUnlock')
Response = Coroutine[Any, Any, T]
_log = logging.getLogger(__name__)
__all__ = ('Route', 'HTTPClient')
class Route:
    """One QQ REST endpoint: an HTTP method plus a fully formatted URL.

    Path placeholders (``{channel_id}`` etc.) are substituted from the
    keyword parameters; string values are percent-encoded first.
    """

    BASE: ClassVar[str] = 'https://api.sgroup.qq.com'

    def __init__(self, method: str, path: str, **parameters: Any) -> None:
        self.path: str = path
        self.method: str = method
        full_url = self.BASE + self.path
        if parameters:
            escaped = {
                key: _uriquote(value) if isinstance(value, str) else value
                for key, value in parameters.items()
            }
            full_url = full_url.format_map(escaped)
        self.url: str = full_url

        # Major parameters used for rate-limit bucketing.
        self.channel_id: Optional[str] = parameters.get('channel_id')
        self.guild_id: Optional[str] = parameters.get('guild_id')
        self.token: Optional[str] = parameters.get('token')

    @property
    def bucket(self) -> str:
        """Rate-limit bucket key: the major parameters plus the raw path."""
        return f'{self.channel_id}:{self.guild_id}:{self.path}'
async def json_or_text(response: aiohttp.ClientResponse) -> Union[Dict[str, Any], str]:
    """Decode *response* as JSON when the server says so, else return raw text.

    Returns the parsed dict for an exact ``application/json`` content type;
    any failure (missing header, bad JSON) silently falls back to the text.
    """
    text = await response.text(encoding='utf-8')
    try:
        # Exact match only: e.g. 'application/json; charset=utf-8' would fall
        # through to the plain-text path — presumably fine for this API.
        if response.headers['content-type'] == 'application/json':
            return utils._from_json(text)
    except Exception:
        # Thanks Cloudflare: intermediaries may strip or mangle the header.
        pass
    return text
class MultipartParameters(NamedTuple):
    """Prepared payload for a send-message request.

    Used as a context manager so the attached file (if any) is closed once
    the request has completed.
    """

    # True when the message is a direct message (uses the /dms route).
    direct: bool
    # JSON body, or None when a multipart (file) request is used instead.
    payload: Optional[Dict[str, Any]]
    # Multipart form fields, or None for a plain JSON request.
    multipart: Optional[Dict[str, Any]]
    # The attached file whose handle must be closed after sending.
    file: Optional[File]

    def __enter__(self) -> MultipartParameters:
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BE]],
        exc: Optional[BE],
        traceback: Optional[TracebackType],
    ) -> None:
        # Close the file handle regardless of whether the request succeeded.
        if self.file:
            self.file.close()
def handle_message_parameters(
        content: Optional[str] = MISSING,
        direct: bool = False,
        *,
        msg_id: Optional[str] = MISSING,
        file: File = MISSING,
        image: Optional[str] = MISSING,
        embed: Optional[Embed] = MISSING,
        ark: Optional[Ark] = MISSING,
        message_reference: Optional[MessageReference] = MISSING,
) -> MultipartParameters:
    """Build the JSON or multipart payload for a send-message request.

    When ``file`` is given, all fields move into the multipart form and the
    JSON payload is set to None (see MultipartParameters).
    """
    payload = {}
    if msg_id:
        payload['msg_id'] = msg_id
    if embed:
        payload['embed'] = embed.to_dict()
    if ark:
        # NOTE(review): ark is stored as-is while embed goes through
        # to_dict() — confirm the API accepts the raw object here.
        payload['ark'] = ark
    if image:
        payload['image'] = image
    if content is not MISSING:
        if content is not None:
            payload['content'] = str(content)
        else:
            pass
    # NOTE(review): this overwrites the value set just above; the \ufeff
    # (zero-width no-break space) before each '.' presumably dodges QQ's
    # URL-detection filtering — confirm and collapse the two branches.
    if content:
        payload['content'] = content.replace(".", "\ufeff.")
    if message_reference is not MISSING:
        # NOTE(review): stored raw, not serialized — confirm intended.
        payload['message_reference'] = message_reference
    multipart = {}
    if file:
        for key, value in payload.items():
            multipart[key] = value
        payload = None
        multipart['file_image'] = file.fp
    return MultipartParameters(direct=direct, payload=payload, multipart=multipart, file=file)
class MaybeUnlock:
    """Context manager that releases *lock* on exit unless deferred.

    Calling `defer()` inside the ``with`` block keeps the lock held after
    the block exits.
    """

    def __init__(self, lock: asyncio.Lock) -> None:
        self.lock: asyncio.Lock = lock
        self._unlock: bool = True

    def __enter__(self) -> 'MaybeUnlock':
        return self

    def defer(self) -> None:
        """Keep the lock held when the ``with`` block exits."""
        self._unlock = False

    def __exit__(self, exc_type, exc, traceback) -> None:
        if self._unlock:
            self.lock.release()
class HTTPClient:
"""Represents an HTTP client sending HTTP requests to the QQ API."""
    def __init__(
        self,
        connector: Optional[aiohttp.BaseConnector] = None,
        *,
        proxy: Optional[str] = None,
        proxy_auth: Optional[aiohttp.BasicAuth] = None,
        loop: Optional[asyncio.AbstractEventLoop] = None,
        unsync_clock: bool = True,
    ) -> None:
        """Set up connection state; the aiohttp session itself is created
        lazily in static_login.

        Args:
            connector: Optional aiohttp connector for the session.
            proxy: Optional proxy URL applied to every request.
            proxy_auth: Optional proxy authentication.
            loop: Event loop to use; defaults to asyncio.get_event_loop().
            unsync_clock: stored inverted as ``use_clock``.
        """
        self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop() if loop is None else loop
        self.connector = connector
        self.__session: aiohttp.ClientSession = MISSING  # filled in static_login
        # Per-bucket locks so requests to the same endpoint serialize.
        self._locks: weakref.WeakValueDictionary = weakref.WeakValueDictionary()
        # When set, no global rate limit is in effect; request() awaits it.
        self._global_over: asyncio.Event = asyncio.Event()
        self._global_over.set()
        self.token: Optional[str] = None
        self.bot_token: bool = False
        self.proxy: Optional[str] = proxy
        self.proxy_auth: Optional[aiohttp.BasicAuth] = proxy_auth
        self.use_clock: bool = not unsync_clock
        user_agent = "QQBot (https://github.com/foxwhite25/qq.py {0}) Python/{1[0]}.{1[1]} aiohttp/{2}"
        self.user_agent: str = user_agent.format(__version__, sys.version_info, aiohttp.__version__)
    async def request(
        self,
        route: Route,
        *,
        file: Optional[File] = None,
        form: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Any:
        """Perform one API request for *route* with per-bucket locking and retries.

        Retries up to 5 times on connection resets and on QQ server error
        code 620006. Raises Forbidden/QQServerError/HTTPException for error
        statuses; returns the decoded JSON (or raw text) body on success.
        """
        bucket = route.bucket
        method = route.method
        url = route.url
        # One lock per bucket so concurrent calls to one endpoint serialize.
        lock = self._locks.get(bucket)
        if lock is None:
            lock = asyncio.Lock()
            if bucket is not None:
                self._locks[bucket] = lock
        headers: Dict[str, str] = {
            'User-Agent': self.user_agent,
        }
        # Add token to header
        if self.token is not None:
            headers['Authorization'] = 'Bot ' + self.token
        # Checking if it's a JSON request
        if 'json' in kwargs:
            headers['Content-Type'] = 'application/json'
            kwargs['data'] = utils._to_json(kwargs.pop('json'))
        kwargs['headers'] = headers
        # Proxy support
        if self.proxy is not None:
            kwargs['proxy'] = self.proxy
        if self.proxy_auth is not None:
            kwargs['proxy_auth'] = self.proxy_auth
        if not self._global_over.is_set():
            # wait until the global lock is complete
            await self._global_over.wait()
        # 'reason' is popped from kwargs and sent as an audit-log header;
        # kwargs already references the same headers dict, so this late
        # mutation still takes effect.
        try:
            reason = kwargs.pop('reason')
        except KeyError:
            pass
        else:
            if reason:
                headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
        response: Optional[aiohttp.ClientResponse] = None
        data: Optional[Union[Dict[str, Any], str]] = None
        await lock.acquire()
        # NOTE(review): defer() is never called in this method, so the bucket
        # lock is always released when the block exits.
        with MaybeUnlock(lock) as maybe_lock:
            for tries in range(5):
                if file:
                    file.reset(seek=tries)
                if form:
                    form_data = aiohttp.FormData()
                    for key, value in form.items():
                        form_data.add_field(key, value)
                    kwargs['data'] = form_data
                try:
                    async with self.__session.request(method, url, **kwargs) as response:
                        _log.debug('%s %s 与 %s 已返回 %s Trace ID: %s', method, url, kwargs.get('data'),
                                   response.status, response.headers['X-Tps-trace-ID'])
                        # even errors have text involved in them so this is safe to call
                        data = await json_or_text(response)
                        # the request was successful so just return the text/json
                        if 300 > response.status >= 200:
                            if response.status != 204:
                                _log.debug('%s %s 已收到 %s', method, url, data)
                            return data
                        # we've received a 500, 502, or 504, unconditional retry
                        # if response.status in (500, 502, 504):
                        #     await asyncio.sleep(1 + tries * 2)
                        #     continue
                        # the usual error cases
                        # NOTE(review): 404 is matched here, so the
                        # `elif response.status == 404` branch below is
                        # unreachable and NotFound is never raised from this
                        # method — confirm whether 404 should map to Forbidden.
                        if response.status in [404, 403, 401]:
                            raise Forbidden(response, data, route=route)
                        elif response.status == 404:
                            raise NotFound(response, data, route=route)
                        elif response.status >= 500:
                            raise QQServerError(response, data, route=route)
                        else:
                            raise HTTPException(response, data, route=route)
                # This is handling exceptions from the request
                except OSError as e:
                    # Connection reset by peer
                    if tries < 4 and e.errno in (54, 10054):
                        await asyncio.sleep(1 + tries * 2)
                        continue
                    raise
                except QQServerError as e:
                    # Server error code 620006 is retried with linear backoff.
                    if tries < 4 and e.code in (620006,):
                        await asyncio.sleep(1 + tries * 2)
                        continue
                    raise e
            if response is not None:
                # We've run out of retries, raise.
                if response.status >= 500:
                    raise QQServerError(response, data)
                raise HTTPException(response, data)
            raise RuntimeError('HTTP 处理中无法访问的代码')
    async def static_login(self, token: str) -> user.User:
        """Create the HTTP session and validate *token* against /users/@me.

        Restores the previous token and raises LoginFailure on a 401;
        re-raises any other HTTPException. Returns the bot user payload.
        """
        # Necessary to get aiohttp to stop complaining about session creation
        self.__session = aiohttp.ClientSession(connector=self.connector, ws_response_class=QQClientWebSocketResponse)
        old_token = self.token
        self.token = token
        try:
            data = await self.request(Route('GET', '/users/@me'))
        except HTTPException as exc:
            self.token = old_token
            if exc.status == 401:
                # User-facing message (Chinese): "an incorrect token was passed".
                raise LoginFailure('传递了不正确的令牌。') from exc
            raise
        return data
def get_guilds(
self,
limit: int = 100,
before: Optional[str] = None,
after: Optional[str] = None,
) -> Response[List[guild.Guild]]:
params: Dict[str, Any] = {
'limit': limit,
}
if before:
params['before'] = before
if after:
params['after'] = after
return self.request(Route('GET', '/users/@me/guilds'), params=params)
def get_guild(self, guild_id: int) -> Response[guild.Guild]:
return self.request(Route('GET', '/guilds/{guild_id}', guild_id=guild_id))
def get_guild_channels(self, guild_id: int) -> Response[guild.Guild]:
return self.request(Route('GET', '/guilds/{guild_id}/channels', guild_id=guild_id))
def get_message(self, channel_id: int, message_id: int) -> Response[Message]:
r = Route('GET', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r)
# 身份组管理
def get_roles(self, guild_id: int) -> Response[List[RolePayload]]:
return self.request(Route('GET', '/guilds/{guild_id}/roles', guild_id=guild_id))
def edit_role(
self, guild_id: int, role_id: int, *, reason: Optional[str] = None, **fields: Any
) -> Response[RolePayload]:
r = Route('PATCH', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
valid_keys = ('name', 'color', 'hoist')
payload = {"info": {k: v for k, v in fields.items() if k in valid_keys}}
return self.request(r, json=payload, reason=reason)
def delete_role(self, guild_id: int, role_id: int, *, reason: Optional[str] = None) -> Response[None]:
r = Route('DELETE', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
return self.request(r, reason=reason)
def create_role(
self, guild_id: int, *, reason: Optional[str] = None, **fields: Any
) -> Response[WrappedRolePayload]:
r = Route('POST', '/guilds/{guild_id}/roles', guild_id=guild_id)
return self.request(r, json=fields, reason=reason)
def add_role(
self, guild_id: int, user_id: int, role_id: int, channel_id: Optional[int], *, reason: Optional[str] = None
) -> Response[None]:
r = Route(
'PUT',
'/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id,
user_id=user_id,
role_id=role_id,
)
if channel_id:
payload = {'channel': {'id': str(channel_id)}}
return self.request(r, json=payload, reason=reason)
return self.request(r, reason=reason)
def remove_role(
self, guild_id: int, user_id: int, role_id: int, channel_id: Optional[int], *, reason: Optional[str] = None
) -> Response[None]:
r = Route(
'DELETE',
'/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id,
user_id=user_id,
role_id=role_id,
)
if channel_id:
payload = {'channel': {'id': str(channel_id)}}
return self.request(r, json=payload, reason=reason)
return self.request(r, reason=reason)
def create_channel(
self,
guild_id: int,
channel_type: channel.ChannelType,
*,
reason: Optional[str] = None,
**options: Any,
) -> Response[channel.GuildChannel]:
payload = {
'type': channel_type,
}
valid_keys = (
'name',
'parent_id',
'position',
)
payload.update({k: str(v) for k, v in options.items() if k in valid_keys and v is not None})
return self.request(Route('POST', '/guilds/{guild_id}/channels', guild_id=guild_id), json=payload,
reason=reason)
def edit_channel(
self,
channel_id: int,
*,
reason: Optional[str] = None,
**options: Any,
) -> Response[channel.Channel]:
r = Route('PATCH', '/channels/{channel_id}', channel_id=channel_id)
valid_keys = (
'name',
'parent_id',
'position',
'type',
)
payload = {k: v for k, v in options.items() if k in valid_keys}
return self.request(r, reason=reason, json=payload)
def bulk_channel_update(
self,
guild_id: int,
datas: List[guild.ChannelPositionUpdate],
*,
reason: Optional[str] = None,
) -> List[Response[None]]:
rsp = []
for data in datas:
valid_keys = (
'name',
'parent_id',
'position',
'type',
)
payload = {k: v for k, v in data.items() if k in valid_keys}
r = Route('PATCH', '/channels/{channel_id}', channel_id=data.get('id'))
rsp.append(self.request(r, reason=reason, json=payload))
return rsp
def delete_channel(
self,
channel_id: int,
*,
reason: Optional[str] = None,
) -> Response[None]:
return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id), reason=reason)
async def get_from_cdn(self, url: str) -> bytes:
async with self.__session.get(url) as resp:
if resp.status == 200:
return await resp.read()
elif resp.status == 404:
raise NotFound(resp, 'asset not found')
elif resp.status == 403:
raise Forbidden(resp, 'cannot retrieve asset')
else:
raise HTTPException(resp, 'failed to get asset')
async def get_gateway(self, *, encoding: str = 'json', zlib: bool = True) -> str:
try:
data = await self.request(Route('GET', '/gateway'))
except HTTPException as exc:
raise GatewayNotFound() from exc
return data['url']
async def get_bot_gateway(self, *, encoding: str = 'json', zlib: bool = True) -> Tuple[int, str]:
try:
data = await self.request(Route('GET', '/gateway/bot'))
except HTTPException as exc:
raise GatewayNotFound() from exc
if zlib:
value = '{0}?encoding={1}&v=9&compress=zlib-stream'
else:
value = '{0}?encoding={1}&v=9'
return data['shards'], value.format(data['url'], encoding)
def recreate(self) -> None:
if self.__session.closed:
self.__session = aiohttp.ClientSession(
connector=self.connector, ws_response_class=QQClientWebSocketResponse
)
async def close(self) -> None:
if self.__session:
await self.__session.close()
async def ws_connect(self, url: str, *, compress: int = 0) -> Any:
kwargs = {
'proxy_auth': self.proxy_auth,
'proxy': self.proxy,
'max_msg_size': 0,
'timeout': 30.0,
'autoclose': False,
'headers': {
'User-Agent': self.user_agent,
},
'compress': compress,
}
return await self.__session.ws_connect(url, **kwargs)
def start_private_message(self, user_id: int, guild_id: int) -> Response[channel.DMChannel]:
payload = {
'recipient_id': str(user_id),
'source_guild_id': str(guild_id),
}
return self.request(Route('POST', '/users/@me/dms'), json=payload)
def send_message(
self,
channel_id: int,
*,
params: MultipartParameters,
) -> Response[message.Message]:
r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id) if not params.direct else \
Route('POST', '/dms/{guild_id}/messages', guild_id=channel_id)
if params.file:
return self.request(r, file=params.file, form=params.multipart)
else:
return self.request(r, json=params.payload)
def get_members(
self, guild_id: int, limit: int, after: Optional[int] = None
) -> Response[List[member.MemberWithUser]]:
params: Dict[str, Any] = {
'limit': limit,
}
if after:
params['after'] = after
r = Route('GET', '/guilds/{guild_id}/members', guild_id=guild_id)
return self.request(r, params=params)
def get_member(self, guild_id: int, member_id: int) -> Response[member.MemberWithUser]:
return self.request(
Route('GET', '/guilds/{guild_id}/members/{member_id}', guild_id=guild_id, member_id=member_id))
def get_channel(self, channel_id: int) -> Response[channel.Channel]:
r = Route('GET', '/channels/{channel_id}', channel_id=channel_id)
return self.request(r)
def kick(self, user_id: int, guild_id: int, add_blacklist: bool, reason: Optional[str] = None) -> Response[None]:
r = Route('DELETE', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
payload: Dict[str, Any] = {
'add_blacklist': add_blacklist,
}
if reason:
# thanks aiohttp
r.url = f'{r.url}?reason={_uriquote(reason)}'
return self.request(r, json=payload)
def create_schedule(
self,
channel_id: int,
name: str,
start_timestamp: Union[datetime.datetime, float],
end_timestamp: Union[datetime.datetime, float],
jump_channel_id: int,
remind_type: str,
description: Optional[str],
reason: Optional[str] = None
) -> Response[SchedulePayload]:
payload: Dict[str, Any] = {
"schedule": {
"name": name,
"start_timestamp": str(int(start_timestamp * 1000)) if isinstance(start_timestamp, float)
else str(int(start_timestamp.timestamp() * 1000)),
"end_timestamp": str(int(end_timestamp * 1000)) if isinstance(end_timestamp, float)
else str(int(end_timestamp.timestamp() * 1000)),
"jump_channel_id": str(jump_channel_id),
"remind_type": remind_type
}
}
if description is not None:
payload["schedule"]["description"] = description
r = Route('POST', '/channels/{channel_id}/schedules', channel_id=channel_id)
return self.request(r, json=payload, reason=reason)
def remove_schedule(self, channel_id: int, schedule_id: int, reason: Optional[str] = None) -> Response[None]:
r = Route(
'DELEtE', '/channels/{channel_id}/schedules/{schedule_id}',
channel_id=channel_id,
schedule_id=schedule_id
)
return self.request(r, reason=reason)
def mute_member(
self, user_id: int, guild_id: int, duration: Union[datetime.datetime, int], reason: Optional[str] = None
) -> Response[None]:
payload: Dict[str, Any] = {}
if isinstance(duration, datetime.datetime):
payload['mute_end_timestamp'] = str(int(duration.timestamp() * 1000))
else:
payload['mute_seconds'] = str(duration)
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}/mute', guild_id=guild_id, user_id=user_id)
return self.request(r, json=payload, reason=reason)
def mute_members(
self, user_id: List[int],
guild_id: int,
duration: Union[datetime.datetime, int],
reason: Optional[str] = None
) -> Response[None]:
payload: Dict[str, Any] = {'user_ids': user_id}
if isinstance(duration, datetime.datetime):
payload['mute_end_timestamp'] = str(int(duration.timestamp() * 1000))
else:
payload['mute_seconds'] = str(duration)
r = Route('PATCH', '/guilds/{guild_id}/mute', guild_id=guild_id)
return self.request(r, json=payload, reason=reason)
def mute_guild(
self, guild_id: int, duration: Union[datetime.datetime, int], reason: Optional[str] = None
) -> Response[None]:
payload: Dict[str, Any] = {}
if isinstance(duration, datetime.datetime):
payload['mute_end_timestamp'] = str(int(duration.timestamp() * 1000))
else:
payload['mute_seconds'] = str(duration)
r = Route('PATCH', '/guilds/{guild_id}/mute', guild_id=guild_id)
return self.request(r, json=payload, reason=reason)
def delete_message(
self, channel_id: int, message_id: str, hidetip: bool, *, reason: Optional[str] = None,
) -> Response[None]:
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id,
message_id=message_id)
params = {'hidetip': 'true' if hidetip else 'false'}
return self.request(r, reason=reason, params=params)
def delete_messages(
self, channel_id: int, message_ids: List[str], *, reason: Optional[str] = None
) -> Response[None]:
r = Route('POST', '/channels/{channel_id}/messages/bulk-delete', channel_id=channel_id)
payload = {
'messages': message_ids,
}
return self.request(r, json=payload, reason=reason)
def logs_from(
self,
channel_id: int,
limit: int,
before: Optional[datetime.datetime] = None,
after: Optional[datetime.datetime] = None,
around: Optional[datetime.datetime] = None,
) -> Response[List[message.Message]]:
params: Dict[str, Any] = {
'limit': limit,
}
if before is not None:
params['before'] = datetime.datetime.timestamp(before)
if after is not None:
params['after'] = datetime.datetime.timestamp(after)
if around is not None:
params['around'] = datetime.datetime.timestamp(around)
return self.request(Route('GET', '/channels/{channel_id}/messages', channel_id=channel_id), params=params)
def get_permission(self, guild_id: int) -> Response[List[PermissionPayload]]:
return self.request(Route('GET', '/guilds/{guild_id}/api_permission', guild_id=guild_id))
def demand_permission(
self,
guild_id: int,
channel_id: int,
desc: str,
path: str,
method: str
) -> Response[PermissionDemandPayload]:
payload: Dict[str, Any] = {
"channel_id": str(channel_id),
"api_identify": {"path": path, "method": method},
"desc": desc
}
return self.request(Route('POST', '/guilds/{guild_id}/api_permission/demand', guild_id=guild_id), json=payload)
def global_pin_message(
self,
guild_id: int,
channel_id: Optional[int] = None,
message_id: Optional[str] = None,
announces_type: Optional[int] = None,
recommend_channels: Optional[Dict[int, str]] = None,
reason: Optional[str] = None
):
r = Route(
'POST',
'/guilds/{guild_id}/announces',
guild_id=guild_id,
)
if recommend_channels:
recommend_channels: List[Dict[str, str]] = [
{
'channel_id': str(m),
'introduce': n
} for m, n in recommend_channels.items()
]
payload: Dict[str, Any] = {}
if channel_id is not None:
payload['channel_id'] = channel_id
if message_id is not None:
payload['message_id'] = message_id
if announces_type is not None:
payload['announces_types'] = announces_type
if recommend_channels is not None:
payload['recommend_channels'] = recommend_channels
return self.request(r, json=payload, reason=reason)
def global_unpin_message(self, guild_id: int, message_id: str, reason: Optional[str] = None):
r = Route(
'DELETE',
'/guilds/{guild_id}/announces/{message_id}',
guild_id=guild_id,
message_id=message_id
)
return self.request(r, reason=reason)
def channel_pin_message(self, channel_id: int, message_id: str, reason: Optional[str] = None):
r = Route(
'POST',
'/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id,
message_id=message_id,
)
return self.request(r, reason=reason)
def channel_unpin_message(self, channel_id: int, message_id: str, reason: Optional[str] = None):
r = Route(
'DELETE',
'/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id,
message_id=message_id
)
return self.request(r, reason=reason)
def add_reaction(
self, channel_id: int, message_id: str, custom: bool, id: int
) -> Response[None]:
r = Route(
'PUT',
'/channels/{channel_id}/messages/{message_id}/reactions/{type}/{id}',
channel_id=channel_id,
message_id=message_id,
type=1 if custom else 2,
id=id
)
return self.request(r)
def remove_reaction(
self, channel_id: int, message_id: str, custom: bool, id: int
) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/{member_id}',
channel_id=channel_id,
message_id=message_id,
type=1 if custom else 2,
id=id
)
return self.request(r)
def send_guide(
self,
channel_id: int,
content: str,
):
r = Route(
'POST',
'/channels/{channel_id}/settingguide',
channel_id=channel_id
)
payload = {'content': content}
return self.request(r, json=payload)
| 36.089928 | 119 | 0.573973 |
from __future__ import annotations
import asyncio
import datetime
import logging
import sys
import weakref
from types import TracebackType
from typing import (
ClassVar, Any, Optional, Dict, Union, TypeVar, Type,
Coroutine, List, Tuple, TYPE_CHECKING, NamedTuple
)
from urllib.parse import quote as _uriquote
import aiohttp
from . import __version__, utils
from .embeds import Ark, Embed
from .error import HTTPException, Forbidden, NotFound, QQServerError, LoginFailure, GatewayNotFound
from .gateway import QQClientWebSocketResponse
from .types import user, guild, message, channel, member
from .types.message import Message
from .types.permission import (
Permission as PermissionPayload,
PermissionDemand as PermissionDemandPayload
)
from .types.role import (
WrappedRole as WrappedRolePayload,
Role as RolePayload
)
from .types.schedule import Schedule as SchedulePayload
from .utils import MISSING
if TYPE_CHECKING:
from .message import MessageReference
from .file import File
T = TypeVar('T')
BE = TypeVar('BE', bound=BaseException)
MU = TypeVar('MU', bound='MaybeUnlock')
Response = Coroutine[Any, Any, T]
_log = logging.getLogger(__name__)
__all__ = ('Route', 'HTTPClient')
class Route:
    """A single QQ API endpoint: an HTTP method plus a fully formatted URL.

    ``path`` may contain ``str.format``-style placeholders that are filled
    from ``parameters``; string values are percent-quoted before insertion.
    """
    BASE: ClassVar[str] = 'https://api.sgroup.qq.com'

    def __init__(self, method: str, path: str, **parameters: Any) -> None:
        self.path: str = path
        self.method: str = method
        full_url = self.BASE + self.path
        if parameters:
            quoted = {
                key: _uriquote(value) if isinstance(value, str) else value
                for key, value in parameters.items()
            }
            full_url = full_url.format_map(quoted)
        self.url: str = full_url

        # Identifiers relevant to rate limiting (None when not in the route).
        self.channel_id: Optional[str] = parameters.get('channel_id')
        self.guild_id: Optional[str] = parameters.get('guild_id')
        self.token: Optional[str] = parameters.get('token')

    @property
    def bucket(self) -> str:
        """Key used to serialise requests that share a rate limit."""
        return f'{self.channel_id}:{self.guild_id}:{self.path}'
async def json_or_text(response: aiohttp.ClientResponse) -> Union[Dict[str, Any], str]:
    """Decode ``response`` as JSON when the content-type says so, else return raw text.

    Any decoding problem (missing header, malformed JSON) falls back to the
    plain text body instead of raising.
    """
    text = await response.text(encoding='utf-8')
    try:
        # NOTE(review): a charset suffix (``application/json; charset=utf-8``)
        # would fail this exact string comparison and fall through to text --
        # confirm the server never sends one.
        if response.headers['content-type'] == 'application/json':
            return utils._from_json(text)
    except Exception:
        pass

    return text
class MultipartParameters(NamedTuple):
    """Prepared send-message arguments: either a JSON payload or multipart form."""
    # True when the message targets a DM endpoint rather than a channel.
    direct: bool
    # JSON body for plain requests; None when multipart is used instead.
    payload: Optional[Dict[str, Any]]
    # Multipart form fields; populated only when a file is attached.
    multipart: Optional[Dict[str, Any]]
    # Attached file, closed when the instance is used as a context manager.
    file: Optional[File]

    def __enter__(self) -> MultipartParameters:
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BE]],
        exc: Optional[BE],
        traceback: Optional[TracebackType],
    ) -> None:
        # Release the file handle even when the body raised.
        if self.file:
            self.file.close()
def handle_message_parameters(
    content: Optional[str] = MISSING,
    direct: bool = False,
    *,
    msg_id: Optional[str] = MISSING,
    file: File = MISSING,
    image: Optional[str] = MISSING,
    embed: Optional[Embed] = MISSING,
    ark: Optional[Ark] = MISSING,
    message_reference: Optional[MessageReference] = MISSING,
) -> MultipartParameters:
    """Normalise send-message arguments into a JSON payload or multipart form.

    ``MISSING`` marks "not supplied"; only supplied fields end up in the
    payload.  When ``file`` is given, the JSON fields are folded into a
    multipart form instead and the payload is set to None.
    """
    payload = {}
    if msg_id:
        payload['msg_id'] = msg_id
    if embed:
        payload['embed'] = embed.to_dict()
    if ark:
        payload['ark'] = ark
    if image:
        payload['image'] = image
    if content is not MISSING:
        if content is not None:
            payload['content'] = str(content)
        else:
            pass
    # NOTE(review): this branch overwrites the value set above for any
    # non-empty content, inserting a zero-width BOM before each '.' --
    # presumably to defeat URL auto-detection.  It also assumes MISSING is
    # falsy; confirm against utils.MISSING before relying on that.
    if content:
        payload['content'] = content.replace(".", "\ufeff.")

    if message_reference is not MISSING:
        payload['message_reference'] = message_reference

    multipart = {}
    if file:
        # Move every JSON field into the form and attach the file stream.
        for key, value in payload.items():
            multipart[key] = value
        payload = None
        multipart['file_image'] = file.fp
    return MultipartParameters(direct=direct, payload=payload, multipart=multipart, file=file)
class MaybeUnlock:
def __init__(self, lock: asyncio.Lock) -> None:
self.lock: asyncio.Lock = lock
self._unlock: bool = True
def __enter__(self: MU) -> MU:
return self
def defer(self) -> None:
self._unlock = False
def __exit__(
self,
exc_type: Optional[Type[BE]],
exc: Optional[BE],
traceback: Optional[TracebackType],
) -> None:
if self._unlock:
self.lock.release()
class HTTPClient:
    def __init__(
        self,
        connector: Optional[aiohttp.BaseConnector] = None,
        *,
        proxy: Optional[str] = None,
        proxy_auth: Optional[aiohttp.BasicAuth] = None,
        loop: Optional[asyncio.AbstractEventLoop] = None,
        unsync_clock: bool = True,
    ) -> None:
        """Prepare the HTTP client; the aiohttp session itself is created lazily in static_login."""
        self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop() if loop is None else loop
        self.connector = connector
        # MISSING until static_login() builds the real ClientSession.
        self.__session: aiohttp.ClientSession = MISSING
        # Per-bucket locks; weak values let idle buckets be garbage collected.
        self._locks: weakref.WeakValueDictionary = weakref.WeakValueDictionary()
        # Set means "no global rate limit active"; request() waits on it.
        self._global_over: asyncio.Event = asyncio.Event()
        self._global_over.set()
        self.token: Optional[str] = None
        self.bot_token: bool = False
        self.proxy: Optional[str] = proxy
        self.proxy_auth: Optional[aiohttp.BasicAuth] = proxy_auth
        self.use_clock: bool = not unsync_clock
        user_agent = "QQBot (https://github.com/foxwhite25/qq.py {0}) Python/{1[0]}.{1[1]} aiohttp/{2}"
        self.user_agent: str = user_agent.format(__version__, sys.version_info, aiohttp.__version__)
async def request(
self,
route: Route,
*,
file: Optional[File] = None,
form: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
bucket = route.bucket
method = route.method
url = route.url
lock = self._locks.get(bucket)
if lock is None:
lock = asyncio.Lock()
if bucket is not None:
self._locks[bucket] = lock
headers: Dict[str, str] = {
'User-Agent': self.user_agent,
}
if self.token is not None:
headers['Authorization'] = 'Bot ' + self.token
if 'json' in kwargs:
headers['Content-Type'] = 'application/json'
kwargs['data'] = utils._to_json(kwargs.pop('json'))
kwargs['headers'] = headers
# Proxy support
if self.proxy is not None:
kwargs['proxy'] = self.proxy
if self.proxy_auth is not None:
kwargs['proxy_auth'] = self.proxy_auth
if not self._global_over.is_set():
# wait until the global lock is complete
await self._global_over.wait()
try:
reason = kwargs.pop('reason')
except KeyError:
pass
else:
if reason:
headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
response: Optional[aiohttp.ClientResponse] = None
data: Optional[Union[Dict[str, Any], str]] = None
await lock.acquire()
with MaybeUnlock(lock) as maybe_lock:
for tries in range(5):
if file:
file.reset(seek=tries)
if form:
form_data = aiohttp.FormData()
for key, value in form.items():
form_data.add_field(key, value)
kwargs['data'] = form_data
try:
async with self.__session.request(method, url, **kwargs) as response:
_log.debug('%s %s 与 %s 已返回 %s Trace ID: %s', method, url, kwargs.get('data'),
response.status, response.headers['X-Tps-trace-ID'])
# even errors have text involved in them so this is safe to call
data = await json_or_text(response)
# the request was successful so just return the text/json
if 300 > response.status >= 200:
if response.status != 204:
_log.debug('%s %s 已收到 %s', method, url, data)
return data
# we've received a 500, 502, or 504, unconditional retry
if response.status in [404, 403, 401]:
raise Forbidden(response, data, route=route)
elif response.status == 404:
raise NotFound(response, data, route=route)
elif response.status >= 500:
raise QQServerError(response, data, route=route)
else:
raise HTTPException(response, data, route=route)
except OSError as e:
if tries < 4 and e.errno in (54, 10054):
await asyncio.sleep(1 + tries * 2)
continue
raise
except QQServerError as e:
if tries < 4 and e.code in (620006,):
await asyncio.sleep(1 + tries * 2)
continue
raise e
if response is not None:
if response.status >= 500:
raise QQServerError(response, data)
raise HTTPException(response, data)
raise RuntimeError('HTTP 处理中无法访问的代码')
    async def static_login(self, token: str) -> user.User:
        """Create the HTTP session, store ``token`` and fetch the bot user.

        Raises :class:`LoginFailure` when the token is rejected (HTTP 401).
        """
        # Necessary to get aiohttp to stop complaining about session creation
        self.__session = aiohttp.ClientSession(connector=self.connector, ws_response_class=QQClientWebSocketResponse)
        old_token = self.token
        self.token = token

        try:
            data = await self.request(Route('GET', '/users/@me'))
        except HTTPException as exc:
            # Restore the previous token so a failed login is side-effect free.
            self.token = old_token
            if exc.status == 401:
                raise LoginFailure('传递了不正确的令牌。') from exc
            raise

        return data

    def get_guilds(
        self,
        limit: int = 100,
        before: Optional[str] = None,
        after: Optional[str] = None,
    ) -> Response[List[guild.Guild]]:
        """Page through the guilds the bot belongs to (``before``/``after`` are guild ids)."""
        params: Dict[str, Any] = {
            'limit': limit,
        }

        if before:
            params['before'] = before
        if after:
            params['after'] = after

        return self.request(Route('GET', '/users/@me/guilds'), params=params)

    def get_guild(self, guild_id: int) -> Response[guild.Guild]:
        """Fetch a single guild by id."""
        return self.request(Route('GET', '/guilds/{guild_id}', guild_id=guild_id))

    def get_guild_channels(self, guild_id: int) -> Response[guild.Guild]:
        """Fetch all channels of a guild."""
        # NOTE(review): the endpoint returns the guild's channel list, but
        # the annotation says guild.Guild -- confirm and correct the type.
        return self.request(Route('GET', '/guilds/{guild_id}/channels', guild_id=guild_id))

    def get_message(self, channel_id: int, message_id: int) -> Response[Message]:
        """Fetch a single message from a channel."""
        r = Route('GET', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
        return self.request(r)
# 身份组管理
def get_roles(self, guild_id: int) -> Response[List[RolePayload]]:
return self.request(Route('GET', '/guilds/{guild_id}/roles', guild_id=guild_id))
def edit_role(
self, guild_id: int, role_id: int, *, reason: Optional[str] = None, **fields: Any
) -> Response[RolePayload]:
r = Route('PATCH', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
valid_keys = ('name', 'color', 'hoist')
payload = {"info": {k: v for k, v in fields.items() if k in valid_keys}}
return self.request(r, json=payload, reason=reason)
def delete_role(self, guild_id: int, role_id: int, *, reason: Optional[str] = None) -> Response[None]:
r = Route('DELETE', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
return self.request(r, reason=reason)
def create_role(
self, guild_id: int, *, reason: Optional[str] = None, **fields: Any
) -> Response[WrappedRolePayload]:
r = Route('POST', '/guilds/{guild_id}/roles', guild_id=guild_id)
return self.request(r, json=fields, reason=reason)
def add_role(
self, guild_id: int, user_id: int, role_id: int, channel_id: Optional[int], *, reason: Optional[str] = None
) -> Response[None]:
r = Route(
'PUT',
'/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id,
user_id=user_id,
role_id=role_id,
)
if channel_id:
payload = {'channel': {'id': str(channel_id)}}
return self.request(r, json=payload, reason=reason)
return self.request(r, reason=reason)
def remove_role(
self, guild_id: int, user_id: int, role_id: int, channel_id: Optional[int], *, reason: Optional[str] = None
) -> Response[None]:
r = Route(
'DELETE',
'/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id,
user_id=user_id,
role_id=role_id,
)
if channel_id:
payload = {'channel': {'id': str(channel_id)}}
return self.request(r, json=payload, reason=reason)
return self.request(r, reason=reason)
def create_channel(
self,
guild_id: int,
channel_type: channel.ChannelType,
*,
reason: Optional[str] = None,
**options: Any,
) -> Response[channel.GuildChannel]:
payload = {
'type': channel_type,
}
valid_keys = (
'name',
'parent_id',
'position',
)
payload.update({k: str(v) for k, v in options.items() if k in valid_keys and v is not None})
return self.request(Route('POST', '/guilds/{guild_id}/channels', guild_id=guild_id), json=payload,
reason=reason)
def edit_channel(
self,
channel_id: int,
*,
reason: Optional[str] = None,
**options: Any,
) -> Response[channel.Channel]:
r = Route('PATCH', '/channels/{channel_id}', channel_id=channel_id)
valid_keys = (
'name',
'parent_id',
'position',
'type',
)
payload = {k: v for k, v in options.items() if k in valid_keys}
return self.request(r, reason=reason, json=payload)
def bulk_channel_update(
self,
guild_id: int,
datas: List[guild.ChannelPositionUpdate],
*,
reason: Optional[str] = None,
) -> List[Response[None]]:
rsp = []
for data in datas:
valid_keys = (
'name',
'parent_id',
'position',
'type',
)
payload = {k: v for k, v in data.items() if k in valid_keys}
r = Route('PATCH', '/channels/{channel_id}', channel_id=data.get('id'))
rsp.append(self.request(r, reason=reason, json=payload))
return rsp
def delete_channel(
self,
channel_id: int,
*,
reason: Optional[str] = None,
) -> Response[None]:
return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id), reason=reason)
    async def get_from_cdn(self, url: str) -> bytes:
        """Download raw bytes from ``url`` (e.g. an avatar or other asset).

        Raises :class:`NotFound`, :class:`Forbidden` or :class:`HTTPException`
        mirroring the CDN's status code.
        """
        async with self.__session.get(url) as resp:
            if resp.status == 200:
                return await resp.read()
            elif resp.status == 404:
                raise NotFound(resp, 'asset not found')
            elif resp.status == 403:
                raise Forbidden(resp, 'cannot retrieve asset')
            else:
                raise HTTPException(resp, 'failed to get asset')
async def get_gateway(self, *, encoding: str = 'json', zlib: bool = True) -> str:
try:
data = await self.request(Route('GET', '/gateway'))
except HTTPException as exc:
raise GatewayNotFound() from exc
return data['url']
async def get_bot_gateway(self, *, encoding: str = 'json', zlib: bool = True) -> Tuple[int, str]:
try:
data = await self.request(Route('GET', '/gateway/bot'))
except HTTPException as exc:
raise GatewayNotFound() from exc
if zlib:
value = '{0}?encoding={1}&v=9&compress=zlib-stream'
else:
value = '{0}?encoding={1}&v=9'
return data['shards'], value.format(data['url'], encoding)
def recreate(self) -> None:
if self.__session.closed:
self.__session = aiohttp.ClientSession(
connector=self.connector, ws_response_class=QQClientWebSocketResponse
)
async def close(self) -> None:
if self.__session:
await self.__session.close()
async def ws_connect(self, url: str, *, compress: int = 0) -> Any:
kwargs = {
'proxy_auth': self.proxy_auth,
'proxy': self.proxy,
'max_msg_size': 0,
'timeout': 30.0,
'autoclose': False,
'headers': {
'User-Agent': self.user_agent,
},
'compress': compress,
}
return await self.__session.ws_connect(url, **kwargs)
def start_private_message(self, user_id: int, guild_id: int) -> Response[channel.DMChannel]:
payload = {
'recipient_id': str(user_id),
'source_guild_id': str(guild_id),
}
return self.request(Route('POST', '/users/@me/dms'), json=payload)
def send_message(
self,
channel_id: int,
*,
params: MultipartParameters,
) -> Response[message.Message]:
r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id) if not params.direct else \
Route('POST', '/dms/{guild_id}/messages', guild_id=channel_id)
if params.file:
return self.request(r, file=params.file, form=params.multipart)
else:
return self.request(r, json=params.payload)
def get_members(
self, guild_id: int, limit: int, after: Optional[int] = None
) -> Response[List[member.MemberWithUser]]:
params: Dict[str, Any] = {
'limit': limit,
}
if after:
params['after'] = after
r = Route('GET', '/guilds/{guild_id}/members', guild_id=guild_id)
return self.request(r, params=params)
def get_member(self, guild_id: int, member_id: int) -> Response[member.MemberWithUser]:
return self.request(
Route('GET', '/guilds/{guild_id}/members/{member_id}', guild_id=guild_id, member_id=member_id))
def get_channel(self, channel_id: int) -> Response[channel.Channel]:
r = Route('GET', '/channels/{channel_id}', channel_id=channel_id)
return self.request(r)
    def kick(self, user_id: int, guild_id: int, add_blacklist: bool, reason: Optional[str] = None) -> Response[None]:
        """Remove ``user_id`` from ``guild_id``; optionally add them to the blacklist."""
        r = Route('DELETE', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
        payload: Dict[str, Any] = {
            'add_blacklist': add_blacklist,
        }
        if reason:
            # thanks aiohttp
            # DELETE bodies are awkward, so the reason rides on the query string.
            r.url = f'{r.url}?reason={_uriquote(reason)}'

        return self.request(r, json=payload)
def create_schedule(
self,
channel_id: int,
name: str,
start_timestamp: Union[datetime.datetime, float],
end_timestamp: Union[datetime.datetime, float],
jump_channel_id: int,
remind_type: str,
description: Optional[str],
reason: Optional[str] = None
) -> Response[SchedulePayload]:
payload: Dict[str, Any] = {
"schedule": {
"name": name,
"start_timestamp": str(int(start_timestamp * 1000)) if isinstance(start_timestamp, float)
else str(int(start_timestamp.timestamp() * 1000)),
"end_timestamp": str(int(end_timestamp * 1000)) if isinstance(end_timestamp, float)
else str(int(end_timestamp.timestamp() * 1000)),
"jump_channel_id": str(jump_channel_id),
"remind_type": remind_type
}
}
if description is not None:
payload["schedule"]["description"] = description
r = Route('POST', '/channels/{channel_id}/schedules', channel_id=channel_id)
return self.request(r, json=payload, reason=reason)
def remove_schedule(self, channel_id: int, schedule_id: int, reason: Optional[str] = None) -> Response[None]:
r = Route(
'DELEtE', '/channels/{channel_id}/schedules/{schedule_id}',
channel_id=channel_id,
schedule_id=schedule_id
)
return self.request(r, reason=reason)
def mute_member(
self, user_id: int, guild_id: int, duration: Union[datetime.datetime, int], reason: Optional[str] = None
) -> Response[None]:
payload: Dict[str, Any] = {}
if isinstance(duration, datetime.datetime):
payload['mute_end_timestamp'] = str(int(duration.timestamp() * 1000))
else:
payload['mute_seconds'] = str(duration)
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}/mute', guild_id=guild_id, user_id=user_id)
return self.request(r, json=payload, reason=reason)
def mute_members(
self, user_id: List[int],
guild_id: int,
duration: Union[datetime.datetime, int],
reason: Optional[str] = None
) -> Response[None]:
payload: Dict[str, Any] = {'user_ids': user_id}
if isinstance(duration, datetime.datetime):
payload['mute_end_timestamp'] = str(int(duration.timestamp() * 1000))
else:
payload['mute_seconds'] = str(duration)
r = Route('PATCH', '/guilds/{guild_id}/mute', guild_id=guild_id)
return self.request(r, json=payload, reason=reason)
def mute_guild(
self, guild_id: int, duration: Union[datetime.datetime, int], reason: Optional[str] = None
) -> Response[None]:
payload: Dict[str, Any] = {}
if isinstance(duration, datetime.datetime):
payload['mute_end_timestamp'] = str(int(duration.timestamp() * 1000))
else:
payload['mute_seconds'] = str(duration)
r = Route('PATCH', '/guilds/{guild_id}/mute', guild_id=guild_id)
return self.request(r, json=payload, reason=reason)
def delete_message(
self, channel_id: int, message_id: str, hidetip: bool, *, reason: Optional[str] = None,
) -> Response[None]:
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id,
message_id=message_id)
params = {'hidetip': 'true' if hidetip else 'false'}
return self.request(r, reason=reason, params=params)
def delete_messages(
self, channel_id: int, message_ids: List[str], *, reason: Optional[str] = None
) -> Response[None]:
r = Route('POST', '/channels/{channel_id}/messages/bulk-delete', channel_id=channel_id)
payload = {
'messages': message_ids,
}
return self.request(r, json=payload, reason=reason)
def logs_from(
self,
channel_id: int,
limit: int,
before: Optional[datetime.datetime] = None,
after: Optional[datetime.datetime] = None,
around: Optional[datetime.datetime] = None,
) -> Response[List[message.Message]]:
params: Dict[str, Any] = {
'limit': limit,
}
if before is not None:
params['before'] = datetime.datetime.timestamp(before)
if after is not None:
params['after'] = datetime.datetime.timestamp(after)
if around is not None:
params['around'] = datetime.datetime.timestamp(around)
return self.request(Route('GET', '/channels/{channel_id}/messages', channel_id=channel_id), params=params)
def get_permission(self, guild_id: int) -> Response[List[PermissionPayload]]:
return self.request(Route('GET', '/guilds/{guild_id}/api_permission', guild_id=guild_id))
def demand_permission(
self,
guild_id: int,
channel_id: int,
desc: str,
path: str,
method: str
) -> Response[PermissionDemandPayload]:
payload: Dict[str, Any] = {
"channel_id": str(channel_id),
"api_identify": {"path": path, "method": method},
"desc": desc
}
return self.request(Route('POST', '/guilds/{guild_id}/api_permission/demand', guild_id=guild_id), json=payload)
def global_pin_message(
self,
guild_id: int,
channel_id: Optional[int] = None,
message_id: Optional[str] = None,
announces_type: Optional[int] = None,
recommend_channels: Optional[Dict[int, str]] = None,
reason: Optional[str] = None
):
r = Route(
'POST',
'/guilds/{guild_id}/announces',
guild_id=guild_id,
)
if recommend_channels:
recommend_channels: List[Dict[str, str]] = [
{
'channel_id': str(m),
'introduce': n
} for m, n in recommend_channels.items()
]
payload: Dict[str, Any] = {}
if channel_id is not None:
payload['channel_id'] = channel_id
if message_id is not None:
payload['message_id'] = message_id
if announces_type is not None:
payload['announces_types'] = announces_type
if recommend_channels is not None:
payload['recommend_channels'] = recommend_channels
return self.request(r, json=payload, reason=reason)
def global_unpin_message(self, guild_id: int, message_id: str, reason: Optional[str] = None):
r = Route(
'DELETE',
'/guilds/{guild_id}/announces/{message_id}',
guild_id=guild_id,
message_id=message_id
)
return self.request(r, reason=reason)
def channel_pin_message(self, channel_id: int, message_id: str, reason: Optional[str] = None):
r = Route(
'POST',
'/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id,
message_id=message_id,
)
return self.request(r, reason=reason)
def channel_unpin_message(self, channel_id: int, message_id: str, reason: Optional[str] = None):
r = Route(
'DELETE',
'/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id,
message_id=message_id
)
return self.request(r, reason=reason)
    def add_reaction(
        self, channel_id: int, message_id: str, custom: bool, id: int
    ) -> Response[None]:
        """Add a reaction to a message; ``id`` is the emoji identifier."""
        r = Route(
            'PUT',
            '/channels/{channel_id}/messages/{message_id}/reactions/{type}/{id}',
            channel_id=channel_id,
            message_id=message_id,
            type=1 if custom else 2,  # NOTE(review): confirm the 1/2 mapping against QQ's emoji-type docs
            id=id
        )
        return self.request(r)
def remove_reaction(
self, channel_id: int, message_id: str, custom: bool, id: int
) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/{member_id}',
channel_id=channel_id,
message_id=message_id,
type=1 if custom else 2,
id=id
)
return self.request(r)
def send_guide(
self,
channel_id: int,
content: str,
):
r = Route(
'POST',
'/channels/{channel_id}/settingguide',
channel_id=channel_id
)
payload = {'content': content}
return self.request(r, json=payload)
| true | true |
1c3aacb6c616d830effc8166735c00bdd26092f7 | 1,288 | py | Python | htpanel/app.py | mpavelka/htpasswd-webpanel | eba3f270316ea288e41491ef27f7f45bb219aeef | [
"MIT"
] | null | null | null | htpanel/app.py | mpavelka/htpasswd-webpanel | eba3f270316ea288e41491ef27f7f45bb219aeef | [
"MIT"
] | null | null | null | htpanel/app.py | mpavelka/htpasswd-webpanel | eba3f270316ea288e41491ef27f7f45bb219aeef | [
"MIT"
] | null | null | null | import asab
import asab.web
import logging
import os
from .template import Jinja2TemplateService
from .url import URLService
from .htpasswd.handler import HtpasswdHandler
from .htpasswd.service import HtpasswdService
###
L = logging.getLogger(__name__)
###
# Default configuration; each key can be overridden via asab's config file.
asab.Config.add_defaults({
	"htpasswd_webpanel": {
		"secret": "",  # session-signing secret; auto-generated when shorter than 32 chars
		"basepath": "",  # URL prefix the panel is served under (or HTPANEL_BASEPATH env var)
		"staticpath": "",  # filesystem path for static assets
		"listen": "0.0.0.0:8080",  # web container bind address
	}
})
class Application(asab.Application):
    """Htpasswd web-panel application.

    Wires up the web container plus the URL/template/htpasswd services and
    generates a random application secret when none (or a weak one) is
    configured.
    """

    def __init__(self):
        super().__init__()

        # Generate safe application secret
        # Fix: ``Logger.warn`` is a deprecated alias of ``Logger.warning``.
        if len(asab.Config["htpasswd_webpanel"]["secret"]) < 32:
            L.warning("Weak or empty application secret is set up. It will be generated.")
            # 16 random bytes -> 32 hex characters, satisfying the check above.
            asab.Config["htpasswd_webpanel"]["secret"] = os.urandom(16).hex()

        # Set basepath from envvar
        if len(asab.Config["htpasswd_webpanel"]["basepath"]) == 0:
            asab.Config["htpasswd_webpanel"]["basepath"] = os.environ.get('HTPANEL_BASEPATH', '')

        # Web module/service
        self.add_module(asab.web.Module)
        websvc = self.get_service('asab.WebService')

        # Web container
        self.WebContainer = asab.web.WebContainer(
            websvc,
            'htpasswd_webpanel',
            config={
                "listen": asab.Config["htpasswd_webpanel"]["listen"]
            }
        )

        URLService(self)
        Jinja2TemplateService(self)
        HtpasswdService(self)
        HtpasswdHandler(self)
| 22.596491 | 88 | 0.710404 | import asab
import asab.web
import logging
import os
from .template import Jinja2TemplateService
from .url import URLService
from .htpasswd.handler import HtpasswdHandler
from .htpasswd.service import HtpasswdService
= logging.getLogger(__name__)
sab.Config.add_defaults({
"htpasswd_webpanel": {
"secret": "",
"basepath": "",
"staticpath": "",
"listen": "0.0.0.0:8080",
}
})
class Application(asab.Application):
def __init__(self):
super().__init__()
if len(asab.Config["htpasswd_webpanel"]["secret"]) < 32:
L.warn("Weak or empty application secret is set up. It will be generated.")
asab.Config["htpasswd_webpanel"]["secret"] = os.urandom(16).hex()
if len(asab.Config["htpasswd_webpanel"]["basepath"]) == 0:
asab.Config["htpasswd_webpanel"]["basepath"] = os.environ.get('HTPANEL_BASEPATH', '')
self.add_module(asab.web.Module)
websvc = self.get_service('asab.WebService')
self.WebContainer = asab.web.WebContainer(
websvc,
'htpasswd_webpanel',
config={
"listen": asab.Config["htpasswd_webpanel"]["listen"]
}
)
URLService(self)
Jinja2TemplateService(self)
HtpasswdService(self)
HtpasswdHandler(self)
| true | true |
1c3aadbf71b01cdadf7ef4006970fff5e47739a5 | 4,907 | py | Python | data/data_utils.py | zheang01/FACT | a877cc86acc4d29fb7589c8ac571c8aef09e5fd8 | [
"MIT"
] | 65 | 2021-06-14T16:16:40.000Z | 2022-03-30T03:10:52.000Z | data/data_utils.py | zheang01/FACT | a877cc86acc4d29fb7589c8ac571c8aef09e5fd8 | [
"MIT"
] | 5 | 2021-07-14T06:58:38.000Z | 2021-11-29T10:52:27.000Z | data/data_utils.py | zheang01/FACT | a877cc86acc4d29fb7589c8ac571c8aef09e5fd8 | [
"MIT"
] | 13 | 2021-06-14T16:16:40.000Z | 2022-03-14T12:29:19.000Z | from torchvision import transforms
import random
import torch
import numpy as np
from math import sqrt
def dataset_info(filepath):
    """Parse a ``<path> <label>`` listing file.

    Each line of ``filepath`` holds an image path and an integer class label
    separated by a single space.  Returns ``(file_names, labels)`` as two
    parallel lists.
    """
    file_names, labels = [], []
    with open(filepath, 'r') as handle:
        for line in handle:
            parts = line.strip().split(' ')
            file_names.append(parts[0])
            labels.append(int(parts[1]))
    return file_names, labels
def get_img_transform(train=False, image_size=224, crop=False, jitter=0):
    """Build a torchvision pipeline PIL image -> normalised tensor.

    Training mode adds random crop/flip/color-jitter augmentation; eval mode
    is a plain resize + normalise.  ImageNet mean/std are used.
    """
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    if train:
        if crop:
            img_transform = [transforms.RandomResizedCrop(image_size, scale=[0.8, 1.0])]
        else:
            img_transform = [transforms.Resize((image_size, image_size))]
        if jitter > 0:
            # hue must stay within [-0.5, 0.5], hence the clamp.
            img_transform.append(transforms.ColorJitter(brightness=jitter,
                                                        contrast=jitter,
                                                        saturation=jitter,
                                                        hue=min(0.5, jitter)))
        img_transform += [transforms.RandomHorizontalFlip(),
                          transforms.ToTensor(),
                          transforms.Normalize(mean, std)]
        img_transform = transforms.Compose(img_transform)
    else:
        img_transform = transforms.Compose([
            transforms.Resize((image_size, image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
    return img_transform

def get_pre_transform(image_size=224, crop=False, jitter=0):
    """Augmentation-only pipeline: PIL image -> uint8 ndarray (no tensor/normalise).

    Used before frequency-domain mixing, which operates on raw pixel arrays.
    """
    if crop:
        img_transform = [transforms.RandomResizedCrop(image_size, scale=[0.8, 1.0])]
    else:
        img_transform = [transforms.Resize((image_size, image_size))]
    if jitter > 0:
        img_transform.append(transforms.ColorJitter(brightness=jitter,
                                                    contrast=jitter,
                                                    saturation=jitter,
                                                    hue=min(0.5, jitter)))
    img_transform += [transforms.RandomHorizontalFlip(), lambda x: np.asarray(x)]
    img_transform = transforms.Compose(img_transform)
    return img_transform

def get_post_transform(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    """ndarray/PIL -> normalised tensor, applied after frequency-domain mixing.

    NOTE(review): the mutable list defaults are never mutated here, so they
    are safe, but immutable tuples would be the conventional choice.
    """
    img_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    return img_transform
def get_spectrum(img):
    """Return the (amplitude, phase) of the 2-D FFT of ``img``."""
    spectrum = np.fft.fft2(img)
    return np.abs(spectrum), np.angle(spectrum)
def get_centralized_spectrum(img):
    """Like :func:`get_spectrum`, but with the zero frequency shifted to the centre."""
    spectrum = np.fft.fftshift(np.fft.fft2(img))
    return np.abs(spectrum), np.angle(spectrum)
def colorful_spectrum_mix(img1, img2, alpha, ratio=1.0):
    """Swap low-frequency amplitude content between two images.

    Both inputs are ndarrays of shape ``[H, W, C]``.  A centred window
    covering ``sqrt(ratio)`` of each spatial dimension has its FFT amplitudes
    linearly mixed (strength ``lam ~ U(0, alpha)``) while each image keeps
    its own phase.  Returns ``(img21, img12)`` as ``uint8`` arrays: ``img21``
    combines img1's phase with its mixed amplitude, and vice versa.
    """
    lam = np.random.uniform(0, alpha)

    assert img1.shape == img2.shape
    h, w, c = img1.shape
    crop_h = int(h * sqrt(ratio))
    crop_w = int(w * sqrt(ratio))
    rows = slice(h // 2 - crop_h // 2, h // 2 - crop_h // 2 + crop_h)
    cols = slice(w // 2 - crop_w // 2, w // 2 - crop_w // 2 + crop_w)

    fft1 = np.fft.fft2(img1, axes=(0, 1))
    fft2 = np.fft.fft2(img2, axes=(0, 1))
    abs1, pha1 = np.abs(fft1), np.angle(fft1)
    abs2, pha2 = np.abs(fft2), np.angle(fft2)

    # Centre the spectra so low frequencies sit inside the middle window.
    abs1 = np.fft.fftshift(abs1, axes=(0, 1))
    abs2 = np.fft.fftshift(abs2, axes=(0, 1))

    orig1 = abs1.copy()
    orig2 = abs2.copy()
    abs1[rows, cols] = lam * orig2[rows, cols] + (1 - lam) * orig1[rows, cols]
    abs2[rows, cols] = lam * orig1[rows, cols] + (1 - lam) * orig2[rows, cols]

    abs1 = np.fft.ifftshift(abs1, axes=(0, 1))
    abs2 = np.fft.ifftshift(abs2, axes=(0, 1))

    # Recombine each mixed amplitude with the corresponding original phase.
    mixed1 = np.real(np.fft.ifft2(abs1 * np.exp(1j * pha1), axes=(0, 1)))
    mixed2 = np.real(np.fft.ifft2(abs2 * np.exp(1j * pha2), axes=(0, 1)))

    img21 = np.uint8(np.clip(mixed1, 0, 255))
    img12 = np.uint8(np.clip(mixed2, 0, 255))

    return img21, img12
import random
import torch
import numpy as np
from math import sqrt
def dataset_info(filepath):
with open(filepath, 'r') as f:
images_list = f.readlines()
file_names = []
labels = []
for row in images_list:
row = row.strip().split(' ')
file_names.append(row[0])
labels.append(int(row[1]))
return file_names, labels
def get_img_transform(train=False, image_size=224, crop=False, jitter=0):
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if train:
if crop:
img_transform = [transforms.RandomResizedCrop(image_size, scale=[0.8, 1.0])]
else:
img_transform = [transforms.Resize((image_size, image_size))]
if jitter > 0:
img_transform.append(transforms.ColorJitter(brightness=jitter,
contrast=jitter,
saturation=jitter,
hue=min(0.5, jitter)))
img_transform += [transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean, std)]
img_transform = transforms.Compose(img_transform)
else:
img_transform = transforms.Compose([
transforms.Resize((image_size, image_size)),
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
return img_transform
def get_pre_transform(image_size=224, crop=False, jitter=0):
if crop:
img_transform = [transforms.RandomResizedCrop(image_size, scale=[0.8, 1.0])]
else:
img_transform = [transforms.Resize((image_size, image_size))]
if jitter > 0:
img_transform.append(transforms.ColorJitter(brightness=jitter,
contrast=jitter,
saturation=jitter,
hue=min(0.5, jitter)))
img_transform += [transforms.RandomHorizontalFlip(), lambda x: np.asarray(x)]
img_transform = transforms.Compose(img_transform)
return img_transform
def get_post_transform(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
return img_transform
def get_spectrum(img):
img_fft = np.fft.fft2(img)
img_abs = np.abs(img_fft)
img_pha = np.angle(img_fft)
return img_abs, img_pha
def get_centralized_spectrum(img):
img_fft = np.fft.fft2(img)
img_fft = np.fft.fftshift(img_fft)
img_abs = np.abs(img_fft)
img_pha = np.angle(img_fft)
return img_abs, img_pha
def colorful_spectrum_mix(img1, img2, alpha, ratio=1.0):
lam = np.random.uniform(0, alpha)
assert img1.shape == img2.shape
h, w, c = img1.shape
h_crop = int(h * sqrt(ratio))
w_crop = int(w * sqrt(ratio))
h_start = h // 2 - h_crop // 2
w_start = w // 2 - w_crop // 2
img1_fft = np.fft.fft2(img1, axes=(0, 1))
img2_fft = np.fft.fft2(img2, axes=(0, 1))
img1_abs, img1_pha = np.abs(img1_fft), np.angle(img1_fft)
img2_abs, img2_pha = np.abs(img2_fft), np.angle(img2_fft)
img1_abs = np.fft.fftshift(img1_abs, axes=(0, 1))
img2_abs = np.fft.fftshift(img2_abs, axes=(0, 1))
img1_abs_ = np.copy(img1_abs)
img2_abs_ = np.copy(img2_abs)
img1_abs[h_start:h_start + h_crop, w_start:w_start + w_crop] = \
lam * img2_abs_[h_start:h_start + h_crop, w_start:w_start + w_crop] + (1 - lam) * img1_abs_[
h_start:h_start + h_crop,
w_start:w_start + w_crop]
img2_abs[h_start:h_start + h_crop, w_start:w_start + w_crop] = \
lam * img1_abs_[h_start:h_start + h_crop, w_start:w_start + w_crop] + (1 - lam) * img2_abs_[
h_start:h_start + h_crop,
w_start:w_start + w_crop]
img1_abs = np.fft.ifftshift(img1_abs, axes=(0, 1))
img2_abs = np.fft.ifftshift(img2_abs, axes=(0, 1))
img21 = img1_abs * (np.e ** (1j * img1_pha))
img12 = img2_abs * (np.e ** (1j * img2_pha))
img21 = np.real(np.fft.ifft2(img21, axes=(0, 1)))
img12 = np.real(np.fft.ifft2(img12, axes=(0, 1)))
img21 = np.uint8(np.clip(img21, 0, 255))
img12 = np.uint8(np.clip(img12, 0, 255))
return img21, img12 | true | true |
1c3aaee898c9af4b492728a688ea91e279cfa236 | 59,831 | py | Python | pysnmp/XUPS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/XUPS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/XUPS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module XUPS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/XUPS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:38:10 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint")
ifDescr, ifIndex = mibBuilder.importSymbols("IF-MIB", "ifDescr", "ifIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, enterprises, NotificationType, Integer32, MibIdentifier, iso, TimeTicks, Counter64, Unsigned32, ModuleIdentity, ObjectIdentity, Counter32, NotificationType, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "enterprises", "NotificationType", "Integer32", "MibIdentifier", "iso", "TimeTicks", "Counter64", "Unsigned32", "ModuleIdentity", "ObjectIdentity", "Counter32", "NotificationType", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
powerware = MibIdentifier((1, 3, 6, 1, 4, 1, 534))
xups = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1))
xupsIdent = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 1))
xupsBattery = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 2))
xupsInput = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 3))
xupsOutput = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 4))
xupsBypass = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 5))
xupsEnvironment = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 6))
xupsAlarm = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7))
xupsTest = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 8))
xupsControl = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 9))
xupsConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 10))
xupsTrapControl = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 11))
xupsRecep = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 12))
xupsTopology = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 13))
xupsObjectId = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2))
powerwareEthernetSnmpAdapter = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 1))
powerwareNetworkSnmpAdapterEther = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 2))
powerwareNetworkSnmpAdapterToken = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 3))
onlinetDaemon = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 4))
connectUPSAdapterEthernet = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 5))
powerwareNetworkDigitalIOEther = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 6))
connectUPSAdapterTokenRing = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 7))
simpleSnmpAdapter = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 8))
xupsIdentManufacturer = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsIdentManufacturer.setStatus('mandatory')
xupsIdentModel = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsIdentModel.setStatus('mandatory')
xupsIdentSoftwareVersion = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsIdentSoftwareVersion.setStatus('mandatory')
xupsIdentOemCode = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsIdentOemCode.setStatus('mandatory')
xupsBatTimeRemaining = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBatTimeRemaining.setStatus('mandatory')
xupsBatVoltage = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBatVoltage.setStatus('mandatory')
xupsBatCurrent = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBatCurrent.setStatus('mandatory')
xupsBatCapacity = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBatCapacity.setStatus('mandatory')
xupsBatteryAbmStatus = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 2, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("batteryCharging", 1), ("batteryDischarging", 2), ("batteryFloating", 3), ("batteryResting", 4), ("unknown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBatteryAbmStatus.setStatus('mandatory')
xupsInputFrequency = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 3, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputFrequency.setStatus('mandatory')
xupsInputLineBads = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 3, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputLineBads.setStatus('mandatory')
xupsInputNumPhases = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 3, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputNumPhases.setStatus('mandatory')
xupsInputTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 3, 4), )
if mibBuilder.loadTexts: xupsInputTable.setStatus('mandatory')
xupsInputEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 3, 4, 1), ).setIndexNames((0, "XUPS-MIB", "xupsInputPhase"))
if mibBuilder.loadTexts: xupsInputEntry.setStatus('mandatory')
xupsInputPhase = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 3, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputPhase.setStatus('mandatory')
xupsInputVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 3, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputVoltage.setStatus('mandatory')
xupsInputCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 3, 4, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputCurrent.setStatus('mandatory')
xupsInputWatts = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 3, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputWatts.setStatus('mandatory')
xupsInputSource = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 3, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("other", 1), ("none", 2), ("primaryUtility", 3), ("bypassFeed", 4), ("secondaryUtility", 5), ("generator", 6), ("flywheel", 7), ("fuelcell", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputSource.setStatus('mandatory')
xupsOutputLoad = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 4, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 200))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputLoad.setStatus('mandatory')
xupsOutputFrequency = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 4, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputFrequency.setStatus('mandatory')
xupsOutputNumPhases = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 4, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputNumPhases.setStatus('mandatory')
xupsOutputTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 4, 4), )
if mibBuilder.loadTexts: xupsOutputTable.setStatus('mandatory')
xupsOutputEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 4, 4, 1), ).setIndexNames((0, "XUPS-MIB", "xupsOutputPhase"))
if mibBuilder.loadTexts: xupsOutputEntry.setStatus('mandatory')
xupsOutputPhase = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 4, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputPhase.setStatus('mandatory')
xupsOutputVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 4, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputVoltage.setStatus('mandatory')
xupsOutputCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 4, 4, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputCurrent.setStatus('mandatory')
xupsOutputWatts = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 4, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputWatts.setStatus('mandatory')
xupsOutputSource = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 4, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("other", 1), ("none", 2), ("normal", 3), ("bypass", 4), ("battery", 5), ("booster", 6), ("reducer", 7), ("parallelCapacity", 8), ("parallelRedundant", 9), ("highEfficiencyMode", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputSource.setStatus('mandatory')
xupsBypassFrequency = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 5, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBypassFrequency.setStatus('mandatory')
xupsBypassNumPhases = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 5, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBypassNumPhases.setStatus('mandatory')
xupsBypassTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 5, 3), )
if mibBuilder.loadTexts: xupsBypassTable.setStatus('mandatory')
xupsBypassEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 5, 3, 1), ).setIndexNames((0, "XUPS-MIB", "xupsBypassPhase"))
if mibBuilder.loadTexts: xupsBypassEntry.setStatus('mandatory')
xupsBypassPhase = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 5, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBypassPhase.setStatus('mandatory')
xupsBypassVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 5, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBypassVoltage.setStatus('mandatory')
xupsEnvAmbientTemp = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsEnvAmbientTemp.setStatus('mandatory')
xupsEnvAmbientLowerLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvAmbientLowerLimit.setStatus('mandatory')
xupsEnvAmbientUpperLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvAmbientUpperLimit.setStatus('mandatory')
xupsEnvAmbientHumidity = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsEnvAmbientHumidity.setStatus('mandatory')
xupsEnvRemoteTemp = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsEnvRemoteTemp.setStatus('mandatory')
xupsEnvRemoteHumidity = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsEnvRemoteHumidity.setStatus('mandatory')
xupsEnvNumContacts = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsEnvNumContacts.setStatus('mandatory')
xupsContactSenseTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 6, 8), )
if mibBuilder.loadTexts: xupsContactSenseTable.setStatus('mandatory')
xupsContactsTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 6, 8, 1), ).setIndexNames((0, "XUPS-MIB", "xupsContactIndex"))
if mibBuilder.loadTexts: xupsContactsTableEntry.setStatus('mandatory')
xupsContactIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 6, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024)))
if mibBuilder.loadTexts: xupsContactIndex.setStatus('mandatory')
xupsContactType = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 6, 8, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("normallyOpen", 1), ("normallyClosed", 2), ("anyChange", 3), ("notUsed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsContactType.setStatus('mandatory')
xupsContactState = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 6, 8, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("open", 1), ("closed", 2), ("openWithNotice", 3), ("closedWithNotice", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsContactState.setStatus('mandatory')
xupsContactDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 6, 8, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsContactDescr.setStatus('mandatory')
xupsEnvRemoteTempLowerLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvRemoteTempLowerLimit.setStatus('mandatory')
xupsEnvRemoteTempUpperLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvRemoteTempUpperLimit.setStatus('mandatory')
xupsEnvRemoteHumidityLowerLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvRemoteHumidityLowerLimit.setStatus('mandatory')
xupsEnvRemoteHumidityUpperLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvRemoteHumidityUpperLimit.setStatus('mandatory')
xupsAlarms = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 7, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarms.setStatus('mandatory')
xupsAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 7, 2), )
if mibBuilder.loadTexts: xupsAlarmTable.setStatus('mandatory')
xupsAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 7, 2, 1), ).setIndexNames((0, "XUPS-MIB", "xupsAlarmID"))
if mibBuilder.loadTexts: xupsAlarmEntry.setStatus('mandatory')
xupsAlarmID = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmID.setStatus('mandatory')
xupsAlarmDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 2, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmDescr.setStatus('mandatory')
xupsAlarmTime = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 2, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmTime.setStatus('mandatory')
xupsOnBattery = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 3))
xupsLowBattery = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 4))
xupsUtilityPowerRestored = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 5))
xupsReturnFromLowBattery = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 6))
xupsOutputOverload = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 7))
xupsInternalFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 8))
xupsBatteryDischarged = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 9))
xupsInverterFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 10))
xupsOnBypass = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 11))
xupsBypassNotAvailable = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 12))
xupsOutputOff = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 13))
xupsInputFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 14))
xupsBuildingAlarm = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 15))
xupsShutdownImminent = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 16))
xupsOnInverter = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 17))
xupsAlarmNumEvents = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 7, 18), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmNumEvents.setStatus('mandatory')
xupsAlarmEventTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 7, 19), )
if mibBuilder.loadTexts: xupsAlarmEventTable.setStatus('mandatory')
xupsAlarmEventEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1), ).setIndexNames((0, "XUPS-MIB", "xupsAlarmEventID"))
if mibBuilder.loadTexts: xupsAlarmEventEntry.setStatus('mandatory')
xupsAlarmEventID = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 400))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmEventID.setStatus('deprecated')
xupsAlarmEventDateAndTime = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 22))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmEventDateAndTime.setStatus('deprecated')
xupsAlarmEventKind = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("occurred", 1), ("cleared", 2), ("unknown", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmEventKind.setStatus('deprecated')
xupsAlarmEventDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmEventDescr.setStatus('deprecated')
xupsAlarmEventMsg = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmEventMsg.setStatus('mandatory')
xupsBreakerOpen = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 20))
xupsAlarmEntryAdded = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 21))
xupsAlarmEntryRemoved = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 22))
xupsAlarmBatteryBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 23))
xupsOutputOffAsRequested = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 24))
xupsDiagnosticTestFailed = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 25))
xupsCommunicationsLost = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 26))
xupsUpsShutdownPending = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 27))
xupsAlarmTestInProgress = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 28))
xupsAmbientTempBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 29))
xupsLossOfRedundancy = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 30))
xupsAlarmTempBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 31))
xupsAlarmChargerFailed = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 32))
xupsAlarmFanFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 33))
xupsAlarmFuseFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 34))
xupsPowerSwitchBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 35))
xupsModuleFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 36))
xupsOnAlternatePowerSource = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 37))
xupsAltPowerNotAvailable = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 38))
xupsNoticeCondition = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 39))
xupsRemoteTempBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 40))
xupsRemoteHumidityBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 41))
xupsTestBattery = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 8, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("startTest", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsTestBattery.setStatus('mandatory')
xupsTestBatteryStatus = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 8, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("unknown", 1), ("passed", 2), ("failed", 3), ("inProgress", 4), ("notSupported", 5), ("inhibited", 6), ("scheduled", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsTestBatteryStatus.setStatus('mandatory')
xupsControlOutputOffDelay = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsControlOutputOffDelay.setStatus('mandatory')
xupsControlOutputOnDelay = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsControlOutputOnDelay.setStatus('mandatory')
xupsControlOutputOffTrapDelay = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsControlOutputOffTrapDelay.setStatus('mandatory')
xupsControlOutputOnTrapDelay = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsControlOutputOnTrapDelay.setStatus('deprecated')
xupsControlToBypassDelay = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsControlToBypassDelay.setStatus('mandatory')
xupsLoadShedSecsWithRestart = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsLoadShedSecsWithRestart.setStatus('mandatory')
xupsConfigOutputVoltage = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigOutputVoltage.setStatus('mandatory')
xupsConfigInputVoltage = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigInputVoltage.setStatus('mandatory')
xupsConfigOutputWatts = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigOutputWatts.setStatus('mandatory')
xupsConfigOutputFreq = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigOutputFreq.setStatus('mandatory')
xupsConfigDateAndTime = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 22))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsConfigDateAndTime.setStatus('mandatory')
xupsConfigLowOutputVoltageLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigLowOutputVoltageLimit.setStatus('mandatory')
xupsConfigHighOutputVoltageLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigHighOutputVoltageLimit.setStatus('mandatory')
xupsMaxTrapLevel = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 11, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("critical", 2), ("major", 3), ("allTraps", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsMaxTrapLevel.setStatus('mandatory')
xupsSendTrapType = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 11, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("stnd", 1), ("xups", 2), ("stndPlus", 3), ("xupsPlus", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsSendTrapType.setStatus('mandatory')
xupsTrapMessage = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 11, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79)))
if mibBuilder.loadTexts: xupsTrapMessage.setStatus('mandatory')
xupsNumReceptacles = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 12, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsNumReceptacles.setStatus('mandatory')
xupsRecepTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 12, 2), )
if mibBuilder.loadTexts: xupsRecepTable.setStatus('mandatory')
xupsRecepEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1), ).setIndexNames((0, "XUPS-MIB", "xupsRecepIndex"))
if mibBuilder.loadTexts: xupsRecepEntry.setStatus('mandatory')
xupsRecepIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsRecepIndex.setStatus('mandatory')
xupsRecepStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("pendingOff", 3), ("pendingOn", 4), ("unknown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsRecepStatus.setStatus('mandatory')
xupsRecepOffDelaySecs = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsRecepOffDelaySecs.setStatus('mandatory')
xupsRecepOnDelaySecs = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsRecepOnDelaySecs.setStatus('mandatory')
xupsRecepAutoOffDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 32767))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsRecepAutoOffDelay.setStatus('mandatory')
xupsRecepAutoOnDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 32767))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsRecepAutoOnDelay.setStatus('mandatory')
xupsRecepShedSecsWithRestart = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsRecepShedSecsWithRestart.setStatus('mandatory')
# --- xupsTopology scalars (OID 1.3.6.1.4.1.534.1.13.x) ---
# UPS topology identification and power-strategy control.
xupsTopologyType = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 13, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsTopologyType.setStatus('mandatory')
xupsTopoMachineCode = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 13, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsTopoMachineCode.setStatus('mandatory')
# Unit number within a multi-module system, 0..64 (read-only).
xupsTopoUnitNumber = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 13, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsTopoUnitNumber.setStatus('mandatory')
# Writable strategy selector: highAlert/standard/enableHighEfficiency/immediateHighEfficiency.
xupsTopoPowerStrategy = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 13, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("highAlert", 1), ("standard", 2), ("enableHighEfficiency", 3), ("immediateHighEfficiency", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsTopoPowerStrategy.setStatus('mandatory')
# --- Trap OID subtrees ---
# Three parallel notification roots: xupsTrapBasic (legacy ...534.1.0.0),
# xupsTrapDefined (...534.1.11.4.1, carries alarm varbinds) and
# xupsTrapPortN (...534.1.11.4.2, adds IF-MIB interface varbinds).
xupsNull = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 0))
xupsTrapBasic = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 0, 0))
xupsTrapSource = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 11, 4))
xupsTrapDefined = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1))
xupsTrapPortN = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2))
# --- xupstb* basic traps (rooted at xupsTrapBasic) ---
# Minimal notifications with no varbind objects attached.
# NOTE: trap numbers 18 and 19 are absent in the generated source; the
# sequence jumps from 17 (onInverter) to 20 (breakerOpen).
xupstbControlOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,1))
xupstbControlOn = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,2))
xupstbOnBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,3))
xupstbLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,4))
xupstbUtilityPowerRestored = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,5))
xupstbReturnFromLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,6))
xupstbOutputOverload = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,7))
xupstbInternalFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,8))
xupstbBatteryDischarged = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,9))
xupstbInverterFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,10))
xupstbOnBypass = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,11))
xupstbBypassNotAvailable = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,12))
xupstbOutputOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,13))
xupstbInputFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,14))
xupstbBuildingAlarm = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,15))
xupstbShutdownImminent = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,16))
xupstbOnInverter = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,17))
xupstbBreakerOpen = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,20))
xupstbAlarmEntryAdded = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,21))
xupstbAlarmEntryRemoved = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,22))
# --- xupstd* defined traps (rooted at xupsTrapDefined) ---
# Same events as the xupstb* series but each notification carries the
# alarm-identifying varbinds (xupsAlarmID, xupsAlarmDescr, xupsTrapMessage);
# the environment traps (29, 42, 43) add the relevant temperature/humidity
# readings and limits, and the contact traps (30, 31) carry contact state.
xupstdControlOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,1)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdControlOn = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,2)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOnBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,3)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,4)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdUtilityPowerRestored = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,5)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdReturnFromLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,6)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOutputOverload = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,7)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdInternalFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,8)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdBatteryDischarged = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,9)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdInverterFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,10)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOnBypass = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,11)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdBypassNotAvailable = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,12)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOutputOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,13)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdInputFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,14)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdBuildingAlarm = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,15)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdShutdownImminent = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,16)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOnInverter = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,17)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdBreakerOpen = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,20)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmEntryAdded = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,21)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmEntryRemoved = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,22)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmBatteryBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,23)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOutputOffAsRequested = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,24)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdDiagnosticTestFailed = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,25)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdCommunicationsLost = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,26)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdUpsShutdownPending = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,27)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmTestInProgress = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,28)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
# Environment trap: includes ambient temperature reading and its limits.
xupstdAmbientTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,29)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("XUPS-MIB", "xupsEnvAmbientTemp"), ("XUPS-MIB", "xupsEnvAmbientLowerLimit"), ("XUPS-MIB", "xupsEnvAmbientUpperLimit"))
# Contact traps: carry contact index/type/state/description instead of alarm IDs.
xupstdContactActiveNotice = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,30)).setObjects(("XUPS-MIB", "xupsContactIndex"), ("XUPS-MIB", "xupsContactType"), ("XUPS-MIB", "xupsContactState"), ("XUPS-MIB", "xupsContactDescr"))
xupstdContactInactiveNotice = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,31)).setObjects(("XUPS-MIB", "xupsContactIndex"), ("XUPS-MIB", "xupsContactType"), ("XUPS-MIB", "xupsContactState"), ("XUPS-MIB", "xupsContactDescr"))
xupstdLossOfRedundancy = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,32)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,33)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmChargerFailed = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,34)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmFanFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,35)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmFuseFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,36)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdPowerSwitchBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,37)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdModuleFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,38)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
# Adds xupsInputSource to identify the alternate power source in use.
xupstdOnAlternatePowerSource = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,39)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("XUPS-MIB", "xupsInputSource"))
xupstdAltPowerNotAvailable = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,40)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdNoticeCondition = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,41)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
# Remote environment traps: include remote temp/humidity readings and limits.
xupstdRemoteTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,42)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("XUPS-MIB", "xupsEnvRemoteTemp"), ("XUPS-MIB", "xupsEnvRemoteTempLowerLimit"), ("XUPS-MIB", "xupsEnvRemoteTempUpperLimit"))
xupstdRemoteHumidityBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,43)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("XUPS-MIB", "xupsEnvRemoteHumidity"), ("XUPS-MIB", "xupsEnvRemoteHumidityLowerLimit"), ("XUPS-MIB", "xupsEnvRemoteHumidityUpperLimit"))
# --- xupstp* per-port traps (rooted at xupsTrapPortN) ---
# Mirror of the xupstd* series but every notification additionally carries
# IF-MIB ifIndex/ifDescr to identify the reporting port. Note this series
# skips trap numbers 30 and 31 (the contact notices have no port variant
# in the generated source).
xupstpControlOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,1)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpControlOn = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,2)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOnBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,3)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,4)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpUtilityPowerRestored = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,5)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpReturnFromLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,6)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOutputOverload = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,7)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpInternalFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,8)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpBatteryDischarged = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,9)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpInverterFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,10)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOnBypass = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,11)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpBypassNotAvailable = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,12)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOutputOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,13)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpInputFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,14)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpBuildingAlarm = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,15)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpShutdownImminent = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,16)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOnInverter = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,17)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpBreakerOpen = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,20)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmEntryAdded = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,21)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmEntryRemoved = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,22)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmBatteryBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,23)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOutputOffAsRequested = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,24)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpDiagnosticTestFailed = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,25)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpCommunicationsLost = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,26)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpUpsShutdownPending = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,27)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmTestInProgress = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,28)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
# Environment trap: adds ambient temperature reading and limits.
xupstpAmbientTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,29)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"), ("XUPS-MIB", "xupsEnvAmbientTemp"), ("XUPS-MIB", "xupsEnvAmbientLowerLimit"), ("XUPS-MIB", "xupsEnvAmbientUpperLimit"))
xupstpLossOfRedundancy = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,32)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,33)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmChargerFailed = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,34)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmFanFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,35)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmFuseFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,36)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpPowerSwitchBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,37)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpModuleFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,38)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
# Adds xupsInputSource identifying the alternate source in use.
xupstpOnAlternatePowerSource = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,39)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("XUPS-MIB", "xupsInputSource"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAltPowerNotAvailable = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,40)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpNoticeCondition = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,41)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
# Remote environment traps: include remote temp/humidity readings and limits.
xupstpRemoteTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,42)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"), ("XUPS-MIB", "xupsEnvRemoteTemp"), ("XUPS-MIB", "xupsEnvRemoteTempLowerLimit"), ("XUPS-MIB", "xupsEnvRemoteTempUpperLimit"))
xupstpRemoteHumidityBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,43)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"), ("XUPS-MIB", "xupsEnvRemoteHumidity"), ("XUPS-MIB", "xupsEnvRemoteHumidityLowerLimit"), ("XUPS-MIB", "xupsEnvRemoteHumidityUpperLimit"))
# --- Symbol export ---
# Machine-generated registration of every XUPS-MIB object defined in this
# module with the pysnmp MIB builder. The export is split across two
# exportSymbols calls because of keyword-argument count; each keyword must
# match a name defined above — do not hand-edit.
mibBuilder.exportSymbols("XUPS-MIB", xupsOutputFrequency=xupsOutputFrequency, xupsObjectId=xupsObjectId, xupsBypassFrequency=xupsBypassFrequency, xupsContactState=xupsContactState, xupsControlOutputOnTrapDelay=xupsControlOutputOnTrapDelay, xupstdControlOn=xupstdControlOn, xupsEnvRemoteTempUpperLimit=xupsEnvRemoteTempUpperLimit, xupsEnvAmbientUpperLimit=xupsEnvAmbientUpperLimit, xupstpPowerSwitchBad=xupstpPowerSwitchBad, xupsBypassEntry=xupsBypassEntry, xupsAlarmFanFailure=xupsAlarmFanFailure, xupsInputSource=xupsInputSource, xupstbControlOff=xupstbControlOff, xupsAlarmTime=xupsAlarmTime, xupsOutputSource=xupsOutputSource, xupstdNoticeCondition=xupstdNoticeCondition, xupstdBypassNotAvailable=xupstdBypassNotAvailable, xupstdAlarmFuseFailure=xupstdAlarmFuseFailure, xupstdOnBattery=xupstdOnBattery, xupstbBypassNotAvailable=xupstbBypassNotAvailable, xupsControl=xupsControl, xupstdAltPowerNotAvailable=xupstdAltPowerNotAvailable, xupstdContactActiveNotice=xupstdContactActiveNotice, xupsAlarmID=xupsAlarmID, xupsTopology=xupsTopology, connectUPSAdapterEthernet=connectUPSAdapterEthernet, xupsEnvNumContacts=xupsEnvNumContacts, xupsOutputCurrent=xupsOutputCurrent, xupstpModuleFailure=xupstpModuleFailure, xupsAlarmChargerFailed=xupsAlarmChargerFailed, xupstdRemoteTempBad=xupstdRemoteTempBad, xupsBatteryDischarged=xupsBatteryDischarged, xupstdOutputOff=xupstdOutputOff, xupsAlarmEventKind=xupsAlarmEventKind, xupsIdent=xupsIdent, xupstpInputFailure=xupstpInputFailure, xupsInputNumPhases=xupsInputNumPhases, xupsInputTable=xupsInputTable, xupsEnvRemoteHumidityLowerLimit=xupsEnvRemoteHumidityLowerLimit, xupsTestBattery=xupsTestBattery, xupsTrapMessage=xupsTrapMessage, xupsInputFrequency=xupsInputFrequency, xupsContactDescr=xupsContactDescr, xupsEnvAmbientLowerLimit=xupsEnvAmbientLowerLimit, xupsTrapBasic=xupsTrapBasic, xupsEnvRemoteHumidity=xupsEnvRemoteHumidity, xupsSendTrapType=xupsSendTrapType, xupstpOutputOff=xupstpOutputOff, xupsBypassNotAvailable=xupsBypassNotAvailable, 
xupsOutputTable=xupsOutputTable, xupsEnvironment=xupsEnvironment, xupstdShutdownImminent=xupstdShutdownImminent, xupstpBreakerOpen=xupstpBreakerOpen, xupstpRemoteTempBad=xupstpRemoteTempBad, xupstbOutputOff=xupstbOutputOff, xupstdOnInverter=xupstdOnInverter, xupsInput=xupsInput, xupsOutputNumPhases=xupsOutputNumPhases, xupsOutputEntry=xupsOutputEntry, xupsConfigOutputVoltage=xupsConfigOutputVoltage, xupstdAlarmBatteryBad=xupstdAlarmBatteryBad, xupsAlarmTable=xupsAlarmTable, xupstpAlarmFuseFailure=xupstpAlarmFuseFailure, xupstdRemoteHumidityBad=xupstdRemoteHumidityBad, xupstbReturnFromLowBattery=xupstbReturnFromLowBattery, xupstpLossOfRedundancy=xupstpLossOfRedundancy, xupsAltPowerNotAvailable=xupsAltPowerNotAvailable, xupsBypassNumPhases=xupsBypassNumPhases, xupsOutputOverload=xupsOutputOverload, xupstdAlarmEntryAdded=xupstdAlarmEntryAdded, xupstdLossOfRedundancy=xupstdLossOfRedundancy, xupsContactType=xupsContactType, xupstdReturnFromLowBattery=xupstdReturnFromLowBattery, xupsConfigHighOutputVoltageLimit=xupsConfigHighOutputVoltageLimit, xupstpOnAlternatePowerSource=xupstpOnAlternatePowerSource, xupstbBuildingAlarm=xupstbBuildingAlarm, xupstdContactInactiveNotice=xupstdContactInactiveNotice, xupsRecepIndex=xupsRecepIndex, xupsAlarmEventEntry=xupsAlarmEventEntry, xupsIdentModel=xupsIdentModel, xupstdUtilityPowerRestored=xupstdUtilityPowerRestored, xupstdAlarmEntryRemoved=xupstdAlarmEntryRemoved, xupstdAlarmTestInProgress=xupstdAlarmTestInProgress, xupsBatVoltage=xupsBatVoltage, xupstpAlarmTempBad=xupstpAlarmTempBad, xupsAlarm=xupsAlarm, xupstdInputFailure=xupstdInputFailure, xupstpControlOff=xupstpControlOff, xupsInputEntry=xupsInputEntry, xupstpBypassNotAvailable=xupstpBypassNotAvailable, xupsEnvRemoteTempLowerLimit=xupsEnvRemoteTempLowerLimit, xupstpDiagnosticTestFailed=xupstpDiagnosticTestFailed, xupsInputFailure=xupsInputFailure, xupsAlarmEntry=xupsAlarmEntry, xupsIdentOemCode=xupsIdentOemCode, xupstbBatteryDischarged=xupstbBatteryDischarged, 
xupsBatteryAbmStatus=xupsBatteryAbmStatus, xupstdAmbientTempBad=xupstdAmbientTempBad, xupsBattery=xupsBattery, xupstpAmbientTempBad=xupstpAmbientTempBad, xupsAlarmEventMsg=xupsAlarmEventMsg, xupsRemoteHumidityBad=xupsRemoteHumidityBad, xupsAlarmEventID=xupsAlarmEventID, xupstpBatteryDischarged=xupstpBatteryDischarged, xupstdOnBypass=xupstdOnBypass, xupsRecepTable=xupsRecepTable, xupstpLowBattery=xupstpLowBattery, xupsNumReceptacles=xupsNumReceptacles, xupsNull=xupsNull, xupstbInputFailure=xupstbInputFailure, xupstdBreakerOpen=xupstdBreakerOpen, xupstpUpsShutdownPending=xupstpUpsShutdownPending, xupsReturnFromLowBattery=xupsReturnFromLowBattery, xupsConfigOutputFreq=xupsConfigOutputFreq, xupsRecepOffDelaySecs=xupsRecepOffDelaySecs, xupstpAlarmTestInProgress=xupstpAlarmTestInProgress, xupstdAlarmChargerFailed=xupstdAlarmChargerFailed, xupstpAltPowerNotAvailable=xupstpAltPowerNotAvailable, xupsOnAlternatePowerSource=xupsOnAlternatePowerSource, xupstbUtilityPowerRestored=xupstbUtilityPowerRestored, xupsAlarmEventDescr=xupsAlarmEventDescr, xupsUpsShutdownPending=xupsUpsShutdownPending, xupsAlarmFuseFailure=xupsAlarmFuseFailure, xupsRecepAutoOnDelay=xupsRecepAutoOnDelay, xupstdCommunicationsLost=xupstdCommunicationsLost, xupstdAlarmFanFailure=xupstdAlarmFanFailure, xups=xups, xupsContactSenseTable=xupsContactSenseTable, xupstbOnBypass=xupstbOnBypass, xupsEnvRemoteHumidityUpperLimit=xupsEnvRemoteHumidityUpperLimit, xupstdOnAlternatePowerSource=xupstdOnAlternatePowerSource, xupstpAlarmChargerFailed=xupstpAlarmChargerFailed, xupsTrapDefined=xupsTrapDefined, xupstpCommunicationsLost=xupstpCommunicationsLost, xupsAlarmEventTable=xupsAlarmEventTable, xupstpOutputOverload=xupstpOutputOverload, xupsBypass=xupsBypass, xupsAlarmTempBad=xupsAlarmTempBad, xupstdDiagnosticTestFailed=xupstdDiagnosticTestFailed, xupsRecep=xupsRecep, xupsLossOfRedundancy=xupsLossOfRedundancy, xupsBatCurrent=xupsBatCurrent, xupsTrapPortN=xupsTrapPortN, xupsBatTimeRemaining=xupsBatTimeRemaining, 
xupsAlarmEntryRemoved=xupsAlarmEntryRemoved, xupstpOutputOffAsRequested=xupstpOutputOffAsRequested, xupsAlarmDescr=xupsAlarmDescr, xupsEnvAmbientHumidity=xupsEnvAmbientHumidity, xupsEnvRemoteTemp=xupsEnvRemoteTemp, xupsBypassVoltage=xupsBypassVoltage, xupsBuildingAlarm=xupsBuildingAlarm, xupstbOnInverter=xupstbOnInverter, xupstbInternalFailure=xupstbInternalFailure, xupsRemoteTempBad=xupsRemoteTempBad, xupsControlOutputOffDelay=xupsControlOutputOffDelay, xupsTrapSource=xupsTrapSource, xupstbShutdownImminent=xupstbShutdownImminent, xupstdInverterFailure=xupstdInverterFailure, xupstdOutputOverload=xupstdOutputOverload, powerwareNetworkDigitalIOEther=powerwareNetworkDigitalIOEther, xupsInputLineBads=xupsInputLineBads, xupstpUtilityPowerRestored=xupstpUtilityPowerRestored, xupsAmbientTempBad=xupsAmbientTempBad, xupstdInternalFailure=xupstdInternalFailure, xupsTest=xupsTest, xupsOutputPhase=xupsOutputPhase, xupsInternalFailure=xupsInternalFailure, xupstbOnBattery=xupstbOnBattery, xupsOutputOff=xupsOutputOff, xupsRecepOnDelaySecs=xupsRecepOnDelaySecs, xupsConfig=xupsConfig, xupstbAlarmEntryRemoved=xupstbAlarmEntryRemoved, xupsRecepEntry=xupsRecepEntry, xupstdOutputOffAsRequested=xupstdOutputOffAsRequested, xupstbInverterFailure=xupstbInverterFailure, xupsTopoUnitNumber=xupsTopoUnitNumber, xupsNoticeCondition=xupsNoticeCondition, xupsContactIndex=xupsContactIndex, xupsConfigDateAndTime=xupsConfigDateAndTime, xupstpInternalFailure=xupstpInternalFailure, xupsPowerSwitchBad=xupsPowerSwitchBad, powerwareEthernetSnmpAdapter=powerwareEthernetSnmpAdapter, xupsOnBattery=xupsOnBattery, xupsLowBattery=xupsLowBattery, powerwareNetworkSnmpAdapterEther=powerwareNetworkSnmpAdapterEther, xupstpAlarmBatteryBad=xupstpAlarmBatteryBad, xupsTrapControl=xupsTrapControl, xupstpAlarmEntryRemoved=xupstpAlarmEntryRemoved, simpleSnmpAdapter=simpleSnmpAdapter, onlinetDaemon=onlinetDaemon, xupsBreakerOpen=xupsBreakerOpen, xupsTopoPowerStrategy=xupsTopoPowerStrategy, xupstbControlOn=xupstbControlOn, 
xupstdBuildingAlarm=xupstdBuildingAlarm, xupsOnInverter=xupsOnInverter, xupsControlOutputOffTrapDelay=xupsControlOutputOffTrapDelay, xupsAlarmBatteryBad=xupsAlarmBatteryBad, xupstbAlarmEntryAdded=xupstbAlarmEntryAdded, xupsAlarmEntryAdded=xupsAlarmEntryAdded, xupsInputVoltage=xupsInputVoltage, xupsConfigLowOutputVoltageLimit=xupsConfigLowOutputVoltageLimit, xupstdUpsShutdownPending=xupstdUpsShutdownPending, xupsAlarmEventDateAndTime=xupsAlarmEventDateAndTime, xupstbOutputOverload=xupstbOutputOverload, xupsInputWatts=xupsInputWatts, connectUPSAdapterTokenRing=connectUPSAdapterTokenRing, xupstpOnInverter=xupstpOnInverter, xupsInputPhase=xupsInputPhase, xupsContactsTableEntry=xupsContactsTableEntry, xupstpAlarmFanFailure=xupstpAlarmFanFailure, xupsLoadShedSecsWithRestart=xupsLoadShedSecsWithRestart, xupsEnvAmbientTemp=xupsEnvAmbientTemp, xupsRecepShedSecsWithRestart=xupsRecepShedSecsWithRestart, xupsOutputOffAsRequested=xupsOutputOffAsRequested, xupstpInverterFailure=xupstpInverterFailure, xupsBypassTable=xupsBypassTable, xupsRecepAutoOffDelay=xupsRecepAutoOffDelay, xupsModuleFailure=xupsModuleFailure, xupstdLowBattery=xupstdLowBattery, xupsAlarmNumEvents=xupsAlarmNumEvents, xupsUtilityPowerRestored=xupsUtilityPowerRestored, xupsConfigInputVoltage=xupsConfigInputVoltage, xupsInverterFailure=xupsInverterFailure, xupstpReturnFromLowBattery=xupstpReturnFromLowBattery, xupsOutput=xupsOutput, xupsTestBatteryStatus=xupsTestBatteryStatus, xupsOutputVoltage=xupsOutputVoltage, xupstpRemoteHumidityBad=xupstpRemoteHumidityBad, xupsOutputLoad=xupsOutputLoad, xupsBypassPhase=xupsBypassPhase, xupsIdentManufacturer=xupsIdentManufacturer, xupsAlarms=xupsAlarms, xupstdAlarmTempBad=xupstdAlarmTempBad, xupstpOnBypass=xupstpOnBypass, xupstpShutdownImminent=xupstpShutdownImminent, xupsOnBypass=xupsOnBypass, xupsBatCapacity=xupsBatCapacity, xupsTopoMachineCode=xupsTopoMachineCode, xupsRecepStatus=xupsRecepStatus, powerware=powerware, xupsInputCurrent=xupsInputCurrent, 
xupstbLowBattery=xupstbLowBattery, xupstdPowerSwitchBad=xupstdPowerSwitchBad, xupsShutdownImminent=xupsShutdownImminent, xupsAlarmTestInProgress=xupsAlarmTestInProgress, xupsCommunicationsLost=xupsCommunicationsLost, powerwareNetworkSnmpAdapterToken=powerwareNetworkSnmpAdapterToken, xupstpAlarmEntryAdded=xupstpAlarmEntryAdded, xupsConfigOutputWatts=xupsConfigOutputWatts, xupstbBreakerOpen=xupstbBreakerOpen, xupstpOnBattery=xupstpOnBattery, xupsDiagnosticTestFailed=xupsDiagnosticTestFailed, xupstpControlOn=xupstpControlOn, xupsControlToBypassDelay=xupsControlToBypassDelay)
mibBuilder.exportSymbols("XUPS-MIB", xupstpBuildingAlarm=xupstpBuildingAlarm, xupsControlOutputOnDelay=xupsControlOutputOnDelay, xupstdBatteryDischarged=xupstdBatteryDischarged, xupstdControlOff=xupstdControlOff, xupsOutputWatts=xupsOutputWatts, xupsTopologyType=xupsTopologyType, xupstdModuleFailure=xupstdModuleFailure, xupstpNoticeCondition=xupstpNoticeCondition, xupsIdentSoftwareVersion=xupsIdentSoftwareVersion, xupsMaxTrapLevel=xupsMaxTrapLevel)
| 157.865435 | 10,518 | 0.73619 |
# --- Symbol imports from prerequisite MIB modules ---
# Each importSymbols call returns the requested managed-object classes/values
# in request order; the LHS tuple must therefore match the argument list.
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint")
ifDescr, ifIndex = mibBuilder.importSymbols("IF-MIB", "ifDescr", "ifIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
# NOTE: the generated original requested "NotificationType" twice from
# SNMPv2-SMI; the redundant second occurrence has been removed from both the
# LHS tuple and the argument list (binding was idempotent, so behavior is
# unchanged).
IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, enterprises, NotificationType, Integer32, MibIdentifier, iso, TimeTicks, Counter64, Unsigned32, ModuleIdentity, ObjectIdentity, Counter32, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "enterprises", "NotificationType", "Integer32", "MibIdentifier", "iso", "TimeTicks", "Counter64", "Unsigned32", "ModuleIdentity", "ObjectIdentity", "Counter32", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# --- XUPS-MIB object-identifier tree (Powerware enterprise arc 1.3.6.1.4.1.534) ---
powerware = MibIdentifier((1, 3, 6, 1, 4, 1, 534))
xups = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1))
# Functional sub-groups under xups (534.1.*); each is populated with
# scalars/tables/notifications further below in this module.
xupsIdent = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 1))
xupsBattery = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 2))
xupsInput = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 3))
xupsOutput = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 4))
xupsBypass = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 5))
xupsEnvironment = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 6))
xupsAlarm = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7))
xupsTest = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 8))
xupsControl = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 9))
xupsConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 10))
xupsTrapControl = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 11))
xupsRecep = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 12))
xupsTopology = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 13))
# Product/agent identities registered under 534.2.* -- presumably used as
# sysObjectID values for the various SNMP adapter products (verify against
# the original MIB text); they carry no data objects of their own.
xupsObjectId = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2))
powerwareEthernetSnmpAdapter = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 1))
powerwareNetworkSnmpAdapterEther = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 2))
powerwareNetworkSnmpAdapterToken = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 3))
onlinetDaemon = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 4))
connectUPSAdapterEthernet = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 5))
powerwareNetworkDigitalIOEther = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 6))
connectUPSAdapterTokenRing = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 7))
simpleSnmpAdapter = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 2, 8))
# --- xupsIdent group (534.1.1): read-only identification strings ---
xupsIdentManufacturer = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsIdentManufacturer.setStatus('mandatory')
xupsIdentModel = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsIdentModel.setStatus('mandatory')
xupsIdentSoftwareVersion = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsIdentSoftwareVersion.setStatus('mandatory')
xupsIdentOemCode = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsIdentOemCode.setStatus('mandatory')
# --- xupsBattery group (534.1.2): read-only battery state readings ---
xupsBatTimeRemaining = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBatTimeRemaining.setStatus('mandatory')
xupsBatVoltage = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBatVoltage.setStatus('mandatory')
# Current may be negative (range includes -2**31), unlike the other readings.
xupsBatCurrent = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBatCurrent.setStatus('mandatory')
# Capacity is a 0..100 value (a percentage, presumably -- confirm in MIB text).
xupsBatCapacity = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBatCapacity.setStatus('mandatory')
# Advanced Battery Management state: enumerated charging/discharging/etc.
xupsBatteryAbmStatus = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 2, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("batteryCharging", 1), ("batteryDischarging", 2), ("batteryFloating", 3), ("batteryResting", 4), ("unknown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBatteryAbmStatus.setStatus('mandatory')
# --- xupsInput group (534.1.3): input readings plus a per-phase table ---
xupsInputFrequency = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 3, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputFrequency.setStatus('mandatory')
# Counter of input out-of-tolerance events (monotonically increasing Counter32).
xupsInputLineBads = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 3, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputLineBads.setStatus('mandatory')
xupsInputNumPhases = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 3, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputNumPhases.setStatus('mandatory')
# Per-phase input table, indexed by xupsInputPhase (0..6).
xupsInputTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 3, 4), )
if mibBuilder.loadTexts: xupsInputTable.setStatus('mandatory')
xupsInputEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 3, 4, 1), ).setIndexNames((0, "XUPS-MIB", "xupsInputPhase"))
if mibBuilder.loadTexts: xupsInputEntry.setStatus('mandatory')
xupsInputPhase = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 3, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputPhase.setStatus('mandatory')
xupsInputVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 3, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputVoltage.setStatus('mandatory')
xupsInputCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 3, 4, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputCurrent.setStatus('mandatory')
xupsInputWatts = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 3, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputWatts.setStatus('mandatory')
# Enumerated source currently feeding the UPS input.
xupsInputSource = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 3, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("other", 1), ("none", 2), ("primaryUtility", 3), ("bypassFeed", 4), ("secondaryUtility", 5), ("generator", 6), ("flywheel", 7), ("fuelcell", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsInputSource.setStatus('mandatory')
# --- xupsOutput group (534.1.4): output readings plus a per-phase table
# (mirrors the structure of the xupsInput group above) ---
xupsOutputLoad = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 4, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 200))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputLoad.setStatus('mandatory')
xupsOutputFrequency = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 4, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputFrequency.setStatus('mandatory')
xupsOutputNumPhases = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 4, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputNumPhases.setStatus('mandatory')
# Per-phase output table, indexed by xupsOutputPhase (0..6).
xupsOutputTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 4, 4), )
if mibBuilder.loadTexts: xupsOutputTable.setStatus('mandatory')
xupsOutputEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 4, 4, 1), ).setIndexNames((0, "XUPS-MIB", "xupsOutputPhase"))
if mibBuilder.loadTexts: xupsOutputEntry.setStatus('mandatory')
xupsOutputPhase = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 4, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputPhase.setStatus('mandatory')
xupsOutputVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 4, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputVoltage.setStatus('mandatory')
xupsOutputCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 4, 4, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputCurrent.setStatus('mandatory')
xupsOutputWatts = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 4, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputWatts.setStatus('mandatory')
# Enumerated source currently supplying the output (normal/bypass/battery/...).
xupsOutputSource = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 4, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("other", 1), ("none", 2), ("normal", 3), ("bypass", 4), ("battery", 5), ("booster", 6), ("reducer", 7), ("parallelCapacity", 8), ("parallelRedundant", 9), ("highEfficiencyMode", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsOutputSource.setStatus('mandatory')
# --- xupsBypass group (534.1.5): bypass feed readings plus per-phase table ---
xupsBypassFrequency = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 5, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBypassFrequency.setStatus('mandatory')
xupsBypassNumPhases = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 5, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBypassNumPhases.setStatus('mandatory')
# Per-phase bypass table, indexed by xupsBypassPhase; only voltage is tabulated.
xupsBypassTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 5, 3), )
if mibBuilder.loadTexts: xupsBypassTable.setStatus('mandatory')
xupsBypassEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 5, 3, 1), ).setIndexNames((0, "XUPS-MIB", "xupsBypassPhase"))
if mibBuilder.loadTexts: xupsBypassEntry.setStatus('mandatory')
xupsBypassPhase = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 5, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBypassPhase.setStatus('mandatory')
xupsBypassVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 5, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsBypassVoltage.setStatus('mandatory')
# --- xupsEnvironment group (534.1.6): ambient/remote temperature & humidity,
# writable alarm limits, and a contact-sense table ---
xupsEnvAmbientTemp = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsEnvAmbientTemp.setStatus('mandatory')
# Lower/upper limits are read-write (operator-settable thresholds).
xupsEnvAmbientLowerLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvAmbientLowerLimit.setStatus('mandatory')
xupsEnvAmbientUpperLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvAmbientUpperLimit.setStatus('mandatory')
xupsEnvAmbientHumidity = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsEnvAmbientHumidity.setStatus('mandatory')
xupsEnvRemoteTemp = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsEnvRemoteTemp.setStatus('mandatory')
xupsEnvRemoteHumidity = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsEnvRemoteHumidity.setStatus('mandatory')
xupsEnvNumContacts = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsEnvNumContacts.setStatus('mandatory')
# Contact-sense table, indexed by xupsContactIndex (1..1024).
xupsContactSenseTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 6, 8), )
if mibBuilder.loadTexts: xupsContactSenseTable.setStatus('mandatory')
xupsContactsTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 6, 8, 1), ).setIndexNames((0, "XUPS-MIB", "xupsContactIndex"))
if mibBuilder.loadTexts: xupsContactsTableEntry.setStatus('mandatory')
# Index column: note no setMaxAccess call here (pure index column).
xupsContactIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 6, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024)))
if mibBuilder.loadTexts: xupsContactIndex.setStatus('mandatory')
xupsContactType = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 6, 8, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("normallyOpen", 1), ("normallyClosed", 2), ("anyChange", 3), ("notUsed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsContactType.setStatus('mandatory')
xupsContactState = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 6, 8, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("open", 1), ("closed", 2), ("openWithNotice", 3), ("closedWithNotice", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsContactState.setStatus('mandatory')
xupsContactDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 6, 8, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsContactDescr.setStatus('mandatory')
# Writable limits for the remote temperature/humidity probes.
xupsEnvRemoteTempLowerLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvRemoteTempLowerLimit.setStatus('mandatory')
xupsEnvRemoteTempUpperLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-100, 200))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvRemoteTempUpperLimit.setStatus('mandatory')
xupsEnvRemoteHumidityLowerLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvRemoteHumidityLowerLimit.setStatus('mandatory')
xupsEnvRemoteHumidityUpperLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 6, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsEnvRemoteHumidityUpperLimit.setStatus('mandatory')
# --- xupsAlarm group (534.1.7): active-alarm table, alarm-event log, and the
# registry of well-known alarm OIDs ---
xupsAlarms = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 7, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarms.setStatus('mandatory')
# Active-alarm table, indexed by xupsAlarmID.
xupsAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 7, 2), )
if mibBuilder.loadTexts: xupsAlarmTable.setStatus('mandatory')
xupsAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 7, 2, 1), ).setIndexNames((0, "XUPS-MIB", "xupsAlarmID"))
if mibBuilder.loadTexts: xupsAlarmEntry.setStatus('mandatory')
xupsAlarmID = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmID.setStatus('mandatory')
# OID-valued column; presumably holds one of the well-known alarm identifiers
# defined below (534.1.7.3..41) -- confirm against the original MIB text.
xupsAlarmDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 2, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmDescr.setStatus('mandatory')
xupsAlarmTime = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 2, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmTime.setStatus('mandatory')
# Well-known alarm identifiers (plain OIDs, not accessible objects).
xupsOnBattery = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 3))
xupsLowBattery = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 4))
xupsUtilityPowerRestored = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 5))
xupsReturnFromLowBattery = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 6))
xupsOutputOverload = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 7))
xupsInternalFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 8))
xupsBatteryDischarged = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 9))
xupsInverterFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 10))
xupsOnBypass = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 11))
xupsBypassNotAvailable = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 12))
xupsOutputOff = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 13))
xupsInputFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 14))
xupsBuildingAlarm = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 15))
xupsShutdownImminent = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 16))
xupsOnInverter = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 17))
# Alarm-event log (most columns are marked deprecated below).
xupsAlarmNumEvents = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 7, 18), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmNumEvents.setStatus('mandatory')
xupsAlarmEventTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 7, 19), )
if mibBuilder.loadTexts: xupsAlarmEventTable.setStatus('mandatory')
xupsAlarmEventEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1), ).setIndexNames((0, "XUPS-MIB", "xupsAlarmEventID"))
if mibBuilder.loadTexts: xupsAlarmEventEntry.setStatus('mandatory')
xupsAlarmEventID = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 400))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmEventID.setStatus('deprecated')
xupsAlarmEventDateAndTime = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 22))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmEventDateAndTime.setStatus('deprecated')
xupsAlarmEventKind = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("occurred", 1), ("cleared", 2), ("unknown", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmEventKind.setStatus('deprecated')
xupsAlarmEventDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmEventDescr.setStatus('deprecated')
# The free-text message column is the only event column still 'mandatory'.
xupsAlarmEventMsg = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 7, 19, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsAlarmEventMsg.setStatus('mandatory')
# Additional well-known alarm identifiers (20..41).
xupsBreakerOpen = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 20))
xupsAlarmEntryAdded = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 21))
xupsAlarmEntryRemoved = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 22))
xupsAlarmBatteryBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 23))
xupsOutputOffAsRequested = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 24))
xupsDiagnosticTestFailed = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 25))
xupsCommunicationsLost = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 26))
xupsUpsShutdownPending = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 27))
xupsAlarmTestInProgress = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 28))
xupsAmbientTempBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 29))
xupsLossOfRedundancy = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 30))
xupsAlarmTempBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 31))
xupsAlarmChargerFailed = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 32))
xupsAlarmFanFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 33))
xupsAlarmFuseFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 34))
xupsPowerSwitchBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 35))
xupsModuleFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 36))
xupsOnAlternatePowerSource = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 37))
xupsAltPowerNotAvailable = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 38))
xupsNoticeCondition = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 39))
xupsRemoteTempBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 40))
xupsRemoteHumidityBad = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 7, 41))
# --- xupsTest group (534.1.8): battery self-test trigger and status ---
# Write-only-in-practice trigger: setting startTest(1) initiates a test.
xupsTestBattery = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 8, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("startTest", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsTestBattery.setStatus('mandatory')
xupsTestBatteryStatus = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 8, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("unknown", 1), ("passed", 2), ("failed", 3), ("inProgress", 4), ("notSupported", 5), ("inhibited", 6), ("scheduled", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsTestBatteryStatus.setStatus('mandatory')
# --- xupsControl group (534.1.9): writable output/bypass control delays ---
xupsControlOutputOffDelay = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsControlOutputOffDelay.setStatus('mandatory')
xupsControlOutputOnDelay = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsControlOutputOnDelay.setStatus('mandatory')
xupsControlOutputOffTrapDelay = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsControlOutputOffTrapDelay.setStatus('mandatory')
# Marked deprecated in the MIB (status set below).
xupsControlOutputOnTrapDelay = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsControlOutputOnTrapDelay.setStatus('deprecated')
xupsControlToBypassDelay = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsControlToBypassDelay.setStatus('mandatory')
xupsLoadShedSecsWithRestart = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 9, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsLoadShedSecsWithRestart.setStatus('mandatory')
# --- xupsConfig group (534.1.10): nominal/configured ratings; only the
# date-and-time object is writable ---
xupsConfigOutputVoltage = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigOutputVoltage.setStatus('mandatory')
xupsConfigInputVoltage = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigInputVoltage.setStatus('mandatory')
xupsConfigOutputWatts = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigOutputWatts.setStatus('mandatory')
xupsConfigOutputFreq = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigOutputFreq.setStatus('mandatory')
# 0..22-char DisplayString, same size constraint as xupsAlarmEventDateAndTime.
xupsConfigDateAndTime = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 22))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsConfigDateAndTime.setStatus('mandatory')
xupsConfigLowOutputVoltageLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigLowOutputVoltageLimit.setStatus('mandatory')
xupsConfigHighOutputVoltageLimit = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 10, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsConfigHighOutputVoltageLimit.setStatus('mandatory')
# --- xupsTrapControl group (534.1.11): trap filtering/format configuration ---
xupsMaxTrapLevel = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 11, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("critical", 2), ("major", 3), ("allTraps", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsMaxTrapLevel.setStatus('mandatory')
# Selects which trap flavor(s) the agent emits (standard UPS-MIB vs. XUPS).
xupsSendTrapType = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 11, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("stnd", 1), ("xups", 2), ("stndPlus", 3), ("xupsPlus", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsSendTrapType.setStatus('mandatory')
# No setMaxAccess here (generated as-is); used as a varbind in xupstd* traps.
xupsTrapMessage = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 11, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79)))
if mibBuilder.loadTexts: xupsTrapMessage.setStatus('mandatory')
# --- xupsRecep group (534.1.12): per-receptacle status and on/off delays ---
xupsNumReceptacles = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 12, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsNumReceptacles.setStatus('mandatory')
# Receptacle table, indexed by xupsRecepIndex (1..64).
xupsRecepTable = MibTable((1, 3, 6, 1, 4, 1, 534, 1, 12, 2), )
if mibBuilder.loadTexts: xupsRecepTable.setStatus('mandatory')
xupsRecepEntry = MibTableRow((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1), ).setIndexNames((0, "XUPS-MIB", "xupsRecepIndex"))
if mibBuilder.loadTexts: xupsRecepEntry.setStatus('mandatory')
xupsRecepIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsRecepIndex.setStatus('mandatory')
xupsRecepStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("pendingOff", 3), ("pendingOn", 4), ("unknown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsRecepStatus.setStatus('mandatory')
# Delay columns allow -1 (presumably meaning "no action scheduled" -- confirm
# against the original MIB DESCRIPTION clauses).
xupsRecepOffDelaySecs = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsRecepOffDelaySecs.setStatus('mandatory')
xupsRecepOnDelaySecs = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsRecepOnDelaySecs.setStatus('mandatory')
xupsRecepAutoOffDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 32767))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsRecepAutoOffDelay.setStatus('mandatory')
xupsRecepAutoOnDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 32767))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsRecepAutoOnDelay.setStatus('mandatory')
xupsRecepShedSecsWithRestart = MibTableColumn((1, 3, 6, 1, 4, 1, 534, 1, 12, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsRecepShedSecsWithRestart.setStatus('mandatory')
# --- xupsTopology group (534.1.13): UPS topology/machine identity and the
# writable power strategy ---
xupsTopologyType = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 13, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsTopologyType.setStatus('mandatory')
xupsTopoMachineCode = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 13, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsTopoMachineCode.setStatus('mandatory')
xupsTopoUnitNumber = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 13, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xupsTopoUnitNumber.setStatus('mandatory')
xupsTopoPowerStrategy = MibScalar((1, 3, 6, 1, 4, 1, 534, 1, 13, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("highAlert", 1), ("standard", 2), ("enableHighEfficiency", 3), ("immediateHighEfficiency", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xupsTopoPowerStrategy.setStatus('mandatory')
# --- Trap registration arcs and "basic" (no-varbind) notifications ---
# The generator encodes SMIv1 TRAP-TYPEs as NotificationTypes whose OID is the
# enterprise arc plus a (0, specific-trap) suffix.
xupsNull = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 0))
xupsTrapBasic = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 0, 0))
xupsTrapSource = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 11, 4))
xupsTrapDefined = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1))
xupsTrapPortN = MibIdentifier((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2))
# xupstb* = "trap, basic" notifications under xupsTrapBasic; these carry no
# bound objects. Specific-trap numbers mirror the alarm OIDs (note 18/19 are
# intentionally absent; the series jumps from 17 to 20).
xupstbControlOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,1))
xupstbControlOn = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,2))
xupstbOnBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,3))
xupstbLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,4))
xupstbUtilityPowerRestored = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,5))
xupstbReturnFromLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,6))
xupstbOutputOverload = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,7))
xupstbInternalFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,8))
xupstbBatteryDischarged = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,9))
xupstbInverterFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,10))
xupstbOnBypass = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,11))
xupstbBypassNotAvailable = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,12))
xupstbOutputOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,13))
xupstbInputFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,14))
xupstbBuildingAlarm = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,15))
xupstbShutdownImminent = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,16))
xupstbOnInverter = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,17))
xupstbBreakerOpen = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,20))
xupstbAlarmEntryAdded = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,21))
xupstbAlarmEntryRemoved = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 0, 0) + (0,22))
# --- "Defined" (xupstd*) notifications under xupsTrapDefined ---
# Same specific-trap numbering as the xupstb* series, but each trap binds the
# varbinds xupsAlarmID, xupsAlarmDescr and xupsTrapMessage. (The series
# continues beyond this point in the file.)
xupstdControlOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,1)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdControlOn = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,2)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOnBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,3)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,4)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdUtilityPowerRestored = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,5)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdReturnFromLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,6)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOutputOverload = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,7)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdInternalFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,8)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdBatteryDischarged = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,9)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdInverterFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,10)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOnBypass = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,11)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdBypassNotAvailable = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,12)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOutputOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,13)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdInputFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,14)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdBuildingAlarm = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,15)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdShutdownImminent = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,16)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOnInverter = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,17)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
# Gap at 18/19, matching the xupstb* series.
xupstdBreakerOpen = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,20)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmEntryAdded = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,21)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmEntryRemoved = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,22)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmBatteryBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,23)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOutputOffAsRequested = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,24)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdDiagnosticTestFailed = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,25)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdCommunicationsLost = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,26)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdUpsShutdownPending = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,27)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmTestInProgress = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,28)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAmbientTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,29)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("XUPS-MIB", "xupsEnvAmbientTemp"), ("XUPS-MIB", "xupsEnvAmbientLowerLimit"), ("XUPS-MIB", "xupsEnvAmbientUpperLimit"))
xupstdContactActiveNotice = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,30)).setObjects(("XUPS-MIB", "xupsContactIndex"), ("XUPS-MIB", "xupsContactType"), ("XUPS-MIB", "xupsContactState"), ("XUPS-MIB", "xupsContactDescr"))
xupstdContactInactiveNotice = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,31)).setObjects(("XUPS-MIB", "xupsContactIndex"), ("XUPS-MIB", "xupsContactType"), ("XUPS-MIB", "xupsContactState"), ("XUPS-MIB", "xupsContactDescr"))
xupstdLossOfRedundancy = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,32)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,33)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmChargerFailed = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,34)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmFanFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,35)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdAlarmFuseFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,36)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdPowerSwitchBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,37)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdModuleFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,38)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdOnAlternatePowerSource = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,39)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("XUPS-MIB", "xupsInputSource"))
xupstdAltPowerNotAvailable = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,40)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdNoticeCondition = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,41)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"))
xupstdRemoteTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,42)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("XUPS-MIB", "xupsEnvRemoteTemp"), ("XUPS-MIB", "xupsEnvRemoteTempLowerLimit"), ("XUPS-MIB", "xupsEnvRemoteTempUpperLimit"))
xupstdRemoteHumidityBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 1) + (0,43)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("XUPS-MIB", "xupsEnvRemoteHumidity"), ("XUPS-MIB", "xupsEnvRemoteHumidityLowerLimit"), ("XUPS-MIB", "xupsEnvRemoteHumidityUpperLimit"))
xupstpControlOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,1)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpControlOn = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,2)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOnBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,3)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,4)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpUtilityPowerRestored = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,5)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpReturnFromLowBattery = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,6)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOutputOverload = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,7)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpInternalFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,8)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpBatteryDischarged = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,9)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpInverterFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,10)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOnBypass = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,11)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpBypassNotAvailable = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,12)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOutputOff = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,13)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpInputFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,14)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpBuildingAlarm = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,15)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpShutdownImminent = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,16)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOnInverter = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,17)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpBreakerOpen = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,20)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmEntryAdded = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,21)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmEntryRemoved = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,22)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmBatteryBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,23)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOutputOffAsRequested = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,24)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpDiagnosticTestFailed = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,25)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpCommunicationsLost = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,26)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpUpsShutdownPending = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,27)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmTestInProgress = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,28)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAmbientTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,29)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"), ("XUPS-MIB", "xupsEnvAmbientTemp"), ("XUPS-MIB", "xupsEnvAmbientLowerLimit"), ("XUPS-MIB", "xupsEnvAmbientUpperLimit"))
xupstpLossOfRedundancy = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,32)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,33)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmChargerFailed = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,34)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmFanFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,35)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAlarmFuseFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,36)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpPowerSwitchBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,37)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpModuleFailure = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,38)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpOnAlternatePowerSource = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,39)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("XUPS-MIB", "xupsInputSource"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpAltPowerNotAvailable = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,40)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpNoticeCondition = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,41)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
xupstpRemoteTempBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,42)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"), ("XUPS-MIB", "xupsEnvRemoteTemp"), ("XUPS-MIB", "xupsEnvRemoteTempLowerLimit"), ("XUPS-MIB", "xupsEnvRemoteTempUpperLimit"))
xupstpRemoteHumidityBad = NotificationType((1, 3, 6, 1, 4, 1, 534, 1, 11, 4, 2) + (0,43)).setObjects(("XUPS-MIB", "xupsAlarmID"), ("XUPS-MIB", "xupsAlarmDescr"), ("XUPS-MIB", "xupsTrapMessage"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"), ("XUPS-MIB", "xupsEnvRemoteHumidity"), ("XUPS-MIB", "xupsEnvRemoteHumidityLowerLimit"), ("XUPS-MIB", "xupsEnvRemoteHumidityUpperLimit"))
mibBuilder.exportSymbols("XUPS-MIB", xupsOutputFrequency=xupsOutputFrequency, xupsObjectId=xupsObjectId, xupsBypassFrequency=xupsBypassFrequency, xupsContactState=xupsContactState, xupsControlOutputOnTrapDelay=xupsControlOutputOnTrapDelay, xupstdControlOn=xupstdControlOn, xupsEnvRemoteTempUpperLimit=xupsEnvRemoteTempUpperLimit, xupsEnvAmbientUpperLimit=xupsEnvAmbientUpperLimit, xupstpPowerSwitchBad=xupstpPowerSwitchBad, xupsBypassEntry=xupsBypassEntry, xupsAlarmFanFailure=xupsAlarmFanFailure, xupsInputSource=xupsInputSource, xupstbControlOff=xupstbControlOff, xupsAlarmTime=xupsAlarmTime, xupsOutputSource=xupsOutputSource, xupstdNoticeCondition=xupstdNoticeCondition, xupstdBypassNotAvailable=xupstdBypassNotAvailable, xupstdAlarmFuseFailure=xupstdAlarmFuseFailure, xupstdOnBattery=xupstdOnBattery, xupstbBypassNotAvailable=xupstbBypassNotAvailable, xupsControl=xupsControl, xupstdAltPowerNotAvailable=xupstdAltPowerNotAvailable, xupstdContactActiveNotice=xupstdContactActiveNotice, xupsAlarmID=xupsAlarmID, xupsTopology=xupsTopology, connectUPSAdapterEthernet=connectUPSAdapterEthernet, xupsEnvNumContacts=xupsEnvNumContacts, xupsOutputCurrent=xupsOutputCurrent, xupstpModuleFailure=xupstpModuleFailure, xupsAlarmChargerFailed=xupsAlarmChargerFailed, xupstdRemoteTempBad=xupstdRemoteTempBad, xupsBatteryDischarged=xupsBatteryDischarged, xupstdOutputOff=xupstdOutputOff, xupsAlarmEventKind=xupsAlarmEventKind, xupsIdent=xupsIdent, xupstpInputFailure=xupstpInputFailure, xupsInputNumPhases=xupsInputNumPhases, xupsInputTable=xupsInputTable, xupsEnvRemoteHumidityLowerLimit=xupsEnvRemoteHumidityLowerLimit, xupsTestBattery=xupsTestBattery, xupsTrapMessage=xupsTrapMessage, xupsInputFrequency=xupsInputFrequency, xupsContactDescr=xupsContactDescr, xupsEnvAmbientLowerLimit=xupsEnvAmbientLowerLimit, xupsTrapBasic=xupsTrapBasic, xupsEnvRemoteHumidity=xupsEnvRemoteHumidity, xupsSendTrapType=xupsSendTrapType, xupstpOutputOff=xupstpOutputOff, xupsBypassNotAvailable=xupsBypassNotAvailable, 
xupsOutputTable=xupsOutputTable, xupsEnvironment=xupsEnvironment, xupstdShutdownImminent=xupstdShutdownImminent, xupstpBreakerOpen=xupstpBreakerOpen, xupstpRemoteTempBad=xupstpRemoteTempBad, xupstbOutputOff=xupstbOutputOff, xupstdOnInverter=xupstdOnInverter, xupsInput=xupsInput, xupsOutputNumPhases=xupsOutputNumPhases, xupsOutputEntry=xupsOutputEntry, xupsConfigOutputVoltage=xupsConfigOutputVoltage, xupstdAlarmBatteryBad=xupstdAlarmBatteryBad, xupsAlarmTable=xupsAlarmTable, xupstpAlarmFuseFailure=xupstpAlarmFuseFailure, xupstdRemoteHumidityBad=xupstdRemoteHumidityBad, xupstbReturnFromLowBattery=xupstbReturnFromLowBattery, xupstpLossOfRedundancy=xupstpLossOfRedundancy, xupsAltPowerNotAvailable=xupsAltPowerNotAvailable, xupsBypassNumPhases=xupsBypassNumPhases, xupsOutputOverload=xupsOutputOverload, xupstdAlarmEntryAdded=xupstdAlarmEntryAdded, xupstdLossOfRedundancy=xupstdLossOfRedundancy, xupsContactType=xupsContactType, xupstdReturnFromLowBattery=xupstdReturnFromLowBattery, xupsConfigHighOutputVoltageLimit=xupsConfigHighOutputVoltageLimit, xupstpOnAlternatePowerSource=xupstpOnAlternatePowerSource, xupstbBuildingAlarm=xupstbBuildingAlarm, xupstdContactInactiveNotice=xupstdContactInactiveNotice, xupsRecepIndex=xupsRecepIndex, xupsAlarmEventEntry=xupsAlarmEventEntry, xupsIdentModel=xupsIdentModel, xupstdUtilityPowerRestored=xupstdUtilityPowerRestored, xupstdAlarmEntryRemoved=xupstdAlarmEntryRemoved, xupstdAlarmTestInProgress=xupstdAlarmTestInProgress, xupsBatVoltage=xupsBatVoltage, xupstpAlarmTempBad=xupstpAlarmTempBad, xupsAlarm=xupsAlarm, xupstdInputFailure=xupstdInputFailure, xupstpControlOff=xupstpControlOff, xupsInputEntry=xupsInputEntry, xupstpBypassNotAvailable=xupstpBypassNotAvailable, xupsEnvRemoteTempLowerLimit=xupsEnvRemoteTempLowerLimit, xupstpDiagnosticTestFailed=xupstpDiagnosticTestFailed, xupsInputFailure=xupsInputFailure, xupsAlarmEntry=xupsAlarmEntry, xupsIdentOemCode=xupsIdentOemCode, xupstbBatteryDischarged=xupstbBatteryDischarged, 
xupsBatteryAbmStatus=xupsBatteryAbmStatus, xupstdAmbientTempBad=xupstdAmbientTempBad, xupsBattery=xupsBattery, xupstpAmbientTempBad=xupstpAmbientTempBad, xupsAlarmEventMsg=xupsAlarmEventMsg, xupsRemoteHumidityBad=xupsRemoteHumidityBad, xupsAlarmEventID=xupsAlarmEventID, xupstpBatteryDischarged=xupstpBatteryDischarged, xupstdOnBypass=xupstdOnBypass, xupsRecepTable=xupsRecepTable, xupstpLowBattery=xupstpLowBattery, xupsNumReceptacles=xupsNumReceptacles, xupsNull=xupsNull, xupstbInputFailure=xupstbInputFailure, xupstdBreakerOpen=xupstdBreakerOpen, xupstpUpsShutdownPending=xupstpUpsShutdownPending, xupsReturnFromLowBattery=xupsReturnFromLowBattery, xupsConfigOutputFreq=xupsConfigOutputFreq, xupsRecepOffDelaySecs=xupsRecepOffDelaySecs, xupstpAlarmTestInProgress=xupstpAlarmTestInProgress, xupstdAlarmChargerFailed=xupstdAlarmChargerFailed, xupstpAltPowerNotAvailable=xupstpAltPowerNotAvailable, xupsOnAlternatePowerSource=xupsOnAlternatePowerSource, xupstbUtilityPowerRestored=xupstbUtilityPowerRestored, xupsAlarmEventDescr=xupsAlarmEventDescr, xupsUpsShutdownPending=xupsUpsShutdownPending, xupsAlarmFuseFailure=xupsAlarmFuseFailure, xupsRecepAutoOnDelay=xupsRecepAutoOnDelay, xupstdCommunicationsLost=xupstdCommunicationsLost, xupstdAlarmFanFailure=xupstdAlarmFanFailure, xups=xups, xupsContactSenseTable=xupsContactSenseTable, xupstbOnBypass=xupstbOnBypass, xupsEnvRemoteHumidityUpperLimit=xupsEnvRemoteHumidityUpperLimit, xupstdOnAlternatePowerSource=xupstdOnAlternatePowerSource, xupstpAlarmChargerFailed=xupstpAlarmChargerFailed, xupsTrapDefined=xupsTrapDefined, xupstpCommunicationsLost=xupstpCommunicationsLost, xupsAlarmEventTable=xupsAlarmEventTable, xupstpOutputOverload=xupstpOutputOverload, xupsBypass=xupsBypass, xupsAlarmTempBad=xupsAlarmTempBad, xupstdDiagnosticTestFailed=xupstdDiagnosticTestFailed, xupsRecep=xupsRecep, xupsLossOfRedundancy=xupsLossOfRedundancy, xupsBatCurrent=xupsBatCurrent, xupsTrapPortN=xupsTrapPortN, xupsBatTimeRemaining=xupsBatTimeRemaining, 
xupsAlarmEntryRemoved=xupsAlarmEntryRemoved, xupstpOutputOffAsRequested=xupstpOutputOffAsRequested, xupsAlarmDescr=xupsAlarmDescr, xupsEnvAmbientHumidity=xupsEnvAmbientHumidity, xupsEnvRemoteTemp=xupsEnvRemoteTemp, xupsBypassVoltage=xupsBypassVoltage, xupsBuildingAlarm=xupsBuildingAlarm, xupstbOnInverter=xupstbOnInverter, xupstbInternalFailure=xupstbInternalFailure, xupsRemoteTempBad=xupsRemoteTempBad, xupsControlOutputOffDelay=xupsControlOutputOffDelay, xupsTrapSource=xupsTrapSource, xupstbShutdownImminent=xupstbShutdownImminent, xupstdInverterFailure=xupstdInverterFailure, xupstdOutputOverload=xupstdOutputOverload, powerwareNetworkDigitalIOEther=powerwareNetworkDigitalIOEther, xupsInputLineBads=xupsInputLineBads, xupstpUtilityPowerRestored=xupstpUtilityPowerRestored, xupsAmbientTempBad=xupsAmbientTempBad, xupstdInternalFailure=xupstdInternalFailure, xupsTest=xupsTest, xupsOutputPhase=xupsOutputPhase, xupsInternalFailure=xupsInternalFailure, xupstbOnBattery=xupstbOnBattery, xupsOutputOff=xupsOutputOff, xupsRecepOnDelaySecs=xupsRecepOnDelaySecs, xupsConfig=xupsConfig, xupstbAlarmEntryRemoved=xupstbAlarmEntryRemoved, xupsRecepEntry=xupsRecepEntry, xupstdOutputOffAsRequested=xupstdOutputOffAsRequested, xupstbInverterFailure=xupstbInverterFailure, xupsTopoUnitNumber=xupsTopoUnitNumber, xupsNoticeCondition=xupsNoticeCondition, xupsContactIndex=xupsContactIndex, xupsConfigDateAndTime=xupsConfigDateAndTime, xupstpInternalFailure=xupstpInternalFailure, xupsPowerSwitchBad=xupsPowerSwitchBad, powerwareEthernetSnmpAdapter=powerwareEthernetSnmpAdapter, xupsOnBattery=xupsOnBattery, xupsLowBattery=xupsLowBattery, powerwareNetworkSnmpAdapterEther=powerwareNetworkSnmpAdapterEther, xupstpAlarmBatteryBad=xupstpAlarmBatteryBad, xupsTrapControl=xupsTrapControl, xupstpAlarmEntryRemoved=xupstpAlarmEntryRemoved, simpleSnmpAdapter=simpleSnmpAdapter, onlinetDaemon=onlinetDaemon, xupsBreakerOpen=xupsBreakerOpen, xupsTopoPowerStrategy=xupsTopoPowerStrategy, xupstbControlOn=xupstbControlOn, 
xupstdBuildingAlarm=xupstdBuildingAlarm, xupsOnInverter=xupsOnInverter, xupsControlOutputOffTrapDelay=xupsControlOutputOffTrapDelay, xupsAlarmBatteryBad=xupsAlarmBatteryBad, xupstbAlarmEntryAdded=xupstbAlarmEntryAdded, xupsAlarmEntryAdded=xupsAlarmEntryAdded, xupsInputVoltage=xupsInputVoltage, xupsConfigLowOutputVoltageLimit=xupsConfigLowOutputVoltageLimit, xupstdUpsShutdownPending=xupstdUpsShutdownPending, xupsAlarmEventDateAndTime=xupsAlarmEventDateAndTime, xupstbOutputOverload=xupstbOutputOverload, xupsInputWatts=xupsInputWatts, connectUPSAdapterTokenRing=connectUPSAdapterTokenRing, xupstpOnInverter=xupstpOnInverter, xupsInputPhase=xupsInputPhase, xupsContactsTableEntry=xupsContactsTableEntry, xupstpAlarmFanFailure=xupstpAlarmFanFailure, xupsLoadShedSecsWithRestart=xupsLoadShedSecsWithRestart, xupsEnvAmbientTemp=xupsEnvAmbientTemp, xupsRecepShedSecsWithRestart=xupsRecepShedSecsWithRestart, xupsOutputOffAsRequested=xupsOutputOffAsRequested, xupstpInverterFailure=xupstpInverterFailure, xupsBypassTable=xupsBypassTable, xupsRecepAutoOffDelay=xupsRecepAutoOffDelay, xupsModuleFailure=xupsModuleFailure, xupstdLowBattery=xupstdLowBattery, xupsAlarmNumEvents=xupsAlarmNumEvents, xupsUtilityPowerRestored=xupsUtilityPowerRestored, xupsConfigInputVoltage=xupsConfigInputVoltage, xupsInverterFailure=xupsInverterFailure, xupstpReturnFromLowBattery=xupstpReturnFromLowBattery, xupsOutput=xupsOutput, xupsTestBatteryStatus=xupsTestBatteryStatus, xupsOutputVoltage=xupsOutputVoltage, xupstpRemoteHumidityBad=xupstpRemoteHumidityBad, xupsOutputLoad=xupsOutputLoad, xupsBypassPhase=xupsBypassPhase, xupsIdentManufacturer=xupsIdentManufacturer, xupsAlarms=xupsAlarms, xupstdAlarmTempBad=xupstdAlarmTempBad, xupstpOnBypass=xupstpOnBypass, xupstpShutdownImminent=xupstpShutdownImminent, xupsOnBypass=xupsOnBypass, xupsBatCapacity=xupsBatCapacity, xupsTopoMachineCode=xupsTopoMachineCode, xupsRecepStatus=xupsRecepStatus, powerware=powerware, xupsInputCurrent=xupsInputCurrent, 
xupstbLowBattery=xupstbLowBattery, xupstdPowerSwitchBad=xupstdPowerSwitchBad, xupsShutdownImminent=xupsShutdownImminent, xupsAlarmTestInProgress=xupsAlarmTestInProgress, xupsCommunicationsLost=xupsCommunicationsLost, powerwareNetworkSnmpAdapterToken=powerwareNetworkSnmpAdapterToken, xupstpAlarmEntryAdded=xupstpAlarmEntryAdded, xupsConfigOutputWatts=xupsConfigOutputWatts, xupstbBreakerOpen=xupstbBreakerOpen, xupstpOnBattery=xupstpOnBattery, xupsDiagnosticTestFailed=xupsDiagnosticTestFailed, xupstpControlOn=xupstpControlOn, xupsControlToBypassDelay=xupsControlToBypassDelay)
mibBuilder.exportSymbols("XUPS-MIB", xupstpBuildingAlarm=xupstpBuildingAlarm, xupsControlOutputOnDelay=xupsControlOutputOnDelay, xupstdBatteryDischarged=xupstdBatteryDischarged, xupstdControlOff=xupstdControlOff, xupsOutputWatts=xupsOutputWatts, xupsTopologyType=xupsTopologyType, xupstdModuleFailure=xupstdModuleFailure, xupstpNoticeCondition=xupstpNoticeCondition, xupsIdentSoftwareVersion=xupsIdentSoftwareVersion, xupsMaxTrapLevel=xupsMaxTrapLevel)
| true | true |
1c3aaefe05715e4b4a91554a88789cdcc9ea6606 | 677 | py | Python | sheepdoge/app.py | mattjmcnaughton/sheepdoge | 9c028d6f51cb59afcaf25a5680f961ec7e25676b | [
"Apache-2.0"
] | 7 | 2018-03-18T07:25:10.000Z | 2022-01-28T17:35:08.000Z | sheepdoge/app.py | mattjmcnaughton/sheepdoge | 9c028d6f51cb59afcaf25a5680f961ec7e25676b | [
"Apache-2.0"
] | 15 | 2017-08-19T14:03:10.000Z | 2017-12-29T23:22:05.000Z | sheepdoge/app.py | mattjmcnaughton/sheepdoge | 9c028d6f51cb59afcaf25a5680f961ec7e25676b | [
"Apache-2.0"
] | null | null | null | """Orchestrates the different `sheepdoge` operations."""
from sheepdoge.config import Config
from sheepdoge.action import Action # pylint: disable=unused-import
class Sheepdoge(object):
"""A class we instantiate with instances of the `Action`, which indicate
which cli command we'll perform.
:param action: The Sheepdoge action we're running.
"""
def __init__(self, action, config=None):
# type: (Action, Config) -> None
self._action = action
self._config = config or Config.get_config_singleton()
def run(self):
# type: () -> None
"""Execute an command given to `sheepdoge`."""
self._action.run()
| 29.434783 | 76 | 0.66322 |
from sheepdoge.config import Config
from sheepdoge.action import Action
class Sheepdoge(object):
def __init__(self, action, config=None):
self._action = action
self._config = config or Config.get_config_singleton()
def run(self):
self._action.run()
| true | true |
1c3ab0b0932c8237da05d6071187d05b66064048 | 3,915 | py | Python | torchpq/legacy/IVFPQTopk.py | mhamilton723/TorchPQ | f3d560ec04d9c741943fa930a1257c9be9445cbe | [
"MIT"
] | 103 | 2021-02-10T18:01:56.000Z | 2022-03-30T21:35:05.000Z | torchpq/legacy/IVFPQTopk.py | mhamilton723/TorchPQ | f3d560ec04d9c741943fa930a1257c9be9445cbe | [
"MIT"
] | 9 | 2021-05-28T14:52:33.000Z | 2022-03-03T13:09:25.000Z | torchpq/legacy/IVFPQTopk.py | mhamilton723/TorchPQ | f3d560ec04d9c741943fa930a1257c9be9445cbe | [
"MIT"
] | 10 | 2021-04-24T04:25:24.000Z | 2022-02-24T07:30:42.000Z | import torch
import numpy as np
import math
from ..kernels import ComputeProductCuda
class IVFPQTopk:
def __init__(self,
n_subvectors,
n_clusters,
n_cs=4,
):
assert torch.cuda.is_available()
self.n_subvectors = n_subvectors
self.n_clusters = n_clusters
self.n_cs = n_cs
self.sm_size = n_subvectors * 256 * 4
self.compute_product = ComputeProductCuda(
m=n_subvectors,
k=n_clusters,
n_cs=n_cs,
sm_size=self.sm_size
)
@staticmethod
def remaining_memory():
if torch.cuda.is_available():
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
remaining = total_memory - torch.cuda.memory_reserved()
else:
remaining = 0
return remaining
def get_similarity(self, data, precomputed, is_empty, div_start, div_size):
max_out_size = div_size.sum(dim=1).max().item()
n_subvectors, n_query, n_clusters = precomputed.shape
n_probe = div_start.shape[1]
values, indices = self.compute_product(
data = data,
precomputed = precomputed,
is_empty = is_empty,
div_start = div_start,
div_size = div_size,
max_out_size = max_out_size,
)
return values, indices
def __call__(self, k, data, precomputed, is_empty, div_start, div_size):
"""
k: dtype : int
data: shape : [n_subvectors // n_cs, n_data, n_cs], dtype : uint8
precomputed: shape : [n_subvectors, n_query, n_clusters], dtype : float32
is_empty: shape : [n_data], dtype : uint8
div_start: shape : [n_query, n_probe], dtype : int32
div_size: shape : [n_query, n_probe], dtype : int32
"""
max_out_size = div_size.sum(dim=1).max().item()
n_subvectors, n_query, n_clusters = precomputed.shape
n_probe = div_start.shape[1]
final_v = torch.zeros(n_query, k, device="cuda:0", dtype=torch.float32)
final_i = torch.zeros(n_query, k, device="cuda:0", dtype=torch.int32)
remaining = self.remaining_memory()
n_partitions = 1
while True:
if n_partitions > n_query:
raise RuntimeError("No enough GPU memory")
sub_n_query = math.ceil(n_query / n_partitions)
required = sub_n_query * max_out_size * 2 * 4
if n_partitions > 1:
required += sub_n_query * n_subvectors * n_clusters * 4
required += sub_n_query * n_probe * 2 * 4
if required <= remaining:
break
n_partitions *= 2
for i in range(n_partitions):
start = i * sub_n_query
end = (i+1) * sub_n_query
if end > n_query:
end = n_query
if n_partitions > 1:
sub_precomputed = precomputed[:, start:end].contiguous()
sub_div_start = div_start[start:end].contiguous()
sub_div_size = div_size[start:end].contiguous()
sub_mos = sub_div_size.sum(dim=1).max().item()
else:
sub_precomputed = precomputed
sub_div_start = div_start
sub_div_size = div_size
sub_mos = max_out_size
sub_v, sub_i = self.compute_product(
data = data,
precomputed = sub_precomputed,
is_empty = is_empty,
div_start = sub_div_start,
div_size = sub_div_size,
max_out_size = sub_mos,
)
del sub_precomputed
sub_k = min(k, sub_mos)
sorted_v, sorted_i = torch.topk(sub_v, dim=-1, k=sub_k)
del sub_v
final_v[start:end, :sub_k] = sorted_v
del sorted_v
final_i[start:end, :sub_k] = torch.gather(input=sub_i, index=sorted_i, dim=1)
del sub_i, sorted_i
### TEST
# def naive_pqd(data, distances, is_empty):
# o, n, q = data.shape
# m = o * q
# arange = torch.arange(m, device="cuda:0")
# data = data.transpose(0, 1).reshape(n,m)
# data = data[~is_empty ]
# result = distances[arange, :, data[:].long() ].sum(dim=1).t()
# return result
return (final_v, final_i) | 32.090164 | 83 | 0.637292 | import torch
import numpy as np
import math
from ..kernels import ComputeProductCuda
class IVFPQTopk:
    """GPU top-k search over an IVF-PQ (inverted-file product-quantization) index.

    Scores PQ-encoded database vectors against per-query distance lookup
    tables via the ComputeProductCuda kernel and returns the k best
    (value, index) pairs per query.  Requires CUDA; results live on "cuda:0".
    """

    def __init__(self,
                 n_subvectors,   # PQ subvectors per vector (kernel param `m`)
                 n_clusters,     # centroids per subquantizer (kernel param `k`)
                 n_cs=4,         # kernel chunking factor -- TODO confirm exact meaning
                 ):
        # CUDA is mandatory: the kernel and all result buffers target "cuda:0".
        assert torch.cuda.is_available()
        self.n_subvectors = n_subvectors
        self.n_clusters = n_clusters
        self.n_cs = n_cs
        # Shared-memory bytes per CUDA block: 256 float32 (4 B) entries per
        # subvector; presumably assumes n_clusters <= 256 -- TODO confirm.
        self.sm_size = n_subvectors * 256 * 4
        self.compute_product = ComputeProductCuda(
            m=n_subvectors,
            k=n_clusters,
            n_cs=n_cs,
            sm_size=self.sm_size
        )

    @staticmethod
    def remaining_memory():
        """Return bytes of device-0 memory not reserved by PyTorch (0 without CUDA)."""
        if torch.cuda.is_available():
            # Release cached blocks first so the estimate reflects reusable memory.
            torch.cuda.empty_cache()
            total_memory = torch.cuda.get_device_properties(0).total_memory
            remaining = total_memory - torch.cuda.memory_reserved()
        else:
            remaining = 0
        return remaining

    def get_similarity(self, data, precomputed, is_empty, div_start, div_size):
        """Run the product kernel once over all queries (no partitioning, no top-k).

        precomputed: [n_subvectors, n_query, n_clusters] distance lookup tables.
        div_start / div_size: per-query (start, length) slices of the probed
        inverted lists -- assumed [n_query, n_probe] int32 as in __call__;
        TODO confirm.
        Returns the kernel's (values, indices), padded to the largest
        per-query candidate count.
        """
        max_out_size = div_size.sum(dim=1).max().item()
        # Unpacked for symmetry with __call__; n_probe is unused below.
        n_subvectors, n_query, n_clusters = precomputed.shape
        n_probe = div_start.shape[1]
        values, indices = self.compute_product(
            data = data,
            precomputed = precomputed,
            is_empty = is_empty,
            div_start = div_start,
            div_size = div_size,
            max_out_size = max_out_size,
        )
        return values, indices

    def __call__(self, k, data, precomputed, is_empty, div_start, div_size):
        """Return per-query top-k (values, indices), partitioning queries to fit GPU memory.

        k: number of results per query.
        precomputed: [n_subvectors, n_query, n_clusters] lookup tables.
        div_start, div_size: [n_query, n_probe] int32 inverted-list slices.
        Returns (final_v [n_query, k] float32, final_i [n_query, k] int32);
        rows with fewer than k candidates keep their zero padding.
        """
        max_out_size = div_size.sum(dim=1).max().item()
        n_subvectors, n_query, n_clusters = precomputed.shape
        n_probe = div_start.shape[1]
        final_v = torch.zeros(n_query, k, device="cuda:0", dtype=torch.float32)
        final_i = torch.zeros(n_query, k, device="cuda:0", dtype=torch.int32)
        remaining = self.remaining_memory()
        # Double the number of query partitions until the estimated working
        # set fits into the remaining device memory.
        n_partitions = 1
        while True:
            if n_partitions > n_query:
                # NOTE(review): message grammar -- "Not enough" intended.
                raise RuntimeError("No enough GPU memory")
            sub_n_query = math.ceil(n_query / n_partitions)
            # Kernel output buffers: values + indices, 4 bytes each.
            required = sub_n_query * max_out_size * 2 * 4
            if n_partitions > 1:
                # Plus contiguous sub-copies of the lookup tables and slices.
                required += sub_n_query * n_subvectors * n_clusters * 4
                required += sub_n_query * n_probe * 2 * 4
            if required <= remaining:
                break
            n_partitions *= 2
        for i in range(n_partitions):
            start = i * sub_n_query
            end = (i+1) * sub_n_query
            if end > n_query:
                end = n_query
            if n_partitions > 1:
                sub_precomputed = precomputed[:, start:end].contiguous()
                sub_div_start = div_start[start:end].contiguous()
                sub_div_size = div_size[start:end].contiguous()
                sub_mos = sub_div_size.sum(dim=1).max().item()
            else:
                # Single partition: reuse the full tensors without copying.
                sub_precomputed = precomputed
                sub_div_start = div_start
                sub_div_size = div_size
                sub_mos = max_out_size
            sub_v, sub_i = self.compute_product(
                data = data,
                precomputed = sub_precomputed,
                is_empty = is_empty,
                div_start = sub_div_start,
                div_size = sub_div_size,
                max_out_size = sub_mos,
            )
            # Free intermediates eagerly to keep peak memory low.
            del sub_precomputed
            sub_k = min(k, sub_mos)
            sorted_v, sorted_i = torch.topk(sub_v, dim=-1, k=sub_k)
            del sub_v
            final_v[start:end, :sub_k] = sorted_v
            del sorted_v
            # Map top-k positions back to the kernel's candidate indices.
            final_i[start:end, :sub_k] = torch.gather(input=sub_i, index=sorted_i, dim=1)
            del sub_i, sorted_i
        return (final_v, final_i)
1c3ab1668f9b0b952c25c52d791afe7807b62859 | 10,702 | py | Python | tanjun/__init__.py | thesadru/Tanjun | 977eacbd2b0eeafd14a30450aceeb9e329703edd | [
"BSD-3-Clause"
] | null | null | null | tanjun/__init__.py | thesadru/Tanjun | 977eacbd2b0eeafd14a30450aceeb9e329703edd | [
"BSD-3-Clause"
] | null | null | null | tanjun/__init__.py | thesadru/Tanjun | 977eacbd2b0eeafd14a30450aceeb9e329703edd | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2022, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A flexible command framework designed to extend Hikari.
Examples
--------
A Tanjun client can be quickly initialised from a Hikari gateway bot through
`tanjun.Client.from_gateway_bot`, this enables both slash (interaction) and message
command execution:
```py
bot = hikari.GatewayBot("BOT_TOKEN")
# As a note, unless event_managed=False is passed here then this client
# will be managed based on gateway startup and stopping events.
# mention_prefix=True instructs the client to also set mention prefixes on the
# first startup.
client = tanjun.Client.from_gateway_bot(bot, declare_global_commands=True, mention_prefix=True)
component = tanjun.Component()
client.add_component(component)
# Declare a message command with some basic parser logic.
@component.with_command
@tanjun.with_greedy_argument("name", default="World")
@tanjun.as_message_command("test")
async def test_command(ctx: tanjun.abc.Context, name: str) -> None:
await ctx.respond(f"Hello, {name}!")
# Declare a ping slash command
@component.with_command
@tanjun.with_user_slash_option("user", "The user facing command option's description", default=None)
@tanjun.as_slash_command("hello", "The command's user facing description")
async def hello(ctx: tanjun.abc.Context, user: hikari.User | None) -> None:
user = user or ctx.author
await ctx.respond(f"Hello, {user}!")
```
Alternatively, the client can also be built from a RESTBot but this will only
enable slash (interaction) command execution:
```py
bot = hikari.RESTBot("BOT_TOKEN", "Bot")
# declare_global_commands=True instructs the client to set the global commands
# for the relevant bot on first startup (this will replace any previously
# declared commands).
client = tanjun.Client.from_rest_bot(bot, declare_global_commands=True)
# This will load components from modules based on loader functions.
# For more information on this see `tanjun.as_loader`.
client.load_modules("module.paths")
# Note, unlike a gateway bound bot, the rest bot will not automatically start
# itself due to the lack of Hikari lifetime events in this environment and
# will have to be started after the Hikari client.
async def main() -> None:
await bot.start()
async with client.open():
await bot.join()
```
For more extensive examples see the
[repository's examples](https://github.com/FasterSpeeding/Tanjun/tree/master/examples).
There are also
[written tutorials](https://patchwork.systems/programming/hikari-discord-bot/index.html)
that cover making a bot from scratch through to advanced concepts like Dependency Injection.
"""
from __future__ import annotations
# Public re-exports of the tanjun package; kept in sync with the imports below.
__all__: list[str] = [
    # __init__.py
    "AnyHooks",
    "BucketResource",
    "Client",
    "ClientCallbackNames",
    "CommandError",
    "Component",
    "ConversionError",
    "FailedCheck",
    "FailedModuleLoad",
    "FailedModuleUnload",
    "HaltExecution",
    "Hooks",
    "InMemoryConcurrencyLimiter",
    "InMemoryCooldownManager",
    "LazyConstant",
    "MenuCommand",
    "MessageAcceptsEnum",
    "MessageCommand",
    "MessageCommandGroup",
    "MessageHooks",
    "MissingDependencyError",
    "ModuleMissingLoaders",
    "ModuleStateConflict",
    "NotEnoughArgumentsError",
    "ParserError",
    "ShlexParser",
    "SlashCommand",
    "SlashCommandGroup",
    "SlashHooks",
    "TanjunError",
    "TooManyArgumentsError",
    "__author__",
    "__ci__",
    "__copyright__",
    "__coverage__",
    "__docs__",
    "__email__",
    "__issue_tracker__",
    "__license__",
    "__url__",
    "__version__",
    "abc",
    "as_interval",
    "as_loader",
    "as_message_command",
    "as_message_command_group",
    "as_message_menu",
    "as_self_injecting",
    "as_slash_command",
    "as_unloader",
    "as_user_menu",
    "cached_inject",
    "checks",
    "clients",
    "commands",
    "components",
    "context",
    "conversion",
    "dependencies",
    "errors",
    "hooks",
    "inject",
    "inject_lc",
    "injected",
    "injecting",
    "parsing",
    "schedules",
    "slash_command_group",
    "to_bool",
    "to_channel",
    "to_color",
    "to_colour",
    "to_datetime",
    "to_emoji",
    "to_guild",
    "to_invite",
    "to_invite_with_metadata",
    "to_member",
    "to_presence",
    "to_role",
    "to_snowflake",
    "to_user",
    "to_voice_state",
    "utilities",
    "with_all_checks",
    "with_any_checks",
    "with_argument",
    "with_author_permission_check",
    "with_bool_slash_option",
    "with_channel_slash_option",
    "with_check",
    "with_concurrency_limit",
    "with_cooldown",
    "with_dm_check",
    "with_float_slash_option",
    "with_greedy_argument",
    "with_guild_check",
    "with_int_slash_option",
    "with_member_slash_option",
    "with_mentionable_slash_option",
    "with_multi_argument",
    "with_multi_option",
    "with_nsfw_check",
    "with_option",
    "with_own_permission_check",
    "with_owner_check",
    "with_parser",
    "with_role_slash_option",
    "with_sfw_check",
    "with_str_slash_option",
    "with_user_slash_option",
]
import typing
from . import abc
from . import context
from . import utilities
from .abc import ClientCallbackNames
from .checks import with_all_checks
from .checks import with_any_checks
from .checks import with_author_permission_check
from .checks import with_check
from .checks import with_dm_check
from .checks import with_guild_check
from .checks import with_nsfw_check
from .checks import with_own_permission_check
from .checks import with_owner_check
from .checks import with_sfw_check
from .clients import Client
from .clients import MessageAcceptsEnum
from .clients import as_loader
from .clients import as_unloader
from .commands import MenuCommand
from .commands import MessageCommand
from .commands import MessageCommandGroup
from .commands import SlashCommand
from .commands import SlashCommandGroup
from .commands import as_message_command
from .commands import as_message_command_group
from .commands import as_message_menu
from .commands import as_slash_command
from .commands import as_user_menu
from .commands import slash_command_group
from .commands import with_bool_slash_option
from .commands import with_channel_slash_option
from .commands import with_float_slash_option
from .commands import with_int_slash_option
from .commands import with_member_slash_option
from .commands import with_mentionable_slash_option
from .commands import with_role_slash_option
from .commands import with_str_slash_option
from .commands import with_user_slash_option
from .components import Component
from .conversion import to_bool
from .conversion import to_channel
from .conversion import to_color
from .conversion import to_colour
from .conversion import to_datetime
from .conversion import to_emoji
from .conversion import to_guild
from .conversion import to_invite
from .conversion import to_invite_with_metadata
from .conversion import to_member
from .conversion import to_presence
from .conversion import to_role
from .conversion import to_snowflake
from .conversion import to_user
from .conversion import to_voice_state
from .dependencies import BucketResource
from .dependencies import InMemoryConcurrencyLimiter
from .dependencies import InMemoryCooldownManager
from .dependencies import LazyConstant
from .dependencies import cached_inject
from .dependencies import inject_lc
from .dependencies import with_concurrency_limit
from .dependencies import with_cooldown
from .errors import CommandError
from .errors import ConversionError
from .errors import FailedCheck
from .errors import FailedModuleLoad
from .errors import FailedModuleUnload
from .errors import HaltExecution
from .errors import MissingDependencyError
from .errors import ModuleMissingLoaders
from .errors import ModuleStateConflict
from .errors import NotEnoughArgumentsError
from .errors import ParserError
from .errors import TanjunError
from .errors import TooManyArgumentsError
from .hooks import AnyHooks
from .hooks import Hooks
from .hooks import MessageHooks
from .hooks import SlashHooks
from .injecting import as_self_injecting
from .injecting import inject
from .injecting import injected
from .parsing import ShlexParser
from .parsing import with_argument
from .parsing import with_greedy_argument
from .parsing import with_multi_argument
from .parsing import with_multi_option
from .parsing import with_option
from .parsing import with_parser
from .schedules import as_interval
# Package metadata constants, exposed for introspection (also listed in __all__).
__author__: typing.Final[str] = "Faster Speeding"
__ci__: typing.Final[str] = "https://github.com/FasterSpeeding/Tanjun/actions"
__copyright__: typing.Final[str] = "© 2020-2022 Faster Speeding"
__coverage__: typing.Final[str] = "https://codeclimate.com/github/FasterSpeeding/Tanjun"
__docs__: typing.Final[str] = "https://tanjun.cursed.solutions/"
__email__: typing.Final[str] = "lucina@lmbyrne.dev"
__issue_tracker__: typing.Final[str] = "https://github.com/FasterSpeeding/Tanjun/issues"
__license__: typing.Final[str] = "BSD"
__url__: typing.Final[str] = "https://github.com/FasterSpeeding/Tanjun"
__version__: typing.Final[str] = "2.4.1a1"
| 33.548589 | 100 | 0.769576 |
from __future__ import annotations
__all__: list[str] = [
"AnyHooks",
"BucketResource",
"Client",
"ClientCallbackNames",
"CommandError",
"Component",
"ConversionError",
"FailedCheck",
"FailedModuleLoad",
"FailedModuleUnload",
"HaltExecution",
"Hooks",
"InMemoryConcurrencyLimiter",
"InMemoryCooldownManager",
"LazyConstant",
"MenuCommand",
"MessageAcceptsEnum",
"MessageCommand",
"MessageCommandGroup",
"MessageHooks",
"MissingDependencyError",
"ModuleMissingLoaders",
"ModuleStateConflict",
"NotEnoughArgumentsError",
"ParserError",
"ShlexParser",
"SlashCommand",
"SlashCommandGroup",
"SlashHooks",
"TanjunError",
"TooManyArgumentsError",
"__author__",
"__ci__",
"__copyright__",
"__coverage__",
"__docs__",
"__email__",
"__issue_tracker__",
"__license__",
"__url__",
"__version__",
"abc",
"as_interval",
"as_loader",
"as_message_command",
"as_message_command_group",
"as_message_menu",
"as_self_injecting",
"as_slash_command",
"as_unloader",
"as_user_menu",
"cached_inject",
"checks",
"clients",
"commands",
"components",
"context",
"conversion",
"dependencies",
"errors",
"hooks",
"inject",
"inject_lc",
"injected",
"injecting",
"parsing",
"schedules",
"slash_command_group",
"to_bool",
"to_channel",
"to_color",
"to_colour",
"to_datetime",
"to_emoji",
"to_guild",
"to_invite",
"to_invite_with_metadata",
"to_member",
"to_presence",
"to_role",
"to_snowflake",
"to_user",
"to_voice_state",
"utilities",
"with_all_checks",
"with_any_checks",
"with_argument",
"with_author_permission_check",
"with_bool_slash_option",
"with_channel_slash_option",
"with_check",
"with_concurrency_limit",
"with_cooldown",
"with_dm_check",
"with_float_slash_option",
"with_greedy_argument",
"with_guild_check",
"with_int_slash_option",
"with_member_slash_option",
"with_mentionable_slash_option",
"with_multi_argument",
"with_multi_option",
"with_nsfw_check",
"with_option",
"with_own_permission_check",
"with_owner_check",
"with_parser",
"with_role_slash_option",
"with_sfw_check",
"with_str_slash_option",
"with_user_slash_option",
]
import typing
from . import abc
from . import context
from . import utilities
from .abc import ClientCallbackNames
from .checks import with_all_checks
from .checks import with_any_checks
from .checks import with_author_permission_check
from .checks import with_check
from .checks import with_dm_check
from .checks import with_guild_check
from .checks import with_nsfw_check
from .checks import with_own_permission_check
from .checks import with_owner_check
from .checks import with_sfw_check
from .clients import Client
from .clients import MessageAcceptsEnum
from .clients import as_loader
from .clients import as_unloader
from .commands import MenuCommand
from .commands import MessageCommand
from .commands import MessageCommandGroup
from .commands import SlashCommand
from .commands import SlashCommandGroup
from .commands import as_message_command
from .commands import as_message_command_group
from .commands import as_message_menu
from .commands import as_slash_command
from .commands import as_user_menu
from .commands import slash_command_group
from .commands import with_bool_slash_option
from .commands import with_channel_slash_option
from .commands import with_float_slash_option
from .commands import with_int_slash_option
from .commands import with_member_slash_option
from .commands import with_mentionable_slash_option
from .commands import with_role_slash_option
from .commands import with_str_slash_option
from .commands import with_user_slash_option
from .components import Component
from .conversion import to_bool
from .conversion import to_channel
from .conversion import to_color
from .conversion import to_colour
from .conversion import to_datetime
from .conversion import to_emoji
from .conversion import to_guild
from .conversion import to_invite
from .conversion import to_invite_with_metadata
from .conversion import to_member
from .conversion import to_presence
from .conversion import to_role
from .conversion import to_snowflake
from .conversion import to_user
from .conversion import to_voice_state
from .dependencies import BucketResource
from .dependencies import InMemoryConcurrencyLimiter
from .dependencies import InMemoryCooldownManager
from .dependencies import LazyConstant
from .dependencies import cached_inject
from .dependencies import inject_lc
from .dependencies import with_concurrency_limit
from .dependencies import with_cooldown
from .errors import CommandError
from .errors import ConversionError
from .errors import FailedCheck
from .errors import FailedModuleLoad
from .errors import FailedModuleUnload
from .errors import HaltExecution
from .errors import MissingDependencyError
from .errors import ModuleMissingLoaders
from .errors import ModuleStateConflict
from .errors import NotEnoughArgumentsError
from .errors import ParserError
from .errors import TanjunError
from .errors import TooManyArgumentsError
from .hooks import AnyHooks
from .hooks import Hooks
from .hooks import MessageHooks
from .hooks import SlashHooks
from .injecting import as_self_injecting
from .injecting import inject
from .injecting import injected
from .parsing import ShlexParser
from .parsing import with_argument
from .parsing import with_greedy_argument
from .parsing import with_multi_argument
from .parsing import with_multi_option
from .parsing import with_option
from .parsing import with_parser
from .schedules import as_interval
# Package metadata constants, exposed for introspection (also listed in __all__).
__author__: typing.Final[str] = "Faster Speeding"
__ci__: typing.Final[str] = "https://github.com/FasterSpeeding/Tanjun/actions"
__copyright__: typing.Final[str] = "© 2020-2022 Faster Speeding"
__coverage__: typing.Final[str] = "https://codeclimate.com/github/FasterSpeeding/Tanjun"
__docs__: typing.Final[str] = "https://tanjun.cursed.solutions/"
__email__: typing.Final[str] = "lucina@lmbyrne.dev"
__issue_tracker__: typing.Final[str] = "https://github.com/FasterSpeeding/Tanjun/issues"
__license__: typing.Final[str] = "BSD"
__url__: typing.Final[str] = "https://github.com/FasterSpeeding/Tanjun"
__version__: typing.Final[str] = "2.4.1a1"
| true | true |
1c3ab1ef5f86b0cd1cd75c356f7f133390e6603d | 47,320 | py | Python | cwltool/command_line_tool.py | giannisdoukas/cwltool | 5a29b0742b8387f1ce8fc11e9b408a3b636432ee | [
"Apache-2.0"
] | null | null | null | cwltool/command_line_tool.py | giannisdoukas/cwltool | 5a29b0742b8387f1ce8fc11e9b408a3b636432ee | [
"Apache-2.0"
] | null | null | null | cwltool/command_line_tool.py | giannisdoukas/cwltool | 5a29b0742b8387f1ce8fc11e9b408a3b636432ee | [
"Apache-2.0"
] | null | null | null | """Implementation of CommandLineTool."""
import copy
import hashlib
import json
import locale
import logging
import os
import re
import shutil
import tempfile
import threading
import urllib
from functools import cmp_to_key, partial
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Set,
TextIO,
Union,
cast,
)
import shellescape
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.avro.schema import Schema
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import file_uri, uri_file_path
from schema_salad.sourceline import SourceLine
from schema_salad.utils import json_dumps
from schema_salad.validate import validate_ex
from typing_extensions import TYPE_CHECKING, Type
from .builder import Builder, content_limit_respected_read_bytes, substitute
from .context import LoadingContext, RuntimeContext, getdefault
from .docker import DockerCommandLineJob
from .errors import UnsupportedRequirement, WorkflowException
from .flatten import flatten
from .job import CommandLineJob, JobBase
from .loghandler import _logger
from .mpi import MPIRequirementName
from .mutation import MutationManager
from .pathmapper import PathMapper
from .process import (
Process,
_logger_validation_warnings,
compute_checksums,
shortname,
uniquename,
)
from .singularity import SingularityCommandLineJob
from .stdfsaccess import StdFsAccess
from .udocker import UDockerCommandLineJob
from .utils import (
CWLObjectType,
CWLOutputType,
DirectoryType,
JobsGeneratorType,
OutputCallbackType,
adjustDirObjs,
adjustFileObjs,
aslist,
convert_pathsep_to_unix,
docker_windows_path_adjust,
get_listing,
normalizeFilesDirs,
onWindows,
random_outdir,
shared_file_lock,
trim_listing,
upgrade_lock,
visit_class,
windows_default_container_id,
)
if TYPE_CHECKING:
from .provenance_profile import ProvenanceProfile # pylint: disable=unused-import
ACCEPTLIST_EN_STRICT_RE = re.compile(r"^[a-zA-Z0-9._+-]+$")
ACCEPTLIST_EN_RELAXED_RE = re.compile(r".*") # Accept anything
ACCEPTLIST_RE = ACCEPTLIST_EN_STRICT_RE
DEFAULT_CONTAINER_MSG = """
We are on Microsoft Windows and not all components of this CWL description have a
container specified. This means that these steps will be executed in the default container,
which is %s.
Note, this could affect portability if this CWL description relies on non-POSIX features
or commands in this container. For best results add the following to your CWL
description's hints section:
hints:
DockerRequirement:
dockerPull: %s
"""
class ExpressionJob(object):
    """Job for ExpressionTools."""

    def __init__(
        self,
        builder: Builder,
        script: str,
        output_callback: Optional[OutputCallbackType],
        requirements: List[CWLObjectType],
        hints: List[CWLObjectType],
        outdir: Optional[str] = None,
        tmpdir: Optional[str] = None,
    ) -> None:
        """Initialize this ExpressionJob."""
        self.builder = builder
        self.requirements = requirements
        self.hints = hints
        self.output_callback = output_callback
        self.outdir = outdir
        self.tmpdir = tmpdir
        self.script = script
        # Provenance profile, set by the creating ExpressionTool.job().
        self.prov_obj = None  # type: Optional[ProvenanceProfile]

    def run(
        self,
        runtimeContext: RuntimeContext,
        tmpdir_lock: Optional[threading.Lock] = None,
    ) -> None:
        """Evaluate the expression and report the result via output_callback.

        On success the callback receives (result, "success"); if evaluation
        raises WorkflowException it receives ({}, "permanentFail").
        """
        try:
            # Normalize File/Directory entries in the inputs before, and in
            # the evaluated result after, running the expression.
            normalizeFilesDirs(self.builder.job)
            ev = self.builder.do_eval(self.script)
            normalizeFilesDirs(
                cast(
                    Optional[
                        Union[
                            MutableSequence[MutableMapping[str, Any]],
                            MutableMapping[str, Any],
                            DirectoryType,
                        ]
                    ],
                    ev,
                )
            )
            if self.output_callback:
                self.output_callback(cast(Optional[CWLObjectType], ev), "success")
        except WorkflowException as err:
            _logger.warning(
                "Failed to evaluate expression:\n%s",
                str(err),
                exc_info=runtimeContext.debug,
            )
            if self.output_callback:
                self.output_callback({}, "permanentFail")
class ExpressionTool(Process):
    """Process subclass for CWL ExpressionTools (an expression, no command line)."""

    def job(
        self,
        job_order: CWLObjectType,
        output_callbacks: Optional[OutputCallbackType],
        runtimeContext: RuntimeContext,
    ) -> Generator[ExpressionJob, None, None]:
        """Yield a single ExpressionJob that evaluates this tool's expression."""
        builder = self._init_job(job_order, runtimeContext)
        expression_job = ExpressionJob(
            builder,
            self.tool["expression"],
            output_callbacks,
            self.requirements,
            self.hints,
        )
        expression_job.prov_obj = runtimeContext.prov_obj
        yield expression_job
class AbstractOperation(Process):
    """A CWL abstract Operation: it can be loaded and validated, but never run."""

    def job(
        self,
        job_order: CWLObjectType,
        output_callbacks: Optional[OutputCallbackType],
        runtimeContext: RuntimeContext,
    ) -> JobsGeneratorType:
        """Always fail: abstract operations have no execution semantics."""
        raise WorkflowException("Abstract operation cannot be executed.")
def remove_path(f):  # type: (CWLObjectType) -> None
    """Remove the 'path' entry from a CWL File/Directory object, if present."""
    # dict.pop with a default replaces the membership-test-then-delete idiom
    # (one lookup instead of two, and no KeyError when the key is absent).
    f.pop("path", None)
def revmap_file(
    builder: Builder, outdir: str, f: CWLObjectType
) -> Optional[CWLObjectType]:
    """
    Remap a file from internal path to external path.

    For Docker, this maps from the path inside the container to the path
    outside the container. Recognizes files in the pathmapper or remaps
    internal output directories to the external directory.

    Raises WorkflowException if `f` has neither 'location' nor 'path', or if
    its path falls outside both the inner and outer output directories.
    """
    # Ensure outdir is a URI for the prefix comparisons below.
    split = urllib.parse.urlsplit(outdir)
    if not split.scheme:
        outdir = file_uri(str(outdir))

    # builder.outdir is the inner (container/compute node) output directory
    # outdir is the outer (host/storage system) output directory

    if "location" in f and "path" not in f:
        location = cast(str, f["location"])
        if location.startswith("file://"):
            f["path"] = convert_pathsep_to_unix(uri_file_path(location))
        else:
            # Non-file location (e.g. a remote URI): nothing to remap.
            return f

    if "path" in f:
        path = cast(str, f["path"])
        uripath = file_uri(path)
        del f["path"]

        if "basename" not in f:
            f["basename"] = os.path.basename(path)

        if not builder.pathmapper:
            raise ValueError(
                "Do not call revmap_file using a builder that doesn't have a pathmapper."
            )
        revmap_f = builder.pathmapper.reversemap(path)

        # Known, non-writable staged file: use its original (mapped) location.
        if revmap_f and not builder.pathmapper.mapper(revmap_f[0]).type.startswith(
            "Writable"
        ):
            f["location"] = revmap_f[1]
        # Already under the outer output directory.
        elif (
            uripath == outdir
            or uripath.startswith(outdir + os.sep)
            or uripath.startswith(outdir + "/")
        ):
            f["location"] = file_uri(path)
        # Under the inner output directory: translate to the outer one.
        elif (
            path == builder.outdir
            or path.startswith(builder.outdir + os.sep)
            or path.startswith(builder.outdir + "/")
        ):
            f["location"] = builder.fs_access.join(
                outdir, path[len(builder.outdir) + 1 :]
            )
        # Relative path: resolve against the outer output directory.
        elif not os.path.isabs(path):
            f["location"] = builder.fs_access.join(outdir, path)
        else:
            raise WorkflowException(
                "Output file path %s must be within designated output directory (%s) or an input "
                "file pass through." % (path, builder.outdir)
            )
        return f

    raise WorkflowException(
        "Output File object is missing both 'location' " "and 'path' fields: %s" % f
    )
class CallbackJob(object):
    """Pseudo-job that replays cached results instead of executing the tool."""

    def __init__(
        self,
        job: "CommandLineTool",
        output_callback: Optional[OutputCallbackType],
        cachebuilder: Builder,
        jobcache: str,
    ) -> None:
        """Initialize this CallbackJob."""
        self.job = job
        self.output_callback = output_callback
        self.cachebuilder = cachebuilder
        # The cached job directory doubles as this job's output directory.
        self.outdir = jobcache
        self.prov_obj = None  # type: Optional[ProvenanceProfile]

    def run(
        self,
        runtimeContext: RuntimeContext,
        tmpdir_lock: Optional[threading.Lock] = None,
    ) -> None:
        """Collect outputs from the cache directory and report "success"."""
        if self.output_callback:
            self.output_callback(
                self.job.collect_output_ports(
                    self.job.tool["outputs"],
                    self.cachebuilder,
                    self.outdir,
                    getdefault(runtimeContext.compute_checksum, True),
                ),
                "success",
            )
def check_adjust(builder: Builder, file_o: CWLObjectType) -> CWLObjectType:
    """
    Map files to assigned path inside a container.

    We need to also explicitly walk over input, as implicit reassignment
    doesn't reach everything in builder.bindings

    Mutates and returns `file_o`; raises WorkflowException when the basename
    fails the ACCEPTLIST_RE filename filter.
    """
    if not builder.pathmapper:
        raise ValueError(
            "Do not call check_adjust using a builder that doesn't have a pathmapper."
        )
    # Resolve the staged (target) path for this object's location.
    file_o["path"] = path = docker_windows_path_adjust(
        builder.pathmapper.mapper(cast(str, file_o["location"]))[1]
    )
    basename = cast(str, file_o.get("basename"))
    dn, bn = os.path.split(path)
    # Only write fields that actually changed.
    if file_o.get("dirname") != dn:
        file_o["dirname"] = str(dn)
    if basename != bn:
        file_o["basename"] = basename = str(bn)
    if file_o["class"] == "File":
        nr, ne = os.path.splitext(basename)
        if file_o.get("nameroot") != nr:
            file_o["nameroot"] = str(nr)
        if file_o.get("nameext") != ne:
            file_o["nameext"] = str(ne)
    if not ACCEPTLIST_RE.match(basename):
        raise WorkflowException(
            "Invalid filename: '{}' contains illegal characters".format(
                file_o["basename"]
            )
        )
    return file_o
def check_valid_locations(fs_access: StdFsAccess, ob: CWLObjectType) -> None:
    """Raise ValidationException if the object's location does not exist.

    Objects with anonymous ("_:"-prefixed) locations are literal Files or
    Directories with no filesystem presence, so they are not checked.
    """
    location = cast(str, ob["location"])
    if location.startswith("_:"):
        # Anonymous/literal object: nothing on disk to validate.
        # (Was `pass`, which left this guard as dead code and let literal
        # objects fall through to the existence checks below.)
        return
    if ob["class"] == "File" and not fs_access.isfile(location):
        raise ValidationException("Does not exist or is not a File: '%s'" % location)
    if ob["class"] == "Directory" and not fs_access.isdir(location):
        raise ValidationException(
            "Does not exist or is not a Directory: '%s'" % location
        )
# Mapping of output port names to their collected values.
OutputPortsType = Dict[str, Optional[CWLOutputType]]


class ParameterOutputWorkflowException(WorkflowException):
    """WorkflowException that names the output parameter which failed to collect."""

    def __init__(self, msg: str, port: CWLObjectType, **kwargs: Any) -> None:
        super(ParameterOutputWorkflowException, self).__init__(
            "Error collecting output for parameter '%s':\n%s"
            % (shortname(cast(str, port["id"])), msg),
            # NOTE(review): kwargs is passed positionally (as one dict), not
            # expanded with ** -- confirm WorkflowException expects that.
            kwargs,
        )
class CommandLineTool(Process):
    def __init__(
        self, toolpath_object: CommentedMap, loadingContext: LoadingContext
    ) -> None:
        """Initialize this CommandLineTool."""
        super(CommandLineTool, self).__init__(toolpath_object, loadingContext)
        # Carry over the provenance profile (if any) from the loading context.
        self.prov_obj = loadingContext.prov_obj
    def make_job_runner(self, runtimeContext: RuntimeContext) -> Type[JobBase]:
        """Pick the JobBase subclass used to execute this tool.

        Chooses between plain CommandLineJob and the Docker / Singularity /
        udocker container jobs based on DockerRequirement, the MPI
        requirement, and the runtime context.  Raises UnsupportedRequirement
        for contradictory Docker/MPI combinations or when a required
        DockerRequirement meets --no-container.
        """
        dockerReq, dockerRequired = self.get_requirement("DockerRequirement")
        mpiReq, mpiRequired = self.get_requirement(MPIRequirementName)

        if not dockerReq and runtimeContext.use_container:
            # No DockerRequirement on the tool: fall back to the context's
            # default container, if one is configured.
            if runtimeContext.find_default_container is not None:
                default_container = runtimeContext.find_default_container(self)
                if default_container is not None:
                    dockerReq = {
                        "class": "DockerRequirement",
                        "dockerPull": default_container,
                    }
                    if mpiRequired:
                        # With MPI required, the default container is only a hint.
                        self.hints.insert(0, dockerReq)
                        dockerRequired = False
                    else:
                        self.requirements.insert(0, dockerReq)
                        dockerRequired = True

                    if (
                        default_container == windows_default_container_id
                        and runtimeContext.use_container
                        and onWindows()
                    ):
                        # Warn about portability of the Windows default container.
                        _logger.warning(
                            DEFAULT_CONTAINER_MSG,
                            windows_default_container_id,
                            windows_default_container_id,
                        )

        if dockerReq is not None and runtimeContext.use_container:
            if mpiReq is not None:
                _logger.warning("MPIRequirement with containers is a beta feature")
            if runtimeContext.singularity:
                return SingularityCommandLineJob
            elif runtimeContext.user_space_docker_cmd:
                return UDockerCommandLineJob
            if mpiReq is not None:
                # Resolve Docker vs MPI: a requirement beats a hint.
                if mpiRequired:
                    if dockerRequired:
                        raise UnsupportedRequirement(
                            "No support for Docker and MPIRequirement both being required"
                        )
                    else:
                        _logger.warning(
                            "MPI has been required while Docker is hinted, discarding Docker hint(s)"
                        )
                        self.hints = [
                            h for h in self.hints if h["class"] != "DockerRequirement"
                        ]
                        return CommandLineJob
                else:
                    if dockerRequired:
                        _logger.warning(
                            "Docker has been required while MPI is hinted, discarding MPI hint(s)"
                        )
                        self.hints = [
                            h for h in self.hints if h["class"] != MPIRequirementName
                        ]
                    else:
                        raise UnsupportedRequirement(
                            "Both Docker and MPI have been hinted - don't know what to do"
                        )
            return DockerCommandLineJob
        if dockerRequired:
            raise UnsupportedRequirement(
                "--no-container, but this CommandLineTool has "
                "DockerRequirement under 'requirements'."
            )
        return CommandLineJob
def make_path_mapper(
self,
reffiles: List[CWLObjectType],
stagedir: str,
runtimeContext: RuntimeContext,
separateDirs: bool,
) -> PathMapper:
return PathMapper(reffiles, runtimeContext.basedir, stagedir, separateDirs)
    def updatePathmap(
        self, outdir: str, pathmap: PathMapper, fn: CWLObjectType
    ) -> None:
        """Re-target `fn` (plus secondaryFiles and listing, recursively) under `outdir`.

        Only entries whose location is already present in `pathmap` are updated.
        """
        basename = cast(str, fn["basename"])
        if "location" in fn:
            location = cast(str, fn["location"])
            if location in pathmap:
                pathmap.update(
                    location,
                    pathmap.mapper(location).resolved,
                    os.path.join(outdir, basename),
                    # Preserve writability in the mapping type, e.g. "WritableFile".
                    ("Writable" if fn.get("writable") else "") + cast(str, fn["class"]),
                    False,
                )
        for sf in cast(List[CWLObjectType], fn.get("secondaryFiles", [])):
            self.updatePathmap(outdir, pathmap, sf)
        for ls in cast(List[CWLObjectType], fn.get("listing", [])):
            # Directory contents nest under outdir/<basename>.
            self.updatePathmap(
                os.path.join(outdir, cast(str, fn["basename"])), pathmap, ls
            )
    def job(
        self,
        job_order: CWLObjectType,
        output_callbacks: Optional[OutputCallbackType],
        runtimeContext: RuntimeContext,
    ) -> Generator[Union[JobBase, CallbackJob], None, None]:
        """Prepare and yield the runnable job(s) for this CommandLineTool.

        When job caching is enabled (``runtimeContext.cachedir``) and
        WorkReuse is not disabled, a cache key is computed from the command
        line, the stdin/stdout/stderr shortcuts, the input files and the
        relevant requirements; on a cache hit a :class:`CallbackJob` that
        replays the cached output is yielded instead of a real job.

        :param job_order: the input object for this invocation.
        :param output_callbacks: called with (outputs, status) when done.
        :param runtimeContext: runtime configuration (outdir, cachedir, ...).
        """
        workReuse, _ = self.get_requirement("WorkReuse")
        enableReuse = workReuse.get("enableReuse", True) if workReuse else True

        jobname = uniquename(
            runtimeContext.name or shortname(self.tool.get("id", "job"))
        )
        if runtimeContext.cachedir and enableReuse:
            # Build the command line in a location-independent context
            # (/out, /tmp, /stage) so the cache key is stable across
            # working directories.
            cachecontext = runtimeContext.copy()
            cachecontext.outdir = "/out"
            cachecontext.tmpdir = "/tmp"  # nosec
            cachecontext.stagedir = "/stage"
            cachebuilder = self._init_job(job_order, cachecontext)
            cachebuilder.pathmapper = PathMapper(
                cachebuilder.files,
                runtimeContext.basedir,
                cachebuilder.stagedir,
                separateDirs=False,
            )
            _check_adjust = partial(check_adjust, cachebuilder)
            visit_class(
                [cachebuilder.files, cachebuilder.bindings],
                ("File", "Directory"),
                _check_adjust,
            )

            cmdline = flatten(
                list(map(cachebuilder.generate_arg, cachebuilder.bindings))
            )
            docker_req, _ = self.get_requirement("DockerRequirement")
            if docker_req is not None and runtimeContext.use_container:
                dockerimg = docker_req.get("dockerImageId") or docker_req.get(
                    "dockerPull"
                )
            elif (
                runtimeContext.default_container is not None
                and runtimeContext.use_container
            ):
                dockerimg = runtimeContext.default_container
            else:
                dockerimg = None

            if dockerimg is not None:
                cmdline = ["docker", "run", dockerimg] + cmdline
                # not really run using docker, just for hashing purposes

            keydict = {
                "cmdline": cmdline
            }  # type: Dict[str, Union[MutableSequence[Union[str, int]], CWLObjectType]]

            for shortcut in ["stdin", "stdout", "stderr"]:
                if shortcut in self.tool:
                    keydict[shortcut] = self.tool[shortcut]

            def calc_checksum(location: str) -> Optional[str]:
                # Reuse an already-computed checksum for this input file, if
                # one is present (and not the "sha1$hash" placeholder).
                for e in cachebuilder.files:
                    if (
                        "location" in e
                        and e["location"] == location
                        and "checksum" in e
                        and e["checksum"] != "sha1$hash"
                    ):
                        return cast(Optional[str], e["checksum"])
                return None

            for location, fobj in cachebuilder.pathmapper.items():
                if fobj.type == "File":
                    checksum = calc_checksum(location)
                    fobj_stat = os.stat(fobj.resolved)
                    if checksum is not None:
                        keydict[fobj.resolved] = [fobj_stat.st_size, checksum]
                    else:
                        # No checksum available; key on size + mtime (ms).
                        keydict[fobj.resolved] = [
                            fobj_stat.st_size,
                            int(fobj_stat.st_mtime * 1000),
                        ]

            # Requirements/hints that can change the observable behavior of
            # the job and therefore must participate in the cache key.
            interesting = {
                "DockerRequirement",
                "EnvVarRequirement",
                "InitialWorkDirRequirement",
                "ShellCommandRequirement",
                "NetworkAccess",
            }
            for rh in (self.original_requirements, self.original_hints):
                for r in reversed(rh):
                    cls = cast(str, r["class"])
                    if cls in interesting and cls not in keydict:
                        keydict[cls] = r

            keydictstr = json_dumps(keydict, separators=(",", ":"), sort_keys=True)
            cachekey = hashlib.md5(keydictstr.encode("utf-8")).hexdigest()  # nosec

            _logger.debug(
                "[job %s] keydictstr is %s -> %s", jobname, keydictstr, cachekey
            )

            jobcache = os.path.join(runtimeContext.cachedir, cachekey)

            # Create a lockfile to manage cache status.
            jobcachepending = "{}.status".format(jobcache)
            jobcachelock = None
            jobstatus = None

            # Opens the file for read/write, or creates an empty file.
            jobcachelock = open(jobcachepending, "a+")

            # get the shared lock to ensure no other process is trying
            # to write to this cache
            shared_file_lock(jobcachelock)
            jobcachelock.seek(0)
            jobstatus = jobcachelock.read()

            if os.path.isdir(jobcache) and jobstatus == "success":
                if docker_req and runtimeContext.use_container:
                    cachebuilder.outdir = (
                        runtimeContext.docker_outdir or random_outdir()
                    )
                else:
                    cachebuilder.outdir = jobcache

                _logger.info("[job %s] Using cached output in %s", jobname, jobcache)
                yield CallbackJob(self, output_callbacks, cachebuilder, jobcache)
                # we're done with the cache so release lock
                jobcachelock.close()
                return
            else:
                _logger.info(
                    "[job %s] Output of job will be cached in %s", jobname, jobcache
                )

                # turn shared lock into an exclusive lock since we'll
                # be writing the cache directory
                upgrade_lock(jobcachelock)

                shutil.rmtree(jobcache, True)
                os.makedirs(jobcache)
                runtimeContext = runtimeContext.copy()
                runtimeContext.outdir = jobcache

                def update_status_output_callback(
                    output_callbacks: OutputCallbackType,
                    jobcachelock: TextIO,
                    outputs: Optional[CWLObjectType],
                    processStatus: str,
                ) -> None:
                    # save status to the lockfile then release the lock
                    jobcachelock.seek(0)
                    jobcachelock.truncate()
                    jobcachelock.write(processStatus)
                    jobcachelock.close()
                    output_callbacks(outputs, processStatus)

                output_callbacks = partial(
                    update_status_output_callback, output_callbacks, jobcachelock
                )

        builder = self._init_job(job_order, runtimeContext)

        # Keep an unmodified copy of the file references; the builder's own
        # copies are rewritten in place by check_adjust below.
        reffiles = copy.deepcopy(builder.files)

        j = self.make_job_runner(runtimeContext)(
            builder,
            builder.job,
            self.make_path_mapper,
            self.requirements,
            self.hints,
            jobname,
        )
        j.prov_obj = self.prov_obj

        j.successCodes = self.tool.get("successCodes", [])
        j.temporaryFailCodes = self.tool.get("temporaryFailCodes", [])
        j.permanentFailCodes = self.tool.get("permanentFailCodes", [])

        debug = _logger.isEnabledFor(logging.DEBUG)

        if debug:
            _logger.debug(
                "[job %s] initializing from %s%s",
                j.name,
                self.tool.get("id", ""),
                " as part of %s" % runtimeContext.part_of
                if runtimeContext.part_of
                else "",
            )
            _logger.debug("[job %s] %s", j.name, json_dumps(builder.job, indent=4))

        builder.pathmapper = self.make_path_mapper(
            reffiles, builder.stagedir, runtimeContext, True
        )
        builder.requirements = j.requirements

        _check_adjust = partial(check_adjust, builder)

        visit_class(
            [builder.files, builder.bindings], ("File", "Directory"), _check_adjust
        )

        initialWorkdir, _ = self.get_requirement("InitialWorkDirRequirement")
        if initialWorkdir is not None:
            ls = []  # type: List[CWLObjectType]
            if isinstance(initialWorkdir["listing"], str):
                # The whole listing is a single expression string.
                ls = cast(
                    List[CWLObjectType], builder.do_eval(initialWorkdir["listing"])
                )
            else:
                for t in cast(
                    MutableSequence[Union[str, CWLObjectType]],
                    initialWorkdir["listing"],
                ):
                    if isinstance(t, Mapping) and "entry" in t:
                        # Dirent-style entry: evaluate entry/entryname.
                        entry_exp = builder.do_eval(
                            cast(str, t["entry"]), strip_whitespace=False
                        )
                        for entry in aslist(entry_exp):
                            et = {"entry": entry}
                            if "entryname" in t:
                                et["entryname"] = builder.do_eval(
                                    cast(str, t["entryname"])
                                )
                            else:
                                et["entryname"] = None
                            et["writable"] = t.get("writable", False)
                            if et["entry"] is not None:
                                ls.append(et)
                    else:
                        # Plain expression producing File/Directory object(s).
                        initwd_item = builder.do_eval(t)
                        if not initwd_item:
                            continue
                        if isinstance(initwd_item, MutableSequence):
                            ls.extend(cast(List[CWLObjectType], initwd_item))
                        else:
                            ls.append(cast(CWLObjectType, initwd_item))
            for i, t2 in enumerate(ls):
                if "entry" in t2:
                    if isinstance(t2["entry"], str):
                        # String entry becomes a literal File with contents.
                        ls[i] = {
                            "class": "File",
                            "basename": t2["entryname"],
                            "contents": t2["entry"],
                            "writable": t2.get("writable"),
                        }
                    else:
                        if t2.get("entryname") or t2.get("writable"):
                            # Copy before renaming/marking writable so the
                            # original input object is not mutated.
                            t2 = copy.deepcopy(t2)
                            t2entry = cast(CWLObjectType, t2["entry"])
                            if t2.get("entryname"):
                                t2entry["basename"] = t2["entryname"]
                            t2entry["writable"] = t2.get("writable")
                        ls[i] = cast(CWLObjectType, t2["entry"])
            j.generatefiles["listing"] = ls
            for entry in ls:
                self.updatePathmap(builder.outdir, builder.pathmapper, entry)
            visit_class(
                [builder.files, builder.bindings], ("File", "Directory"), _check_adjust
            )

        if debug:
            _logger.debug(
                "[job %s] path mappings is %s",
                j.name,
                json_dumps(
                    {
                        p: builder.pathmapper.mapper(p)
                        for p in builder.pathmapper.files()
                    },
                    indent=4,
                ),
            )

        if self.tool.get("stdin"):
            with SourceLine(self.tool, "stdin", ValidationException, debug):
                j.stdin = cast(str, builder.do_eval(self.tool["stdin"]))
                if j.stdin:
                    reffiles.append({"class": "File", "path": j.stdin})

        if self.tool.get("stderr"):
            with SourceLine(self.tool, "stderr", ValidationException, debug):
                j.stderr = cast(str, builder.do_eval(self.tool["stderr"]))
                if j.stderr:
                    if os.path.isabs(j.stderr) or ".." in j.stderr:
                        raise ValidationException(
                            "stderr must be a relative path, got '%s'" % j.stderr
                        )

        if self.tool.get("stdout"):
            with SourceLine(self.tool, "stdout", ValidationException, debug):
                j.stdout = cast(str, builder.do_eval(self.tool["stdout"]))
                if j.stdout:
                    if os.path.isabs(j.stdout) or ".." in j.stdout or not j.stdout:
                        raise ValidationException(
                            "stdout must be a relative path, got '%s'" % j.stdout
                        )

        if debug:
            _logger.debug(
                "[job %s] command line bindings is %s",
                j.name,
                json_dumps(builder.bindings, indent=4),
            )

        dockerReq, _ = self.get_requirement("DockerRequirement")
        if dockerReq is not None and runtimeContext.use_container:
            # Containerized run: allocate host-side out/tmp/stage dirs.
            out_dir, out_prefix = os.path.split(runtimeContext.tmp_outdir_prefix)
            j.outdir = runtimeContext.outdir or tempfile.mkdtemp(
                prefix=out_prefix, dir=out_dir
            )
            tmpdir_dir, tmpdir_prefix = os.path.split(runtimeContext.tmpdir_prefix)
            j.tmpdir = runtimeContext.tmpdir or tempfile.mkdtemp(
                prefix=tmpdir_prefix, dir=tmpdir_dir
            )
            j.stagedir = tempfile.mkdtemp(prefix=tmpdir_prefix, dir=tmpdir_dir)
        else:
            j.outdir = builder.outdir
            j.tmpdir = builder.tmpdir
            j.stagedir = builder.stagedir

        inplaceUpdateReq, _ = self.get_requirement("InplaceUpdateRequirement")
        if inplaceUpdateReq is not None:
            j.inplace_update = cast(bool, inplaceUpdateReq["inplaceUpdate"])
        normalizeFilesDirs(j.generatefiles)

        readers = {}  # type: Dict[str, CWLObjectType]
        muts = set()  # type: Set[str]

        if builder.mutation_manager is not None:

            def register_mut(f: CWLObjectType) -> None:
                # Track files this job will mutate in place.
                mm = cast(MutationManager, builder.mutation_manager)
                muts.add(cast(str, f["location"]))
                mm.register_mutation(j.name, f)

            def register_reader(f: CWLObjectType) -> None:
                # Track read-only access, unless already registered as mutated.
                mm = cast(MutationManager, builder.mutation_manager)
                if cast(str, f["location"]) not in muts:
                    mm.register_reader(j.name, f)
                    readers[cast(str, f["location"])] = copy.deepcopy(f)

            for li in j.generatefiles["listing"]:
                if li.get("writable") and j.inplace_update:
                    adjustFileObjs(li, register_mut)
                    adjustDirObjs(li, register_mut)
                else:
                    adjustFileObjs(li, register_reader)
                    adjustDirObjs(li, register_reader)

            adjustFileObjs(builder.files, register_reader)
            adjustFileObjs(builder.bindings, register_reader)
            adjustDirObjs(builder.files, register_reader)
            adjustDirObjs(builder.bindings, register_reader)

        timelimit, _ = self.get_requirement("ToolTimeLimit")
        if timelimit is not None:
            with SourceLine(timelimit, "timelimit", ValidationException, debug):
                j.timelimit = cast(
                    Optional[int],
                    builder.do_eval(cast(Union[int, str], timelimit["timelimit"])),
                )
                if not isinstance(j.timelimit, int) or j.timelimit < 0:
                    raise WorkflowException(
                        "timelimit must be an integer >= 0, got: %s" % j.timelimit
                    )

        networkaccess, _ = self.get_requirement("NetworkAccess")
        if networkaccess is not None:
            with SourceLine(networkaccess, "networkAccess", ValidationException, debug):
                j.networkaccess = cast(
                    bool,
                    builder.do_eval(
                        cast(Union[bool, str], networkaccess["networkAccess"])
                    ),
                )
                if not isinstance(j.networkaccess, bool):
                    raise WorkflowException(
                        "networkAccess must be a boolean, got: %s" % j.networkaccess
                    )

        j.environment = {}
        evr, _ = self.get_requirement("EnvVarRequirement")
        if evr is not None:
            for t3 in cast(List[Dict[str, str]], evr["envDef"]):
                j.environment[t3["envName"]] = cast(
                    str, builder.do_eval(t3["envValue"])
                )

        shellcmd, _ = self.get_requirement("ShellCommandRequirement")
        if shellcmd is not None:
            # Join bindings into a single /bin/sh -c command, quoting each
            # argument unless shellQuote is explicitly disabled.
            cmd = []  # type: List[str]
            for b in builder.bindings:
                arg = builder.generate_arg(b)
                if b.get("shellQuote", True):
                    arg = [shellescape.quote(a) for a in aslist(arg)]
                cmd.extend(aslist(arg))
            j.command_line = ["/bin/sh", "-c", " ".join(cmd)]
        else:
            j.command_line = flatten(list(map(builder.generate_arg, builder.bindings)))

        j.pathmapper = builder.pathmapper
        j.collect_outputs = partial(
            self.collect_output_ports,
            self.tool["outputs"],
            builder,
            compute_checksum=getdefault(runtimeContext.compute_checksum, True),
            jobname=jobname,
            readers=readers,
        )
        j.output_callback = output_callbacks

        mpi, _ = self.get_requirement(MPIRequirementName)

        if mpi is not None:
            np = cast(  # From the schema for MPIRequirement.processes
                Union[int, str],
                mpi.get("processes", runtimeContext.mpi_config.default_nproc),
            )
            if isinstance(np, str):
                tmp = builder.do_eval(np)
                if not isinstance(tmp, int):
                    raise TypeError(
                        "{} needs 'processes' to evaluate to an int, got {}".format(
                            MPIRequirementName, type(np)
                        )
                    )
                np = tmp
            j.mpi_procs = np
        yield j
    def collect_output_ports(
        self,
        ports: Union[CommentedSeq, Set[CWLObjectType]],
        builder: Builder,
        outdir: str,
        rcode: int,
        compute_checksum: bool = True,
        jobname: str = "",
        readers: Optional[MutableMapping[str, CWLObjectType]] = None,
    ) -> OutputPortsType:
        """Collect and validate the output object for a finished job.

        If the tool wrote a ``cwl.output.json`` into *outdir*, that file is
        taken verbatim as the output object; otherwise each declared output
        port is collected individually via ``collect_output``.

        :param ports: the tool's declared output parameters.
        :param builder: the Builder used to set up the job.
        :param outdir: the (host-side) output directory of the job.
        :param rcode: process exit code; exposed as ``exitCode`` for CWL
            versions other than v1.0.
        :param readers: reader registrations to release when done.
        :raises WorkflowException: if the collected object fails validation.
        """
        ret = {}  # type: OutputPortsType
        debug = _logger.isEnabledFor(logging.DEBUG)
        cwl_version = self.metadata.get(
            "http://commonwl.org/cwltool#original_cwlVersion", None
        )
        if cwl_version != "v1.0":
            builder.resources["exitCode"] = rcode
        try:
            fs_access = builder.make_fs_access(outdir)
            custom_output = fs_access.join(outdir, "cwl.output.json")
            if fs_access.exists(custom_output):
                # The tool produced its own output object; use it as-is.
                with fs_access.open(custom_output, "r") as f:
                    ret = json.load(f)
                if debug:
                    _logger.debug(
                        "Raw output from %s: %s",
                        custom_output,
                        json_dumps(ret, indent=4),
                    )
            else:
                for i, port in enumerate(ports):
                    with SourceLine(
                        ports,
                        i,
                        partial(ParameterOutputWorkflowException, port=port),
                        debug,
                    ):
                        fragment = shortname(port["id"])
                        ret[fragment] = self.collect_output(
                            port,
                            builder,
                            outdir,
                            fs_access,
                            compute_checksum=compute_checksum,
                        )
            if ret:
                # Map paths back to host-side locations and normalize the
                # File/Directory objects before validation.
                revmap = partial(revmap_file, builder, outdir)
                adjustDirObjs(ret, trim_listing)
                visit_class(ret, ("File", "Directory"), revmap)
                visit_class(ret, ("File", "Directory"), remove_path)
                normalizeFilesDirs(ret)
                visit_class(
                    ret,
                    ("File", "Directory"),
                    partial(check_valid_locations, fs_access),
                )

                if compute_checksum:
                    adjustFileObjs(ret, partial(compute_checksums, fs_access))
            expected_schema = cast(
                Schema, self.names.get_name("outputs_record_schema", None)
            )
            # Non-strict validation: problems are logged as warnings.
            validate_ex(
                expected_schema, ret, strict=False, logger=_logger_validation_warnings
            )
            if ret is not None and builder.mutation_manager is not None:
                adjustFileObjs(ret, builder.mutation_manager.set_generation)
            return ret if ret is not None else {}
        except ValidationException as e:
            raise WorkflowException(
                "Error validating output record. "
                + str(e)
                + "\n in "
                + json_dumps(ret, indent=4)
            ) from e
        finally:
            # Always release reader registrations, even on failure.
            if builder.mutation_manager and readers:
                for r in readers.values():
                    builder.mutation_manager.release_reader(jobname, r)
    def collect_output(
        self,
        schema: CWLObjectType,
        builder: Builder,
        outdir: str,
        fs_access: StdFsAccess,
        compute_checksum: bool = True,
    ) -> Optional[CWLOutputType]:
        """Collect the value for a single output parameter.

        Evaluates the parameter's outputBinding (glob, loadContents,
        outputEval), attaches secondaryFiles and format, and recurses into
        record fields.

        :param schema: the output parameter's schema (possibly containing
            an "outputBinding").
        :param fs_access: file-system access rooted at the host outdir.
        :raises WorkflowException: on glob/expression errors, a missing
            non-optional output, or a missing required secondary file.
        """
        r = []  # type: List[CWLOutputType]
        # NOTE: empty_and_optional is never set True in this version; it only
        # guards the record-output fallthrough at the bottom.
        empty_and_optional = False
        debug = _logger.isEnabledFor(logging.DEBUG)
        if "outputBinding" in schema:
            binding = cast(
                MutableMapping[str, Union[bool, str, List[str]]],
                schema["outputBinding"],
            )
            globpatterns = []  # type: List[str]

            revmap = partial(revmap_file, builder, outdir)

            if "glob" in binding:
                with SourceLine(binding, "glob", WorkflowException, debug):
                    for gb in aslist(binding["glob"]):
                        gb = builder.do_eval(gb)
                        if gb:
                            globpatterns.extend(aslist(gb))

                    for gb in globpatterns:
                        if gb.startswith(builder.outdir):
                            # Pattern is against the inner (job) outdir;
                            # make it relative so it can be applied to the
                            # host outdir instead.
                            gb = gb[len(builder.outdir) + 1 :]
                        elif gb == ".":
                            gb = outdir
                        elif gb.startswith("/"):
                            raise WorkflowException(
                                "glob patterns must not start with '/'"
                            )
                        try:
                            prefix = fs_access.glob(outdir)
                            # Matches sorted with locale collation for stable
                            # ordering; paths rebased onto the inner outdir.
                            r.extend(
                                [
                                    {
                                        "location": g,
                                        "path": fs_access.join(
                                            builder.outdir, g[len(prefix[0]) + 1 :]
                                        ),
                                        "basename": os.path.basename(g),
                                        "nameroot": os.path.splitext(
                                            os.path.basename(g)
                                        )[0],
                                        "nameext": os.path.splitext(
                                            os.path.basename(g)
                                        )[1],
                                        "class": "File"
                                        if fs_access.isfile(g)
                                        else "Directory",
                                    }
                                    for g in sorted(
                                        fs_access.glob(fs_access.join(outdir, gb)),
                                        key=cmp_to_key(
                                            cast(
                                                Callable[[str, str], int],
                                                locale.strcoll,
                                            )
                                        ),
                                    )
                                ]
                            )
                        except (OSError, IOError) as e:
                            _logger.warning(str(e))
                        except Exception:
                            _logger.error(
                                "Unexpected error from fs_access", exc_info=True
                            )
                            raise

                for files in cast(List[Dict[str, Optional[CWLOutputType]]], r):
                    rfile = files.copy()
                    revmap(rfile)
                    if files["class"] == "Directory":
                        ll = schema.get("loadListing") or builder.loadListing
                        if ll and ll != "no_listing":
                            get_listing(fs_access, files, (ll == "deep_listing"))
                    else:
                        if binding.get("loadContents"):
                            with fs_access.open(
                                cast(str, rfile["location"]), "rb"
                            ) as f:
                                files["contents"] = content_limit_respected_read_bytes(
                                    f
                                ).decode("utf-8")
                        if compute_checksum:
                            # Stream the file in 1 MiB chunks to hash it.
                            with fs_access.open(
                                cast(str, rfile["location"]), "rb"
                            ) as f:
                                checksum = hashlib.sha1()  # nosec
                                contents = f.read(1024 * 1024)
                                while contents != b"":
                                    checksum.update(contents)
                                    contents = f.read(1024 * 1024)
                                files["checksum"] = "sha1$%s" % checksum.hexdigest()
                        files["size"] = fs_access.size(cast(str, rfile["location"]))

            # Determine if this output is optional (type union with "null")
            # and whether it is a single File/Directory (vs. an array).
            optional = False
            single = False
            if isinstance(schema["type"], MutableSequence):
                if "null" in schema["type"]:
                    optional = True
                if "File" in schema["type"] or "Directory" in schema["type"]:
                    single = True
            elif schema["type"] == "File" or schema["type"] == "Directory":
                single = True

            if "outputEval" in binding:
                with SourceLine(binding, "outputEval", WorkflowException, debug):
                    result = builder.do_eval(
                        cast(CWLOutputType, binding["outputEval"]), context=r
                    )
            else:
                result = cast(CWLOutputType, r)

            if single:
                # Unwrap a single File/Directory from the match list.
                if not result and not optional:
                    with SourceLine(binding, "glob", WorkflowException, debug):
                        raise WorkflowException(
                            "Did not find output file with glob pattern: '{}'".format(
                                globpatterns
                            )
                        )
                elif not result and optional:
                    pass
                elif isinstance(result, MutableSequence):
                    if len(result) > 1:
                        raise WorkflowException(
                            "Multiple matches for output item that is a single file."
                        )
                    else:
                        result = cast(CWLOutputType, result[0])

            if "secondaryFiles" in schema:
                with SourceLine(schema, "secondaryFiles", WorkflowException, debug):
                    for primary in aslist(result):
                        if isinstance(primary, MutableMapping):
                            primary.setdefault("secondaryFiles", [])
                            # Directory part of the primary path, including
                            # the trailing separator.
                            pathprefix = primary["path"][
                                0 : primary["path"].rindex(os.sep) + 1
                            ]
                            for sf in aslist(schema["secondaryFiles"]):
                                if "required" in sf:
                                    sf_required = builder.do_eval(
                                        sf["required"], context=primary
                                    )
                                else:
                                    sf_required = False

                                if "$(" in sf["pattern"] or "${" in sf["pattern"]:
                                    # Expression pattern: evaluate it.
                                    sfpath = builder.do_eval(
                                        sf["pattern"], context=primary
                                    )
                                else:
                                    # Suffix pattern: apply to the basename.
                                    sfpath = substitute(
                                        primary["basename"], sf["pattern"]
                                    )

                                for sfitem in aslist(sfpath):
                                    if not sfitem:
                                        continue
                                    if isinstance(sfitem, str):
                                        sfitem = {"path": pathprefix + sfitem}
                                    if (
                                        not fs_access.exists(sfitem["path"])
                                        and sf_required
                                    ):
                                        raise WorkflowException(
                                            "Missing required secondary file '%s'"
                                            % (sfitem["path"])
                                        )
                                    if "path" in sfitem and "location" not in sfitem:
                                        revmap(sfitem)
                                    if fs_access.isfile(sfitem["location"]):
                                        sfitem["class"] = "File"
                                        primary["secondaryFiles"].append(sfitem)
                                    elif fs_access.isdir(sfitem["location"]):
                                        sfitem["class"] = "Directory"
                                        primary["secondaryFiles"].append(sfitem)

            if "format" in schema:
                for primary in aslist(result):
                    primary["format"] = builder.do_eval(
                        schema["format"], context=primary
                    )

            # Ensure files point to local references outside of the run environment
            adjustFileObjs(result, revmap)

            if not result and optional:
                # Don't convert zero or empty string to None
                if result in [0, ""]:
                    return result
                # For [] or None, return None
                else:
                    return None

        if (
            not empty_and_optional
            and isinstance(schema["type"], MutableMapping)
            and schema["type"]["type"] == "record"
        ):
            # Record outputs have no binding of their own; collect fields.
            out = {}
            for field in cast(List[CWLObjectType], schema["type"]["fields"]):
                out[shortname(cast(str, field["name"]))] = self.collect_output(
                    field, builder, outdir, fs_access, compute_checksum=compute_checksum
                )
            return out
        return result
| 39.697987 | 101 | 0.501078 |
import copy
import hashlib
import json
import locale
import logging
import os
import re
import shutil
import tempfile
import threading
import urllib
from functools import cmp_to_key, partial
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Set,
TextIO,
Union,
cast,
)
import shellescape
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.avro.schema import Schema
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import file_uri, uri_file_path
from schema_salad.sourceline import SourceLine
from schema_salad.utils import json_dumps
from schema_salad.validate import validate_ex
from typing_extensions import TYPE_CHECKING, Type
from .builder import Builder, content_limit_respected_read_bytes, substitute
from .context import LoadingContext, RuntimeContext, getdefault
from .docker import DockerCommandLineJob
from .errors import UnsupportedRequirement, WorkflowException
from .flatten import flatten
from .job import CommandLineJob, JobBase
from .loghandler import _logger
from .mpi import MPIRequirementName
from .mutation import MutationManager
from .pathmapper import PathMapper
from .process import (
Process,
_logger_validation_warnings,
compute_checksums,
shortname,
uniquename,
)
from .singularity import SingularityCommandLineJob
from .stdfsaccess import StdFsAccess
from .udocker import UDockerCommandLineJob
from .utils import (
CWLObjectType,
CWLOutputType,
DirectoryType,
JobsGeneratorType,
OutputCallbackType,
adjustDirObjs,
adjustFileObjs,
aslist,
convert_pathsep_to_unix,
docker_windows_path_adjust,
get_listing,
normalizeFilesDirs,
onWindows,
random_outdir,
shared_file_lock,
trim_listing,
upgrade_lock,
visit_class,
windows_default_container_id,
)
if TYPE_CHECKING:
from .provenance_profile import ProvenanceProfile
# Filenames must normally match this conservative character set.
ACCEPTLIST_EN_STRICT_RE = re.compile(r"^[a-zA-Z0-9._+-]+$")
# Relaxed policy: accept any filename.
ACCEPTLIST_EN_RELAXED_RE = re.compile(r".*")
# Active policy used by check_adjust; strict by default.
ACCEPTLIST_RE = ACCEPTLIST_EN_STRICT_RE
DEFAULT_CONTAINER_MSG = """
We are on Microsoft Windows and not all components of this CWL description have a
container specified. This means that these steps will be executed in the default container,
which is %s.
Note, this could affect portability if this CWL description relies on non-POSIX features
or commands in this container. For best results add the following to your CWL
description's hints section:
hints:
DockerRequirement:
dockerPull: %s
"""
class ExpressionJob(object):
    """A 'job' that evaluates an ExpressionTool's expression."""

    def __init__(
        self,
        builder: Builder,
        script: str,
        output_callback: Optional[OutputCallbackType],
        requirements: List[CWLObjectType],
        hints: List[CWLObjectType],
        outdir: Optional[str] = None,
        tmpdir: Optional[str] = None,
    ) -> None:
        """Store the builder, expression source and completion callback."""
        self.builder = builder
        self.requirements = requirements
        self.hints = hints
        self.output_callback = output_callback
        self.outdir = outdir
        self.tmpdir = tmpdir
        self.script = script
        self.prov_obj = None  # type: Optional[ProvenanceProfile]

    def run(
        self,
        runtimeContext: RuntimeContext,
        tmpdir_lock: Optional[threading.Lock] = None,
    ) -> None:
        """Evaluate the expression and report via output_callback.

        On evaluation failure, logs a warning and reports "permanentFail"
        through the callback instead of raising.
        """
        try:
            normalizeFilesDirs(self.builder.job)
            ev = self.builder.do_eval(self.script)
            # Result may contain File/Directory objects; normalize them
            # before handing the result back.
            normalizeFilesDirs(
                cast(
                    Optional[
                        Union[
                            MutableSequence[MutableMapping[str, Any]],
                            MutableMapping[str, Any],
                            DirectoryType,
                        ]
                    ],
                    ev,
                )
            )
            if self.output_callback:
                self.output_callback(cast(Optional[CWLObjectType], ev), "success")
        except WorkflowException as err:
            _logger.warning(
                "Failed to evaluate expression:\n%s",
                str(err),
                exc_info=runtimeContext.debug,
            )
            if self.output_callback:
                self.output_callback({}, "permanentFail")
class ExpressionTool(Process):
    """Process subclass for CWL ExpressionTools."""

    def job(
        self,
        job_order: CWLObjectType,
        output_callbacks: Optional[OutputCallbackType],
        runtimeContext: RuntimeContext,
    ) -> Generator[ExpressionJob, None, None]:
        """Yield a single ExpressionJob that evaluates this tool's expression."""
        expr_builder = self._init_job(job_order, runtimeContext)
        expression_job = ExpressionJob(
            expr_builder,
            self.tool["expression"],
            output_callbacks,
            self.requirements,
            self.hints,
        )
        expression_job.prov_obj = runtimeContext.prov_obj
        yield expression_job
class AbstractOperation(Process):
    """Placeholder Process for an abstract CWL Operation."""

    def job(
        self,
        job_order: CWLObjectType,
        output_callbacks: Optional[OutputCallbackType],
        runtimeContext: RuntimeContext,
    ) -> JobsGeneratorType:
        """Always refuse to run: abstract operations have no implementation."""
        message = "Abstract operation cannot be executed."
        raise WorkflowException(message)
def remove_path(f):  # type: (CWLObjectType) -> None
    """Drop the transient "path" field from a File/Directory object, if present."""
    f.pop("path", None)
def revmap_file(
    builder: Builder, outdir: str, f: CWLObjectType
) -> Optional[CWLObjectType]:
    """Remap a File/Directory from the inner (job) path back to the host.

    Given an output object *f* produced inside the job's output directory,
    rewrite its "location" to point at the corresponding path under the
    host-side *outdir*.  Returns *f* (mutated in place), or the unmodified
    object when its location is not a local file URI.

    :raises WorkflowException: if *f* has neither "location" nor "path",
        or its path lies outside the designated output directory.
    :raises ValueError: if the builder has no pathmapper.
    """
    split = urllib.parse.urlsplit(outdir)
    if not split.scheme:
        outdir = file_uri(str(outdir))
    # builder.outdir is the inner (container/compute node) output directory
    # outdir is the outer (host/storage system) output directory
    if "location" in f and "path" not in f:
        location = cast(str, f["location"])
        if location.startswith("file://"):
            f["path"] = convert_pathsep_to_unix(uri_file_path(location))
        else:
            # Non-local location (e.g. remote or literal): leave untouched.
            return f
    if "path" in f:
        path = cast(str, f["path"])
        uripath = file_uri(path)
        del f["path"]
        if "basename" not in f:
            f["basename"] = os.path.basename(path)
        if not builder.pathmapper:
            raise ValueError(
                "Do not call revmap_file using a builder that doesn't have a pathmapper."
            )
        revmap_f = builder.pathmapper.reversemap(path)
        if revmap_f and not builder.pathmapper.mapper(revmap_f[0]).type.startswith(
            "Writable"
        ):
            # Non-writable input passed straight through: restore its
            # original location.
            f["location"] = revmap_f[1]
        elif (
            uripath == outdir
            or uripath.startswith(outdir + os.sep)
            or uripath.startswith(outdir + "/")
        ):
            # Already a host-side path under outdir.
            f["location"] = file_uri(path)
        elif (
            path == builder.outdir
            or path.startswith(builder.outdir + os.sep)
            or path.startswith(builder.outdir + "/")
        ):
            # Inner path: translate to the equivalent host-side path.
            f["location"] = builder.fs_access.join(
                outdir, path[len(builder.outdir) + 1 :]
            )
        elif not os.path.isabs(path):
            f["location"] = builder.fs_access.join(outdir, path)
        else:
            raise WorkflowException(
                "Output file path %s must be within designated output directory (%s) or an input "
                "file pass through." % (path, builder.outdir)
            )
        return f
    raise WorkflowException(
        "Output File object is missing both 'location' " "and 'path' fields: %s" % f
    )
class CallbackJob(object):
    """A pseudo-job that replays cached output instead of running anything."""

    def __init__(
        self,
        job: "CommandLineTool",
        output_callback: Optional[OutputCallbackType],
        cachebuilder: Builder,
        jobcache: str,
    ) -> None:
        """Record the tool, its callback, and the cache directory to reuse."""
        self.job = job
        self.output_callback = output_callback
        self.cachebuilder = cachebuilder
        self.outdir = jobcache
        self.prov_obj = None

    def run(
        self,
        runtimeContext: RuntimeContext,
        tmpdir_lock: Optional[threading.Lock] = None,
    ) -> None:
        """Collect outputs from the cached directory and report success."""
        if self.output_callback:
            # NOTE(review): the fourth positional argument here lands in
            # collect_output_ports' `rcode` parameter, not its
            # `compute_checksum` keyword -- looks like a latent bug; confirm
            # the intended call before changing.
            self.output_callback(
                self.job.collect_output_ports(
                    self.job.tool["outputs"],
                    self.cachebuilder,
                    self.outdir,
                    getdefault(runtimeContext.compute_checksum, True),
                ),
                "success",
            )
def check_adjust(builder: Builder, file_o: CWLObjectType) -> CWLObjectType:
    """Map a File/Directory to its target path and refresh derived fields.

    Rewrites ``path`` via the builder's pathmapper (with Docker-on-Windows
    path adjustment), then recomputes dirname/basename and, for Files,
    nameroot/nameext from the new path.

    :raises WorkflowException: if the resulting basename contains characters
        not allowed by the active ACCEPTLIST_RE policy.
    :raises ValueError: if the builder has no pathmapper.
    """
    if not builder.pathmapper:
        raise ValueError(
            "Do not call check_adjust using a builder that doesn't have a pathmapper."
        )
    file_o["path"] = path = docker_windows_path_adjust(
        builder.pathmapper.mapper(cast(str, file_o["location"]))[1]
    )
    basename = cast(str, file_o.get("basename"))
    dn, bn = os.path.split(path)
    if file_o.get("dirname") != dn:
        file_o["dirname"] = str(dn)
    if basename != bn:
        # Keep `basename` in sync with the updated field for the checks below.
        file_o["basename"] = basename = str(bn)
    if file_o["class"] == "File":
        nr, ne = os.path.splitext(basename)
        if file_o.get("nameroot") != nr:
            file_o["nameroot"] = str(nr)
        if file_o.get("nameext") != ne:
            file_o["nameext"] = str(ne)
    if not ACCEPTLIST_RE.match(basename):
        raise WorkflowException(
            "Invalid filename: '{}' contains illegal characters".format(
                file_o["basename"]
            )
        )
    return file_o
def check_valid_locations(fs_access: StdFsAccess, ob: CWLObjectType) -> None:
    """Verify that *ob*'s location exists and has the declared class.

    Literal/anonymous locations of the form ``_:...`` do not exist on disk,
    so they are exempt from the existence checks.

    :param fs_access: file-system access object used for the checks.
    :param ob: a File or Directory object with a "location" field.
    :raises ValidationException: when the location does not exist or is not
        of the declared class.
    """
    location = cast(str, ob["location"])
    if location.startswith("_:"):
        # Fix: this branch was a no-op `pass`, so literal "_:" locations fell
        # through to the on-disk checks below and could raise spuriously.
        # Return early instead, which is what the dead branch clearly intended.
        return
    if ob["class"] == "File" and not fs_access.isfile(location):
        raise ValidationException("Does not exist or is not a File: '%s'" % location)
    if ob["class"] == "Directory" and not fs_access.isdir(location):
        raise ValidationException(
            "Does not exist or is not a Directory: '%s'" % location
        )
OutputPortsType = Dict[str, Optional[CWLOutputType]]
class ParameterOutputWorkflowException(WorkflowException):
    """WorkflowException that names the output parameter being collected."""

    def __init__(self, msg: str, port: CWLObjectType, **kwargs: Any) -> None:
        # NOTE(review): `kwargs` is passed as a single positional dict rather
        # than expanded with `**kwargs`, so it ends up in Exception.args.
        # Possibly `**kwargs` was intended -- confirm before changing, since
        # callers may rely on the current args shape.
        super(ParameterOutputWorkflowException, self).__init__(
            "Error collecting output for parameter '%s':\n%s"
            % (shortname(cast(str, port["id"])), msg),
            kwargs,
        )
class CommandLineTool(Process):
def __init__(
self, toolpath_object: CommentedMap, loadingContext: LoadingContext
) -> None:
super(CommandLineTool, self).__init__(toolpath_object, loadingContext)
self.prov_obj = loadingContext.prov_obj
    def make_job_runner(self, runtimeContext: RuntimeContext) -> Type[JobBase]:
        """Pick the JobBase subclass that will execute this tool.

        Resolves the interaction between DockerRequirement and
        MPIRequirement (hint vs. requirement on each side), possibly
        injecting a default container, and returns CommandLineJob,
        DockerCommandLineJob, SingularityCommandLineJob or
        UDockerCommandLineJob accordingly.

        :raises UnsupportedRequirement: for impossible Docker/MPI
            combinations, or when containers are disabled but a
            DockerRequirement is in 'requirements'.
        """
        dockerReq, dockerRequired = self.get_requirement("DockerRequirement")
        mpiReq, mpiRequired = self.get_requirement(MPIRequirementName)
        if not dockerReq and runtimeContext.use_container:
            if runtimeContext.find_default_container is not None:
                default_container = runtimeContext.find_default_container(self)
                if default_container is not None:
                    dockerReq = {
                        "class": "DockerRequirement",
                        "dockerPull": default_container,
                    }
                    if mpiRequired:
                        # MPI is required, so the injected container is only
                        # a hint, not a hard requirement.
                        self.hints.insert(0, dockerReq)
                        dockerRequired = False
                    else:
                        self.requirements.insert(0, dockerReq)
                        dockerRequired = True
                    if (
                        default_container == windows_default_container_id
                        and runtimeContext.use_container
                        and onWindows()
                    ):
                        _logger.warning(
                            DEFAULT_CONTAINER_MSG,
                            windows_default_container_id,
                            windows_default_container_id,
                        )
        if dockerReq is not None and runtimeContext.use_container:
            if mpiReq is not None:
                _logger.warning("MPIRequirement with containers is a beta feature")
            if runtimeContext.singularity:
                return SingularityCommandLineJob
            elif runtimeContext.user_space_docker_cmd:
                return UDockerCommandLineJob
            if mpiReq is not None:
                if mpiRequired:
                    if dockerRequired:
                        raise UnsupportedRequirement(
                            "No support for Docker and MPIRequirement both being required"
                        )
                    else:
                        # MPI required beats Docker hinted.
                        _logger.warning(
                            "MPI has been required while Docker is hinted, discarding Docker hint(s)"
                        )
                        self.hints = [
                            h for h in self.hints if h["class"] != "DockerRequirement"
                        ]
                        return CommandLineJob
                else:
                    if dockerRequired:
                        # Docker required beats MPI hinted.
                        _logger.warning(
                            "Docker has been required while MPI is hinted, discarding MPI hint(s)"
                        )
                        self.hints = [
                            h for h in self.hints if h["class"] != MPIRequirementName
                        ]
                    else:
                        raise UnsupportedRequirement(
                            "Both Docker and MPI have been hinted - don't know what to do"
                        )
            return DockerCommandLineJob
        if dockerRequired:
            raise UnsupportedRequirement(
                "--no-container, but this CommandLineTool has "
                "DockerRequirement under 'requirements'."
            )
        return CommandLineJob
def make_path_mapper(
self,
reffiles: List[CWLObjectType],
stagedir: str,
runtimeContext: RuntimeContext,
separateDirs: bool,
) -> PathMapper:
return PathMapper(reffiles, runtimeContext.basedir, stagedir, separateDirs)
    def updatePathmap(
        self, outdir: str, pathmap: PathMapper, fn: CWLObjectType
    ) -> None:
        """Rebase *fn* (and its secondaryFiles/listing) under *outdir*.

        If *fn*'s location is known to the pathmapper, its staging target is
        redirected to outdir/basename, preserving writability in the type
        tag.  Recurses into secondary files and directory listings.
        """
        basename = cast(str, fn["basename"])
        if "location" in fn:
            location = cast(str, fn["location"])
            if location in pathmap:
                pathmap.update(
                    location,
                    pathmap.mapper(location).resolved,
                    os.path.join(outdir, basename),
                    ("Writable" if fn.get("writable") else "") + cast(str, fn["class"]),
                    False,
                )
        for sf in cast(List[CWLObjectType], fn.get("secondaryFiles", [])):
            self.updatePathmap(outdir, pathmap, sf)
        for ls in cast(List[CWLObjectType], fn.get("listing", [])):
            # Directory entries are rebased under the directory itself.
            self.updatePathmap(
                os.path.join(outdir, cast(str, fn["basename"])), pathmap, ls
            )
def job(
self,
job_order: CWLObjectType,
output_callbacks: Optional[OutputCallbackType],
runtimeContext: RuntimeContext,
) -> Generator[Union[JobBase, CallbackJob], None, None]:
workReuse, _ = self.get_requirement("WorkReuse")
enableReuse = workReuse.get("enableReuse", True) if workReuse else True
jobname = uniquename(
runtimeContext.name or shortname(self.tool.get("id", "job"))
)
if runtimeContext.cachedir and enableReuse:
cachecontext = runtimeContext.copy()
cachecontext.outdir = "/out"
cachecontext.tmpdir = "/tmp"
cachecontext.stagedir = "/stage"
cachebuilder = self._init_job(job_order, cachecontext)
cachebuilder.pathmapper = PathMapper(
cachebuilder.files,
runtimeContext.basedir,
cachebuilder.stagedir,
separateDirs=False,
)
_check_adjust = partial(check_adjust, cachebuilder)
visit_class(
[cachebuilder.files, cachebuilder.bindings],
("File", "Directory"),
_check_adjust,
)
cmdline = flatten(
list(map(cachebuilder.generate_arg, cachebuilder.bindings))
)
docker_req, _ = self.get_requirement("DockerRequirement")
if docker_req is not None and runtimeContext.use_container:
dockerimg = docker_req.get("dockerImageId") or docker_req.get(
"dockerPull"
)
elif (
runtimeContext.default_container is not None
and runtimeContext.use_container
):
dockerimg = runtimeContext.default_container
else:
dockerimg = None
if dockerimg is not None:
cmdline = ["docker", "run", dockerimg] + cmdline
keydict = {
"cmdline": cmdline
}
for shortcut in ["stdin", "stdout", "stderr"]:
if shortcut in self.tool:
keydict[shortcut] = self.tool[shortcut]
def calc_checksum(location: str) -> Optional[str]:
for e in cachebuilder.files:
if (
"location" in e
and e["location"] == location
and "checksum" in e
and e["checksum"] != "sha1$hash"
):
return cast(Optional[str], e["checksum"])
return None
for location, fobj in cachebuilder.pathmapper.items():
if fobj.type == "File":
checksum = calc_checksum(location)
fobj_stat = os.stat(fobj.resolved)
if checksum is not None:
keydict[fobj.resolved] = [fobj_stat.st_size, checksum]
else:
keydict[fobj.resolved] = [
fobj_stat.st_size,
int(fobj_stat.st_mtime * 1000),
]
interesting = {
"DockerRequirement",
"EnvVarRequirement",
"InitialWorkDirRequirement",
"ShellCommandRequirement",
"NetworkAccess",
}
for rh in (self.original_requirements, self.original_hints):
for r in reversed(rh):
cls = cast(str, r["class"])
if cls in interesting and cls not in keydict:
keydict[cls] = r
keydictstr = json_dumps(keydict, separators=(",", ":"), sort_keys=True)
cachekey = hashlib.md5(keydictstr.encode("utf-8")).hexdigest()
_logger.debug(
"[job %s] keydictstr is %s -> %s", jobname, keydictstr, cachekey
)
jobcache = os.path.join(runtimeContext.cachedir, cachekey)
jobcachepending = "{}.status".format(jobcache)
jobcachelock = None
jobstatus = None
jobcachelock = open(jobcachepending, "a+")
shared_file_lock(jobcachelock)
jobcachelock.seek(0)
jobstatus = jobcachelock.read()
if os.path.isdir(jobcache) and jobstatus == "success":
if docker_req and runtimeContext.use_container:
cachebuilder.outdir = (
runtimeContext.docker_outdir or random_outdir()
)
else:
cachebuilder.outdir = jobcache
_logger.info("[job %s] Using cached output in %s", jobname, jobcache)
yield CallbackJob(self, output_callbacks, cachebuilder, jobcache)
jobcachelock.close()
return
else:
_logger.info(
"[job %s] Output of job will be cached in %s", jobname, jobcache
)
# turn shared lock into an exclusive lock since we'll
upgrade_lock(jobcachelock)
shutil.rmtree(jobcache, True)
os.makedirs(jobcache)
runtimeContext = runtimeContext.copy()
runtimeContext.outdir = jobcache
def update_status_output_callback(
output_callbacks: OutputCallbackType,
jobcachelock: TextIO,
outputs: Optional[CWLObjectType],
processStatus: str,
) -> None:
jobcachelock.seek(0)
jobcachelock.truncate()
jobcachelock.write(processStatus)
jobcachelock.close()
output_callbacks(outputs, processStatus)
output_callbacks = partial(
update_status_output_callback, output_callbacks, jobcachelock
)
builder = self._init_job(job_order, runtimeContext)
reffiles = copy.deepcopy(builder.files)
j = self.make_job_runner(runtimeContext)(
builder,
builder.job,
self.make_path_mapper,
self.requirements,
self.hints,
jobname,
)
j.prov_obj = self.prov_obj
j.successCodes = self.tool.get("successCodes", [])
j.temporaryFailCodes = self.tool.get("temporaryFailCodes", [])
j.permanentFailCodes = self.tool.get("permanentFailCodes", [])
debug = _logger.isEnabledFor(logging.DEBUG)
if debug:
_logger.debug(
"[job %s] initializing from %s%s",
j.name,
self.tool.get("id", ""),
" as part of %s" % runtimeContext.part_of
if runtimeContext.part_of
else "",
)
_logger.debug("[job %s] %s", j.name, json_dumps(builder.job, indent=4))
builder.pathmapper = self.make_path_mapper(
reffiles, builder.stagedir, runtimeContext, True
)
builder.requirements = j.requirements
_check_adjust = partial(check_adjust, builder)
visit_class(
[builder.files, builder.bindings], ("File", "Directory"), _check_adjust
)
initialWorkdir, _ = self.get_requirement("InitialWorkDirRequirement")
if initialWorkdir is not None:
ls = []
if isinstance(initialWorkdir["listing"], str):
ls = cast(
List[CWLObjectType], builder.do_eval(initialWorkdir["listing"])
)
else:
for t in cast(
MutableSequence[Union[str, CWLObjectType]],
initialWorkdir["listing"],
):
if isinstance(t, Mapping) and "entry" in t:
entry_exp = builder.do_eval(
cast(str, t["entry"]), strip_whitespace=False
)
for entry in aslist(entry_exp):
et = {"entry": entry}
if "entryname" in t:
et["entryname"] = builder.do_eval(
cast(str, t["entryname"])
)
else:
et["entryname"] = None
et["writable"] = t.get("writable", False)
if et["entry"] is not None:
ls.append(et)
else:
initwd_item = builder.do_eval(t)
if not initwd_item:
continue
if isinstance(initwd_item, MutableSequence):
ls.extend(cast(List[CWLObjectType], initwd_item))
else:
ls.append(cast(CWLObjectType, initwd_item))
for i, t2 in enumerate(ls):
if "entry" in t2:
if isinstance(t2["entry"], str):
ls[i] = {
"class": "File",
"basename": t2["entryname"],
"contents": t2["entry"],
"writable": t2.get("writable"),
}
else:
if t2.get("entryname") or t2.get("writable"):
t2 = copy.deepcopy(t2)
t2entry = cast(CWLObjectType, t2["entry"])
if t2.get("entryname"):
t2entry["basename"] = t2["entryname"]
t2entry["writable"] = t2.get("writable")
ls[i] = cast(CWLObjectType, t2["entry"])
j.generatefiles["listing"] = ls
for entry in ls:
self.updatePathmap(builder.outdir, builder.pathmapper, entry)
visit_class(
[builder.files, builder.bindings], ("File", "Directory"), _check_adjust
)
if debug:
_logger.debug(
"[job %s] path mappings is %s",
j.name,
json_dumps(
{
p: builder.pathmapper.mapper(p)
for p in builder.pathmapper.files()
},
indent=4,
),
)
if self.tool.get("stdin"):
with SourceLine(self.tool, "stdin", ValidationException, debug):
j.stdin = cast(str, builder.do_eval(self.tool["stdin"]))
if j.stdin:
reffiles.append({"class": "File", "path": j.stdin})
if self.tool.get("stderr"):
with SourceLine(self.tool, "stderr", ValidationException, debug):
j.stderr = cast(str, builder.do_eval(self.tool["stderr"]))
if j.stderr:
if os.path.isabs(j.stderr) or ".." in j.stderr:
raise ValidationException(
"stderr must be a relative path, got '%s'" % j.stderr
)
if self.tool.get("stdout"):
with SourceLine(self.tool, "stdout", ValidationException, debug):
j.stdout = cast(str, builder.do_eval(self.tool["stdout"]))
if j.stdout:
if os.path.isabs(j.stdout) or ".." in j.stdout or not j.stdout:
raise ValidationException(
"stdout must be a relative path, got '%s'" % j.stdout
)
if debug:
_logger.debug(
"[job %s] command line bindings is %s",
j.name,
json_dumps(builder.bindings, indent=4),
)
dockerReq, _ = self.get_requirement("DockerRequirement")
if dockerReq is not None and runtimeContext.use_container:
out_dir, out_prefix = os.path.split(runtimeContext.tmp_outdir_prefix)
j.outdir = runtimeContext.outdir or tempfile.mkdtemp(
prefix=out_prefix, dir=out_dir
)
tmpdir_dir, tmpdir_prefix = os.path.split(runtimeContext.tmpdir_prefix)
j.tmpdir = runtimeContext.tmpdir or tempfile.mkdtemp(
prefix=tmpdir_prefix, dir=tmpdir_dir
)
j.stagedir = tempfile.mkdtemp(prefix=tmpdir_prefix, dir=tmpdir_dir)
else:
j.outdir = builder.outdir
j.tmpdir = builder.tmpdir
j.stagedir = builder.stagedir
inplaceUpdateReq, _ = self.get_requirement("InplaceUpdateRequirement")
if inplaceUpdateReq is not None:
j.inplace_update = cast(bool, inplaceUpdateReq["inplaceUpdate"])
normalizeFilesDirs(j.generatefiles)
readers = {}
muts = set()
if builder.mutation_manager is not None:
def register_mut(f: CWLObjectType) -> None:
mm = cast(MutationManager, builder.mutation_manager)
muts.add(cast(str, f["location"]))
mm.register_mutation(j.name, f)
def register_reader(f: CWLObjectType) -> None:
mm = cast(MutationManager, builder.mutation_manager)
if cast(str, f["location"]) not in muts:
mm.register_reader(j.name, f)
readers[cast(str, f["location"])] = copy.deepcopy(f)
for li in j.generatefiles["listing"]:
if li.get("writable") and j.inplace_update:
adjustFileObjs(li, register_mut)
adjustDirObjs(li, register_mut)
else:
adjustFileObjs(li, register_reader)
adjustDirObjs(li, register_reader)
adjustFileObjs(builder.files, register_reader)
adjustFileObjs(builder.bindings, register_reader)
adjustDirObjs(builder.files, register_reader)
adjustDirObjs(builder.bindings, register_reader)
timelimit, _ = self.get_requirement("ToolTimeLimit")
if timelimit is not None:
with SourceLine(timelimit, "timelimit", ValidationException, debug):
j.timelimit = cast(
Optional[int],
builder.do_eval(cast(Union[int, str], timelimit["timelimit"])),
)
if not isinstance(j.timelimit, int) or j.timelimit < 0:
raise WorkflowException(
"timelimit must be an integer >= 0, got: %s" % j.timelimit
)
networkaccess, _ = self.get_requirement("NetworkAccess")
if networkaccess is not None:
with SourceLine(networkaccess, "networkAccess", ValidationException, debug):
j.networkaccess = cast(
bool,
builder.do_eval(
cast(Union[bool, str], networkaccess["networkAccess"])
),
)
if not isinstance(j.networkaccess, bool):
raise WorkflowException(
"networkAccess must be a boolean, got: %s" % j.networkaccess
)
j.environment = {}
evr, _ = self.get_requirement("EnvVarRequirement")
if evr is not None:
for t3 in cast(List[Dict[str, str]], evr["envDef"]):
j.environment[t3["envName"]] = cast(
str, builder.do_eval(t3["envValue"])
)
shellcmd, _ = self.get_requirement("ShellCommandRequirement")
if shellcmd is not None:
cmd = []
for b in builder.bindings:
arg = builder.generate_arg(b)
if b.get("shellQuote", True):
arg = [shellescape.quote(a) for a in aslist(arg)]
cmd.extend(aslist(arg))
j.command_line = ["/bin/sh", "-c", " ".join(cmd)]
else:
j.command_line = flatten(list(map(builder.generate_arg, builder.bindings)))
j.pathmapper = builder.pathmapper
j.collect_outputs = partial(
self.collect_output_ports,
self.tool["outputs"],
builder,
compute_checksum=getdefault(runtimeContext.compute_checksum, True),
jobname=jobname,
readers=readers,
)
j.output_callback = output_callbacks
mpi, _ = self.get_requirement(MPIRequirementName)
if mpi is not None:
np = cast(
Union[int, str],
mpi.get("processes", runtimeContext.mpi_config.default_nproc),
)
if isinstance(np, str):
tmp = builder.do_eval(np)
if not isinstance(tmp, int):
raise TypeError(
"{} needs 'processes' to evaluate to an int, got {}".format(
MPIRequirementName, type(np)
)
)
np = tmp
j.mpi_procs = np
yield j
def collect_output_ports(
self,
ports: Union[CommentedSeq, Set[CWLObjectType]],
builder: Builder,
outdir: str,
rcode: int,
compute_checksum: bool = True,
jobname: str = "",
readers: Optional[MutableMapping[str, CWLObjectType]] = None,
) -> OutputPortsType:
ret = {}
debug = _logger.isEnabledFor(logging.DEBUG)
cwl_version = self.metadata.get(
"http://commonwl.org/cwltool#original_cwlVersion", None
)
if cwl_version != "v1.0":
builder.resources["exitCode"] = rcode
try:
fs_access = builder.make_fs_access(outdir)
custom_output = fs_access.join(outdir, "cwl.output.json")
if fs_access.exists(custom_output):
with fs_access.open(custom_output, "r") as f:
ret = json.load(f)
if debug:
_logger.debug(
"Raw output from %s: %s",
custom_output,
json_dumps(ret, indent=4),
)
else:
for i, port in enumerate(ports):
with SourceLine(
ports,
i,
partial(ParameterOutputWorkflowException, port=port),
debug,
):
fragment = shortname(port["id"])
ret[fragment] = self.collect_output(
port,
builder,
outdir,
fs_access,
compute_checksum=compute_checksum,
)
if ret:
revmap = partial(revmap_file, builder, outdir)
adjustDirObjs(ret, trim_listing)
visit_class(ret, ("File", "Directory"), revmap)
visit_class(ret, ("File", "Directory"), remove_path)
normalizeFilesDirs(ret)
visit_class(
ret,
("File", "Directory"),
partial(check_valid_locations, fs_access),
)
if compute_checksum:
adjustFileObjs(ret, partial(compute_checksums, fs_access))
expected_schema = cast(
Schema, self.names.get_name("outputs_record_schema", None)
)
validate_ex(
expected_schema, ret, strict=False, logger=_logger_validation_warnings
)
if ret is not None and builder.mutation_manager is not None:
adjustFileObjs(ret, builder.mutation_manager.set_generation)
return ret if ret is not None else {}
except ValidationException as e:
raise WorkflowException(
"Error validating output record. "
+ str(e)
+ "\n in "
+ json_dumps(ret, indent=4)
) from e
finally:
if builder.mutation_manager and readers:
for r in readers.values():
builder.mutation_manager.release_reader(jobname, r)
def collect_output(
self,
schema: CWLObjectType,
builder: Builder,
outdir: str,
fs_access: StdFsAccess,
compute_checksum: bool = True,
) -> Optional[CWLOutputType]:
r = []
empty_and_optional = False
debug = _logger.isEnabledFor(logging.DEBUG)
if "outputBinding" in schema:
binding = cast(
MutableMapping[str, Union[bool, str, List[str]]],
schema["outputBinding"],
)
globpatterns = []
revmap = partial(revmap_file, builder, outdir)
if "glob" in binding:
with SourceLine(binding, "glob", WorkflowException, debug):
for gb in aslist(binding["glob"]):
gb = builder.do_eval(gb)
if gb:
globpatterns.extend(aslist(gb))
for gb in globpatterns:
if gb.startswith(builder.outdir):
gb = gb[len(builder.outdir) + 1 :]
elif gb == ".":
gb = outdir
elif gb.startswith("/"):
raise WorkflowException(
"glob patterns must not start with '/'"
)
try:
prefix = fs_access.glob(outdir)
r.extend(
[
{
"location": g,
"path": fs_access.join(
builder.outdir, g[len(prefix[0]) + 1 :]
),
"basename": os.path.basename(g),
"nameroot": os.path.splitext(
os.path.basename(g)
)[0],
"nameext": os.path.splitext(
os.path.basename(g)
)[1],
"class": "File"
if fs_access.isfile(g)
else "Directory",
}
for g in sorted(
fs_access.glob(fs_access.join(outdir, gb)),
key=cmp_to_key(
cast(
Callable[[str, str], int],
locale.strcoll,
)
),
)
]
)
except (OSError, IOError) as e:
_logger.warning(str(e))
except Exception:
_logger.error(
"Unexpected error from fs_access", exc_info=True
)
raise
for files in cast(List[Dict[str, Optional[CWLOutputType]]], r):
rfile = files.copy()
revmap(rfile)
if files["class"] == "Directory":
ll = schema.get("loadListing") or builder.loadListing
if ll and ll != "no_listing":
get_listing(fs_access, files, (ll == "deep_listing"))
else:
if binding.get("loadContents"):
with fs_access.open(
cast(str, rfile["location"]), "rb"
) as f:
files["contents"] = content_limit_respected_read_bytes(
f
).decode("utf-8")
if compute_checksum:
with fs_access.open(
cast(str, rfile["location"]), "rb"
) as f:
checksum = hashlib.sha1()
contents = f.read(1024 * 1024)
while contents != b"":
checksum.update(contents)
contents = f.read(1024 * 1024)
files["checksum"] = "sha1$%s" % checksum.hexdigest()
files["size"] = fs_access.size(cast(str, rfile["location"]))
optional = False
single = False
if isinstance(schema["type"], MutableSequence):
if "null" in schema["type"]:
optional = True
if "File" in schema["type"] or "Directory" in schema["type"]:
single = True
elif schema["type"] == "File" or schema["type"] == "Directory":
single = True
if "outputEval" in binding:
with SourceLine(binding, "outputEval", WorkflowException, debug):
result = builder.do_eval(
cast(CWLOutputType, binding["outputEval"]), context=r
)
else:
result = cast(CWLOutputType, r)
if single:
if not result and not optional:
with SourceLine(binding, "glob", WorkflowException, debug):
raise WorkflowException(
"Did not find output file with glob pattern: '{}'".format(
globpatterns
)
)
elif not result and optional:
pass
elif isinstance(result, MutableSequence):
if len(result) > 1:
raise WorkflowException(
"Multiple matches for output item that is a single file."
)
else:
result = cast(CWLOutputType, result[0])
if "secondaryFiles" in schema:
with SourceLine(schema, "secondaryFiles", WorkflowException, debug):
for primary in aslist(result):
if isinstance(primary, MutableMapping):
primary.setdefault("secondaryFiles", [])
pathprefix = primary["path"][
0 : primary["path"].rindex(os.sep) + 1
]
for sf in aslist(schema["secondaryFiles"]):
if "required" in sf:
sf_required = builder.do_eval(
sf["required"], context=primary
)
else:
sf_required = False
if "$(" in sf["pattern"] or "${" in sf["pattern"]:
sfpath = builder.do_eval(
sf["pattern"], context=primary
)
else:
sfpath = substitute(
primary["basename"], sf["pattern"]
)
for sfitem in aslist(sfpath):
if not sfitem:
continue
if isinstance(sfitem, str):
sfitem = {"path": pathprefix + sfitem}
if (
not fs_access.exists(sfitem["path"])
and sf_required
):
raise WorkflowException(
"Missing required secondary file '%s'"
% (sfitem["path"])
)
if "path" in sfitem and "location" not in sfitem:
revmap(sfitem)
if fs_access.isfile(sfitem["location"]):
sfitem["class"] = "File"
primary["secondaryFiles"].append(sfitem)
elif fs_access.isdir(sfitem["location"]):
sfitem["class"] = "Directory"
primary["secondaryFiles"].append(sfitem)
if "format" in schema:
for primary in aslist(result):
primary["format"] = builder.do_eval(
schema["format"], context=primary
)
adjustFileObjs(result, revmap)
if not result and optional:
if result in [0, ""]:
return result
# For [] or None, return None
else:
return None
if (
not empty_and_optional
and isinstance(schema["type"], MutableMapping)
and schema["type"]["type"] == "record"
):
out = {}
for field in cast(List[CWLObjectType], schema["type"]["fields"]):
out[shortname(cast(str, field["name"]))] = self.collect_output(
field, builder, outdir, fs_access, compute_checksum=compute_checksum
)
return out
return result
| true | true |
1c3ab21238dd0f028ba1695c2193bb7d985c1df8 | 1,899 | py | Python | run.py | SimonLaplacing/Lattice_Planner | bb2b7a0311f261b7ad333688044be50585bcb6f5 | [
"MIT"
] | null | null | null | run.py | SimonLaplacing/Lattice_Planner | bb2b7a0311f261b7ad333688044be50585bcb6f5 | [
"MIT"
] | null | null | null | run.py | SimonLaplacing/Lattice_Planner | bb2b7a0311f261b7ad333688044be50585bcb6f5 | [
"MIT"
] | 1 | 2022-02-18T06:13:59.000Z | 2022-02-18T06:13:59.000Z | import glob
import os
import sys
import gym
import carla_gym
import inspect
import argparse
import numpy as np
import os.path as osp
from pathlib import Path
currentPath = osp.dirname(osp.abspath(inspect.getfile(inspect.currentframe())))
# sys.path.insert(1, currentPath + '/agents/stable_baselines/')
import shutil
from config import cfg, log_config_to_file, cfg_from_list, cfg_from_yaml_file
def parse_args_cfgs():
parser = argparse.ArgumentParser()
parser.add_argument('--cfg_file', type=str, default='config.yaml', help='specify the config file')
parser.add_argument('--env', help='environment ID', type=str, default='CarlaGymEnv-v1')
parser.add_argument('--play_mode', type=int, help='Display mode: 0:off, 1:2D, 2:3D ', default=1)
parser.add_argument('--carla_host', metavar='H', default='localhost', help='IP of the host server (default: 127.0.0.1)')
parser.add_argument('-p', '--carla_port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)')
parser.add_argument('--tm_port', default=2000, type=int, help='Traffic Manager TCP port to listen to (default: 8000)')
parser.add_argument('--carla_res', metavar='WIDTHxHEIGHT', default='1280x720', help='window resolution (default: 1280x720)')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
cfg.TAG = Path(args.cfg_file).stem
cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'
return args, cfg
if __name__ == '__main__':
args, cfg = parse_args_cfgs()
print('Env is starting')
env = gym.make(args.env)
if args.play_mode:
env.enable_auto_render()
env.begin_modules(args)
try:
env.reset()
while True:
_, _, done, _ = env.step()
env.render()
if done:
env.reset()
finally:
env.destroy()
| 35.830189 | 128 | 0.679305 | import glob
import os
import sys
import gym
import carla_gym
import inspect
import argparse
import numpy as np
import os.path as osp
from pathlib import Path
currentPath = osp.dirname(osp.abspath(inspect.getfile(inspect.currentframe())))
import shutil
from config import cfg, log_config_to_file, cfg_from_list, cfg_from_yaml_file
def parse_args_cfgs():
parser = argparse.ArgumentParser()
parser.add_argument('--cfg_file', type=str, default='config.yaml', help='specify the config file')
parser.add_argument('--env', help='environment ID', type=str, default='CarlaGymEnv-v1')
parser.add_argument('--play_mode', type=int, help='Display mode: 0:off, 1:2D, 2:3D ', default=1)
parser.add_argument('--carla_host', metavar='H', default='localhost', help='IP of the host server (default: 127.0.0.1)')
parser.add_argument('-p', '--carla_port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)')
parser.add_argument('--tm_port', default=2000, type=int, help='Traffic Manager TCP port to listen to (default: 8000)')
parser.add_argument('--carla_res', metavar='WIDTHxHEIGHT', default='1280x720', help='window resolution (default: 1280x720)')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
cfg.TAG = Path(args.cfg_file).stem
cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])
return args, cfg
if __name__ == '__main__':
args, cfg = parse_args_cfgs()
print('Env is starting')
env = gym.make(args.env)
if args.play_mode:
env.enable_auto_render()
env.begin_modules(args)
try:
env.reset()
while True:
_, _, done, _ = env.step()
env.render()
if done:
env.reset()
finally:
env.destroy()
| true | true |
1c3ab214cffa2f8e86e722767cbaba250cd2f557 | 4,854 | py | Python | provisioning/profile.py | manicmaniac/provisioning | ca29b9ee6f4274489bbf8fbc79b44ebcfc22e66a | [
"MIT"
] | 4 | 2018-11-05T10:33:57.000Z | 2019-10-11T16:12:15.000Z | provisioning/profile.py | manicmaniac/provisioning | ca29b9ee6f4274489bbf8fbc79b44ebcfc22e66a | [
"MIT"
] | null | null | null | provisioning/profile.py | manicmaniac/provisioning | ca29b9ee6f4274489bbf8fbc79b44ebcfc22e66a | [
"MIT"
] | 2 | 2021-01-21T03:45:54.000Z | 2021-12-30T00:55:50.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import plistlib
import uuid
import getpass
import platform
import os
from datetime import datetime
from OpenSSL.crypto import FILETYPE_ASN1, load_certificate
from entitlements import Entitlements
class ProvisioningProfile(object):
'''
Apple's provisioning profile.
'''
def __init__(self, data):
self._data = data
self._plist = self._extract_plist(data)
self._developer_certificates = None
self._entitlements = None
self._uuid = None
def is_expired(self, date=datetime.now()):
'''
:param date: date to compare
:type date: datetime.datetime
:returns: True if expired
:rtype: bool
'''
return self.expiration_date < date
def _extract_plist(self, data):
'''
Simplified-PKCS7 DEFINITIONS EXPLICIT TAGS ::= BEGIN
SignedXML ::= SEQUENCE {
contentType OBJECT IDENTIFIER, -- { 1 2 840 113549 1 7 2 }
content [0] SEQUENCE {
version INTEGER,
digestAlgorithms ANY,
contentInfo SEQUENCE {
contentType OBJECT IDENTIFIER, -- { 1 2 840 113549 1 7 1 }
contentXML [0] OCTET STRING
},
...
}
}
END
'''
try:
from pyasn1.codec.der import decoder
except ImportError:
plist = data[62, data.rfind('</plist>')]
else:
plist = str(decoder.decode(data)[0][1][2][1])
return plistlib.readPlistFromString(plist)
@property
def data(self):
'''
:returns: encrypted binary
:rtype: str
'''
return self._data
@property
def application_identifier_prefix(self):
'''
:returns: ApplicationIdentifierPrefix
:rtype: str
'''
return self._plist['ApplicationIdentifierPrefix']
@property
def creation_date(self):
'''
:returns: CreationDate
:rtype: datetime.datetime
'''
return self._plist['CreationDate']
@property
def developer_certificates(self):
'''
:returns: DeveloperCertificates
:rtype: list of OpenSSL.crypto.X509 instance
'''
if self._developer_certificates is None:
self._developer_certificates = []
for item in self._plist['DeveloperCertificates']:
certificate = load_certificate(FILETYPE_ASN1, item.data)
self._developer_certificates.append(certificate)
return self._developer_certificates
@property
def entitlements(self):
'''
:returns: Entitlements
:rtype: provisioning.entitlements.Entitlements
'''
if self._entitlements is None:
self._entitlements = Entitlements.from_dict(self._plist['Entitlements'])
return self._entitlements
@property
def expiration_date(self):
'''
:returns: ExpirationDate
:rtype: datetime.datetime
'''
return self._plist['ExpirationDate']
@property
def name(self):
'''
:returns: Name
:rtype: str
'''
return self._plist['Name']
@property
def provisioned_devices(self):
'''
:returns: UDIDs of ProvisionedDevices
:rtype: list of str
'''
return self._plist['ProvisionedDevices']
@property
def time_to_live(self):
'''
:returns: TimeToLive
:rtype: int
'''
return self._plist['TimeToLive']
@property
def uuid(self):
'''
:returns: UUID
:rtype: uuid.UUID
'''
if self._uuid is None:
self._uuid = uuid.UUID(self._plist['UUID'])
return self._uuid
@property
def version(self):
'''
:returns: Version
:rtype: str
'''
return str(self._plist['Version'])
PROVISIONING_PROFILES_DIR = '/Users/{user}/Library/MobileDevice/Provisioning Profiles'
def stored_provisioning_profiles(user=getpass.getuser()):
'''
:param user: user
:type user: str
:returns: ProvisioningProfiles stored in user directory
:rtype: list of provisioning.profile.ProvisioningProfile
'''
if platform.system() != 'Darwin':
raise OSError('only permitted on OSX.')
base_path = PROVISIONING_PROFILES_DIR.format(user=user)
provisioning_profiles = []
for item in os.listdir(base_path):
path = os.path.join(base_path, item)
if path.endswith('.provisioningprofile') or path.endswith('.mobileprovision'):
with open(path) as f:
data = f.read()
provisioning_profiles.append(ProvisioningProfile(data))
return provisioning_profiles
| 27.117318 | 86 | 0.585909 |
import plistlib
import uuid
import getpass
import platform
import os
from datetime import datetime
from OpenSSL.crypto import FILETYPE_ASN1, load_certificate
from entitlements import Entitlements
class ProvisioningProfile(object):
def __init__(self, data):
self._data = data
self._plist = self._extract_plist(data)
self._developer_certificates = None
self._entitlements = None
self._uuid = None
def is_expired(self, date=datetime.now()):
return self.expiration_date < date
def _extract_plist(self, data):
try:
from pyasn1.codec.der import decoder
except ImportError:
plist = data[62, data.rfind('</plist>')]
else:
plist = str(decoder.decode(data)[0][1][2][1])
return plistlib.readPlistFromString(plist)
@property
def data(self):
return self._data
@property
def application_identifier_prefix(self):
return self._plist['ApplicationIdentifierPrefix']
@property
def creation_date(self):
return self._plist['CreationDate']
@property
def developer_certificates(self):
if self._developer_certificates is None:
self._developer_certificates = []
for item in self._plist['DeveloperCertificates']:
certificate = load_certificate(FILETYPE_ASN1, item.data)
self._developer_certificates.append(certificate)
return self._developer_certificates
@property
def entitlements(self):
if self._entitlements is None:
self._entitlements = Entitlements.from_dict(self._plist['Entitlements'])
return self._entitlements
@property
def expiration_date(self):
return self._plist['ExpirationDate']
@property
def name(self):
return self._plist['Name']
@property
def provisioned_devices(self):
return self._plist['ProvisionedDevices']
@property
def time_to_live(self):
return self._plist['TimeToLive']
@property
def uuid(self):
if self._uuid is None:
self._uuid = uuid.UUID(self._plist['UUID'])
return self._uuid
@property
def version(self):
return str(self._plist['Version'])
PROVISIONING_PROFILES_DIR = '/Users/{user}/Library/MobileDevice/Provisioning Profiles'
def stored_provisioning_profiles(user=getpass.getuser()):
if platform.system() != 'Darwin':
raise OSError('only permitted on OSX.')
base_path = PROVISIONING_PROFILES_DIR.format(user=user)
provisioning_profiles = []
for item in os.listdir(base_path):
path = os.path.join(base_path, item)
if path.endswith('.provisioningprofile') or path.endswith('.mobileprovision'):
with open(path) as f:
data = f.read()
provisioning_profiles.append(ProvisioningProfile(data))
return provisioning_profiles
| true | true |
1c3ab2c57a386d99a67787a8c1d69ac297abe034 | 2,586 | py | Python | securitycenter/noxfile.py | deryrahman/google-cloud-python | b55058c4b2328fde32f29bfd8ea04708fcc578e0 | [
"Apache-2.0"
] | 1 | 2020-10-25T04:39:41.000Z | 2020-10-25T04:39:41.000Z | securitycenter/noxfile.py | deryrahman/google-cloud-python | b55058c4b2328fde32f29bfd8ea04708fcc578e0 | [
"Apache-2.0"
] | 4 | 2018-11-13T22:15:36.000Z | 2018-12-07T18:31:38.000Z | securitycenter/noxfile.py | deryrahman/google-cloud-python | b55058c4b2328fde32f29bfd8ea04708fcc578e0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (
os.path.join('..', 'api_core'),
os.path.join('..', 'core'),
)
def default(session):
"""Run the unit test suite.
This is intended to be run **without** an interpreter set, so
that the current ``python`` (on the ``PATH``) or the version of
Python corresponding to the ``nox`` binary the ``PATH`` can
run the tests.
"""
session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)
session.install('-e', '.')
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.securitycenter_v1beta1',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=89', # TODO: Coverage should be raised to 97%
os.path.join('tests', 'unit'),
*session.posargs
)
@nox.session(python=['2.7', '3.5', '3.6', '3.7'])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python='3.6')
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install('flake8', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google', 'tests')
@nox.session(python='3.6')
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install('docutils', 'pygments')
session.run('python', 'setup.py', 'check', '--restructuredtext',
'--strict')
@nox.session(python='3.6')
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.chdir(os.path.dirname(__file__))
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
| 28.733333 | 75 | 0.651585 |
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (
os.path.join('..', 'api_core'),
os.path.join('..', 'core'),
)
def default(session):
session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)
session.install('-e', '.')
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.securitycenter_v1beta1',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=89',
os.path.join('tests', 'unit'),
*session.posargs
)
@nox.session(python=['2.7', '3.5', '3.6', '3.7'])
def unit(session):
default(session)
@nox.session(python='3.6')
def lint(session):
session.install('flake8', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google', 'tests')
@nox.session(python='3.6')
def lint_setup_py(session):
session.install('docutils', 'pygments')
session.run('python', 'setup.py', 'check', '--restructuredtext',
'--strict')
@nox.session(python='3.6')
def cover(session):
session.chdir(os.path.dirname(__file__))
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
| true | true |
1c3ab3b2e1c2b3127edc2cd733cf4b3563ef3be9 | 1,793 | py | Python | setup.py | tylerdave/baxter | 4c972e060c21f5d2b6d55dbbdb4407abf2e41cb1 | [
"MIT"
] | null | null | null | setup.py | tylerdave/baxter | 4c972e060c21f5d2b6d55dbbdb4407abf2e41cb1 | [
"MIT"
] | null | null | null | setup.py | tylerdave/baxter | 4c972e060c21f5d2b6d55dbbdb4407abf2e41cb1 | [
"MIT"
] | null | null | null | """A setuptools based setup module for Baxter"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from codecs import open
from os import path
from setuptools import setup, find_packages
import versioneer
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'HISTORY.rst'), encoding='utf-8') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
'click',
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='Baxter',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Callback handling and dispatching server",
long_description=readme + '\n\n' + history,
author="Dave Forgac",
author_email='tylerdave@tylerdave.com',
url='https://github.com/tylerdave/baxter',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
entry_points={
'console_scripts':[
'baxter=baxter.cli:cli',
],
},
include_package_data=True,
install_requires=requirements,
license="MIT",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
)
| 29.393443 | 76 | 0.643614 |
from codecs import open
from os import path
from setuptools import setup, find_packages
import versioneer
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'HISTORY.rst'), encoding='utf-8') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
'click',
]
test_requirements = [
]
setup(
name='Baxter',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Callback handling and dispatching server",
long_description=readme + '\n\n' + history,
author="Dave Forgac",
author_email='tylerdave@tylerdave.com',
url='https://github.com/tylerdave/baxter',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
entry_points={
'console_scripts':[
'baxter=baxter.cli:cli',
],
},
include_package_data=True,
install_requires=requirements,
license="MIT",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
)
| true | true |
1c3ab5d14a60389fd672956ef912ecce50ed78dc | 2,359 | py | Python | src/python/zensols/garmdown/backup.py | garmin-data/garmdown | 5f466ebb0b9450890d5fc75336ccafa3654a9464 | [
"MIT"
] | 14 | 2020-01-13T10:39:35.000Z | 2021-12-19T13:17:46.000Z | src/python/zensols/garmdown/backup.py | garmin-data/garmdown | 5f466ebb0b9450890d5fc75336ccafa3654a9464 | [
"MIT"
] | 4 | 2020-02-28T08:39:17.000Z | 2021-02-28T19:05:06.000Z | src/python/zensols/garmdown/backup.py | garmin-data/garmdown | 5f466ebb0b9450890d5fc75336ccafa3654a9464 | [
"MIT"
] | 6 | 2020-04-22T06:12:07.000Z | 2022-01-02T13:25:24.000Z | import logging
from pathlib import Path
from datetime import datetime
import shutil as su
from zensols.persist import persisted
from zensols.garmdown import (
Backup,
Persister,
)
logger = logging.getLogger(__name__)
class Backuper(object):
"""Backup the SQLite database on a periodic basis.
"""
def __init__(self, config):
"""Initialize.
:param config: the application configuration
"""
self.config = config
self.backup_params = self.config.populate(section='backup')
@property
@persisted('_persister')
def persister(self):
return Persister(self.config)
@property
@persisted('__backup_dir', cache_global=False)
def _backup_dir(self):
"""Return the directory to where we back up."""
backup_dir = self.config.db_backup_dir
if not backup_dir.exists():
logger.info(f'creating backup directory {backup_dir}')
backup_dir.mkdir(parents=True)
return backup_dir
def _execute(self):
"""Execute the backup of the SQLite database."""
persister = self.persister
backup_dir = self._backup_dir
src = persister.db_file
dst = Path(backup_dir, f'{src.name}-{Backup.timestr_from_datetime()}')
backup = Backup(dst)
logger.info(f'backing up database {src} -> {dst}')
su.copy(src, dst)
persister.insert_backup(backup)
def backup(self, force=False):
"""Backup the SQLite if the last backup time is older than what's specified in
the configuration.
:param force: if True, execute the backup regardless
"""
backup = self.persister.get_last_backup()
if force:
do_backup = True
else:
if backup is None:
logger.info('no recorded backup')
do_backup = True
else:
logger.debug(f'last backup: {backup}')
diff = datetime.now() - backup.time
diff_days = diff.days
logger.info(f'days since last backup: {diff_days} and we ' +
f'backup every {self.backup_params.days} days')
do_backup = diff_days >= self.backup_params.days
logger.debug(f'backing up: {do_backup}')
if do_backup:
self._execute()
| 31.039474 | 86 | 0.607037 | import logging
from pathlib import Path
from datetime import datetime
import shutil as su
from zensols.persist import persisted
from zensols.garmdown import (
Backup,
Persister,
)
logger = logging.getLogger(__name__)
class Backuper(object):
def __init__(self, config):
self.config = config
self.backup_params = self.config.populate(section='backup')
@property
@persisted('_persister')
def persister(self):
return Persister(self.config)
@property
@persisted('__backup_dir', cache_global=False)
def _backup_dir(self):
backup_dir = self.config.db_backup_dir
if not backup_dir.exists():
logger.info(f'creating backup directory {backup_dir}')
backup_dir.mkdir(parents=True)
return backup_dir
def _execute(self):
persister = self.persister
backup_dir = self._backup_dir
src = persister.db_file
dst = Path(backup_dir, f'{src.name}-{Backup.timestr_from_datetime()}')
backup = Backup(dst)
logger.info(f'backing up database {src} -> {dst}')
su.copy(src, dst)
persister.insert_backup(backup)
def backup(self, force=False):
backup = self.persister.get_last_backup()
if force:
do_backup = True
else:
if backup is None:
logger.info('no recorded backup')
do_backup = True
else:
logger.debug(f'last backup: {backup}')
diff = datetime.now() - backup.time
diff_days = diff.days
logger.info(f'days since last backup: {diff_days} and we ' +
f'backup every {self.backup_params.days} days')
do_backup = diff_days >= self.backup_params.days
logger.debug(f'backing up: {do_backup}')
if do_backup:
self._execute()
| true | true |
1c3ab77fe89a139978bf23a2f6badb56e94347eb | 463 | py | Python | divisors.py | nikithaanil/practice_python | 0afc06573dc2df684bc5afcf77d47057fe4811bb | [
"MIT"
] | null | null | null | divisors.py | nikithaanil/practice_python | 0afc06573dc2df684bc5afcf77d47057fe4811bb | [
"MIT"
] | null | null | null | divisors.py | nikithaanil/practice_python | 0afc06573dc2df684bc5afcf77d47057fe4811bb | [
"MIT"
] | null | null | null | '''Create a program that asks the user for a number and then prints out a list of all the divisors of that number.
(If you don’t know what a divisor is, it is a number that divides evenly into another number.
For example, 13 is a divisor of 26 because 26 / 13 has no remainder.)'''
num=int(input("enter a number: "))
limit=num+1
divisor_list=[]
for i in range(1,limit):
if num % i == 0:
divisor_list.append(i)
#print(i)
print(divisor_list)
| 30.866667 | 114 | 0.693305 |
num=int(input("enter a number: "))
limit=num+1
divisor_list=[]
for i in range(1,limit):
if num % i == 0:
divisor_list.append(i)
print(divisor_list)
| true | true |
1c3ab89982bccb51263a24e9dabba740aa178000 | 246 | py | Python | examples/congratulations_app/setup.py | maximsakhno/galo-ioc | d300cc0e63e6ad375b7d2e75ac2b2e2fda30da4f | [
"MIT"
] | 9 | 2022-01-16T11:45:00.000Z | 2022-03-23T07:42:24.000Z | examples/congratulations_app/setup.py | maximsakhno/galo-ioc | d300cc0e63e6ad375b7d2e75ac2b2e2fda30da4f | [
"MIT"
] | 2 | 2022-01-16T12:03:14.000Z | 2022-01-16T12:11:27.000Z | examples/congratulations_app/setup.py | maximsakhno/galo-ioc | d300cc0e63e6ad375b7d2e75ac2b2e2fda30da4f | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name="congratulations-app",
version="0.1.0",
author="Maxim Sakhno",
author_email="maxim.sakhno@iqtek.ru",
packages=find_packages(where="src"),
package_dir={"": "src"},
)
| 22.363636 | 43 | 0.670732 | from setuptools import find_packages, setup
setup(
name="congratulations-app",
version="0.1.0",
author="Maxim Sakhno",
author_email="maxim.sakhno@iqtek.ru",
packages=find_packages(where="src"),
package_dir={"": "src"},
)
| true | true |
1c3ab918d5b7e639c71ebfccc8ec4f95dda7aa5e | 24,047 | py | Python | plugins/keepkey/qt.py | D3m0nKingx/electrum-ganja | be204713107626f3e334e9fd5974c044a9f2ffb6 | [
"MIT"
] | null | null | null | plugins/keepkey/qt.py | D3m0nKingx/electrum-ganja | be204713107626f3e334e9fd5974c044a9f2ffb6 | [
"MIT"
] | null | null | null | plugins/keepkey/qt.py | D3m0nKingx/electrum-ganja | be204713107626f3e334e9fd5974c044a9f2ffb6 | [
"MIT"
] | null | null | null | from functools import partial
import threading
from PyQt5.Qt import Qt
from PyQt5.Qt import QGridLayout, QInputDialog, QPushButton
from PyQt5.Qt import QVBoxLayout, QLabel
from electrum_ganja_gui.qt.util import *
from electrum_ganja.i18n import _
from electrum_ganja.plugins import hook, DeviceMgr
from electrum_ganja.util import PrintError, UserCancelled, bh2u
from electrum_ganja.wallet import Wallet, Standard_Wallet
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from .keepkey import KeepKeyPlugin, TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
PASSPHRASE_HELP_SHORT =_(
"Passphrases allow you to access new wallets, each "
"hidden behind a particular case-sensitive passphrase.")
PASSPHRASE_HELP = PASSPHRASE_HELP_SHORT + " " + _(
"You need to create a separate Electrum-Ganja wallet for each passphrase "
"you use as they each generate different addresses. Changing "
"your passphrase does not lose other wallets, each is still "
"accessible behind its own passphrase.")
RECOMMEND_PIN = _(
"You should enable PIN protection. Your PIN is the only protection "
"for your Ganjacoins if your device is lost or stolen.")
PASSPHRASE_NOT_PIN = _(
"If you forget a passphrase you will be unable to access any "
"Ganjacoins in the wallet behind it. A passphrase is not a PIN. "
"Only change this if you are sure you understand it.")
CHARACTER_RECOVERY = (
"Use the recovery cipher shown on your device to input your seed words. "
"The cipher changes with every keypress.\n"
"After at most 4 letters the device will auto-complete a word.\n"
"Press SPACE or the Accept Word button to accept the device's auto-"
"completed word and advance to the next one.\n"
"Press BACKSPACE to go back a character or word.\n"
"Press ENTER or the Seed Entered button once the last word in your "
"seed is auto-completed.")
class CharacterButton(QPushButton):
def __init__(self, text=None):
QPushButton.__init__(self, text)
def keyPressEvent(self, event):
event.setAccepted(False) # Pass through Enter and Space keys
class CharacterDialog(WindowModalDialog):
def __init__(self, parent):
super(CharacterDialog, self).__init__(parent)
self.setWindowTitle(_("KeepKey Seed Recovery"))
self.character_pos = 0
self.word_pos = 0
self.loop = QEventLoop()
self.word_help = QLabel()
self.char_buttons = []
vbox = QVBoxLayout(self)
vbox.addWidget(WWLabel(CHARACTER_RECOVERY))
hbox = QHBoxLayout()
hbox.addWidget(self.word_help)
for i in range(4):
char_button = CharacterButton('*')
char_button.setMaximumWidth(36)
self.char_buttons.append(char_button)
hbox.addWidget(char_button)
self.accept_button = CharacterButton(_("Accept Word"))
self.accept_button.clicked.connect(partial(self.process_key, 32))
self.rejected.connect(partial(self.loop.exit, 1))
hbox.addWidget(self.accept_button)
hbox.addStretch(1)
vbox.addLayout(hbox)
self.finished_button = QPushButton(_("Seed Entered"))
self.cancel_button = QPushButton(_("Cancel"))
self.finished_button.clicked.connect(partial(self.process_key,
Qt.Key_Return))
self.cancel_button.clicked.connect(self.rejected)
buttons = Buttons(self.finished_button, self.cancel_button)
vbox.addSpacing(40)
vbox.addLayout(buttons)
self.refresh()
self.show()
def refresh(self):
self.word_help.setText("Enter seed word %2d:" % (self.word_pos + 1))
self.accept_button.setEnabled(self.character_pos >= 3)
self.finished_button.setEnabled((self.word_pos in (11, 17, 23)
and self.character_pos >= 3))
for n, button in enumerate(self.char_buttons):
button.setEnabled(n == self.character_pos)
if n == self.character_pos:
button.setFocus()
def is_valid_alpha_space(self, key):
# Auto-completion requires at least 3 characters
if key == ord(' ') and self.character_pos >= 3:
return True
# Firmware aborts protocol if the 5th character is non-space
if self.character_pos >= 4:
return False
return (key >= ord('a') and key <= ord('z')
or (key >= ord('A') and key <= ord('Z')))
def process_key(self, key):
self.data = None
if key == Qt.Key_Return and self.finished_button.isEnabled():
self.data = {'done': True}
elif key == Qt.Key_Backspace and (self.word_pos or self.character_pos):
self.data = {'delete': True}
elif self.is_valid_alpha_space(key):
self.data = {'character': chr(key).lower()}
if self.data:
self.loop.exit(0)
def keyPressEvent(self, event):
self.process_key(event.key())
if not self.data:
QDialog.keyPressEvent(self, event)
def get_char(self, word_pos, character_pos):
self.word_pos = word_pos
self.character_pos = character_pos
self.refresh()
if self.loop.exec_():
self.data = None # User cancelled
class QtHandler(QtHandlerBase):
char_signal = pyqtSignal(object)
pin_signal = pyqtSignal(object)
close_char_dialog_signal = pyqtSignal()
def __init__(self, win, pin_matrix_widget_class, device):
super(QtHandler, self).__init__(win, device)
self.char_signal.connect(self.update_character_dialog)
self.pin_signal.connect(self.pin_dialog)
self.close_char_dialog_signal.connect(self._close_char_dialog)
self.pin_matrix_widget_class = pin_matrix_widget_class
self.character_dialog = None
def get_char(self, msg):
self.done.clear()
self.char_signal.emit(msg)
self.done.wait()
data = self.character_dialog.data
if not data or 'done' in data:
self.close_char_dialog_signal.emit()
return data
def _close_char_dialog(self):
if self.character_dialog:
self.character_dialog.accept()
self.character_dialog = None
def get_pin(self, msg):
self.done.clear()
self.pin_signal.emit(msg)
self.done.wait()
return self.response
def pin_dialog(self, msg):
# Needed e.g. when resetting a device
self.clear_dialog()
dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
matrix = self.pin_matrix_widget_class()
vbox = QVBoxLayout()
vbox.addWidget(QLabel(msg))
vbox.addWidget(matrix)
vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
self.response = str(matrix.get_value())
self.done.set()
def update_character_dialog(self, msg):
if not self.character_dialog:
self.character_dialog = CharacterDialog(self.top_level_window())
self.character_dialog.get_char(msg.word_pos, msg.character_pos)
self.done.set()
class QtPlugin(QtPluginBase):
# Derived classes must provide the following class-static variables:
# icon_file
# pin_matrix_widget_class
def create_handler(self, window):
return QtHandler(window, self.pin_matrix_widget_class(), self.device)
@hook
def receive_menu(self, menu, addrs, wallet):
if type(wallet) is not Standard_Wallet:
return
keystore = wallet.get_keystore()
if type(keystore) == self.keystore_class and len(addrs) == 1:
def show_address():
keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
menu.addAction(_("Show on {}").format(self.device), show_address)
def show_settings_dialog(self, window, keystore):
device_id = self.choose_device(window, keystore)
if device_id:
SettingsDialog(window, self, keystore, device_id).exec_()
def request_trezor_init_settings(self, wizard, method, device):
vbox = QVBoxLayout()
next_enabled = True
label = QLabel(_("Enter a label to name your device:"))
name = QLineEdit()
hl = QHBoxLayout()
hl.addWidget(label)
hl.addWidget(name)
hl.addStretch(1)
vbox.addLayout(hl)
def clean_text(widget):
text = widget.toPlainText().strip()
return ' '.join(text.split())
if method in [TIM_NEW, TIM_RECOVER]:
gb = QGroupBox()
hbox1 = QHBoxLayout()
gb.setLayout(hbox1)
# KeepKey recovery doesn't need a word count
if method == TIM_NEW:
vbox.addWidget(gb)
gb.setTitle(_("Select your seed length:"))
bg = QButtonGroup()
for i, count in enumerate([12, 18, 24]):
rb = QRadioButton(gb)
rb.setText(_("{} words").format(count))
bg.addButton(rb)
bg.setId(rb, i)
hbox1.addWidget(rb)
rb.setChecked(True)
cb_pin = QCheckBox(_('Enable PIN protection'))
cb_pin.setChecked(True)
else:
text = QTextEdit()
text.setMaximumHeight(60)
if method == TIM_MNEMONIC:
msg = _("Enter your BIP39 mnemonic:")
else:
msg = _("Enter the master private key beginning with xprv:")
def set_enabled():
from electrum_ganja.keystore import is_xprv
wizard.next_button.setEnabled(is_xprv(clean_text(text)))
text.textChanged.connect(set_enabled)
next_enabled = False
vbox.addWidget(QLabel(msg))
vbox.addWidget(text)
pin = QLineEdit()
pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,9}')))
pin.setMaximumWidth(100)
hbox_pin = QHBoxLayout()
hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
hbox_pin.addWidget(pin)
hbox_pin.addStretch(1)
if method in [TIM_NEW, TIM_RECOVER]:
vbox.addWidget(WWLabel(RECOMMEND_PIN))
vbox.addWidget(cb_pin)
else:
vbox.addLayout(hbox_pin)
passphrase_msg = WWLabel(PASSPHRASE_HELP_SHORT)
passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
passphrase_warning.setStyleSheet("color: red")
cb_phrase = QCheckBox(_('Enable passphrases'))
cb_phrase.setChecked(False)
vbox.addWidget(passphrase_msg)
vbox.addWidget(passphrase_warning)
vbox.addWidget(cb_phrase)
wizard.exec_layout(vbox, next_enabled=next_enabled)
if method in [TIM_NEW, TIM_RECOVER]:
item = bg.checkedId()
pin = cb_pin.isChecked()
else:
item = ' '.join(str(clean_text(text)).split())
pin = str(pin.text())
return (item, name.text(), pin, cb_phrase.isChecked())
class Plugin(KeepKeyPlugin, QtPlugin):
icon_paired = ":icons/keepkey.png"
icon_unpaired = ":icons/keepkey_unpaired.png"
@classmethod
def pin_matrix_widget_class(self):
from keepkeylib.qt.pinmatrix import PinMatrixWidget
return PinMatrixWidget
class SettingsDialog(WindowModalDialog):
'''This dialog doesn't require a device be paired with a wallet.
We want users to be able to wipe a device even if they've forgotten
their PIN.'''
def __init__(self, window, plugin, keystore, device_id):
title = _("{} Settings").format(plugin.device)
super(SettingsDialog, self).__init__(window, title)
self.setMaximumWidth(540)
devmgr = plugin.device_manager()
config = devmgr.config
handler = keystore.handler
thread = keystore.thread
hs_rows, hs_cols = (64, 128)
def invoke_client(method, *args, **kw_args):
unpair_after = kw_args.pop('unpair_after', False)
def task():
client = devmgr.client_by_id(device_id)
if not client:
raise RuntimeError("Device not connected")
if method:
getattr(client, method)(*args, **kw_args)
if unpair_after:
devmgr.unpair_id(device_id)
return client.features
thread.add(task, on_success=update)
def update(features):
self.features = features
set_label_enabled()
bl_hash = bh2u(features.bootloader_hash)
bl_hash = "\n".join([bl_hash[:32], bl_hash[32:]])
noyes = [_("No"), _("Yes")]
endis = [_("Enable Passphrases"), _("Disable Passphrases")]
disen = [_("Disabled"), _("Enabled")]
setchange = [_("Set a PIN"), _("Change PIN")]
version = "%d.%d.%d" % (features.major_version,
features.minor_version,
features.patch_version)
coins = ", ".join(coin.coin_name for coin in features.coins)
device_label.setText(features.label)
pin_set_label.setText(noyes[features.pin_protection])
passphrases_label.setText(disen[features.passphrase_protection])
bl_hash_label.setText(bl_hash)
label_edit.setText(features.label)
device_id_label.setText(features.device_id)
initialized_label.setText(noyes[features.initialized])
version_label.setText(version)
coins_label.setText(coins)
clear_pin_button.setVisible(features.pin_protection)
clear_pin_warning.setVisible(features.pin_protection)
pin_button.setText(setchange[features.pin_protection])
pin_msg.setVisible(not features.pin_protection)
passphrase_button.setText(endis[features.passphrase_protection])
language_label.setText(features.language)
def set_label_enabled():
label_apply.setEnabled(label_edit.text() != self.features.label)
def rename():
invoke_client('change_label', label_edit.text())
def toggle_passphrase():
title = _("Confirm Toggle Passphrase Protection")
currently_enabled = self.features.passphrase_protection
if currently_enabled:
msg = _("After disabling passphrases, you can only pair this "
"Electrum-Ganja wallet if it had an empty passphrase. "
"If its passphrase was not empty, you will need to "
"create a new wallet with the install wizard. You "
"can use this wallet again at any time by re-enabling "
"passphrases and entering its passphrase.")
else:
msg = _("Your current Electrum-Ganja wallet can only be used with "
"an empty passphrase. You must create a separate "
"wallet with the install wizard for other passphrases "
"as each one generates a new set of addresses.")
msg += "\n\n" + _("Are you sure you want to proceed?")
if not self.question(msg, title=title):
return
invoke_client('toggle_passphrase', unpair_after=currently_enabled)
def change_homescreen():
from PIL import Image # FIXME
dialog = QFileDialog(self, _("Choose Homescreen"))
filename, __ = dialog.getOpenFileName()
if filename:
im = Image.open(str(filename))
if im.size != (hs_cols, hs_rows):
raise Exception('Image must be 64 x 128 pixels')
im = im.convert('1')
pix = im.load()
img = ''
for j in range(hs_rows):
for i in range(hs_cols):
img += '1' if pix[i, j] else '0'
img = ''.join(chr(int(img[i:i + 8], 2))
for i in range(0, len(img), 8))
invoke_client('change_homescreen', img)
def clear_homescreen():
invoke_client('change_homescreen', '\x00')
def set_pin():
invoke_client('set_pin', remove=False)
def clear_pin():
invoke_client('set_pin', remove=True)
def wipe_device():
wallet = window.wallet
if wallet and sum(wallet.get_balance()):
title = _("Confirm Device Wipe")
msg = _("Are you SURE you want to wipe the device?\n"
"Your wallet still has Ganjacoins in it!")
if not self.question(msg, title=title,
icon=QMessageBox.Critical):
return
invoke_client('wipe_device', unpair_after=True)
def slider_moved():
mins = timeout_slider.sliderPosition()
timeout_minutes.setText(_("%2d minutes") % mins)
def slider_released():
config.set_session_timeout(timeout_slider.sliderPosition() * 60)
# Information tab
info_tab = QWidget()
info_layout = QVBoxLayout(info_tab)
info_glayout = QGridLayout()
info_glayout.setColumnStretch(2, 1)
device_label = QLabel()
pin_set_label = QLabel()
passphrases_label = QLabel()
version_label = QLabel()
device_id_label = QLabel()
bl_hash_label = QLabel()
bl_hash_label.setWordWrap(True)
coins_label = QLabel()
coins_label.setWordWrap(True)
language_label = QLabel()
initialized_label = QLabel()
rows = [
(_("Device Label"), device_label),
(_("PIN set"), pin_set_label),
(_("Passphrases"), passphrases_label),
(_("Firmware Version"), version_label),
(_("Device ID"), device_id_label),
(_("Bootloader Hash"), bl_hash_label),
(_("Supported Coins"), coins_label),
(_("Language"), language_label),
(_("Initialized"), initialized_label),
]
for row_num, (label, widget) in enumerate(rows):
info_glayout.addWidget(QLabel(label), row_num, 0)
info_glayout.addWidget(widget, row_num, 1)
info_layout.addLayout(info_glayout)
# Settings tab
settings_tab = QWidget()
settings_layout = QVBoxLayout(settings_tab)
settings_glayout = QGridLayout()
# Settings tab - Label
label_msg = QLabel(_("Name this {}. If you have multiple devices "
"their labels help distinguish them.")
.format(plugin.device))
label_msg.setWordWrap(True)
label_label = QLabel(_("Device Label"))
label_edit = QLineEdit()
label_edit.setMinimumWidth(150)
label_edit.setMaxLength(plugin.MAX_LABEL_LEN)
label_apply = QPushButton(_("Apply"))
label_apply.clicked.connect(rename)
label_edit.textChanged.connect(set_label_enabled)
settings_glayout.addWidget(label_label, 0, 0)
settings_glayout.addWidget(label_edit, 0, 1, 1, 2)
settings_glayout.addWidget(label_apply, 0, 3)
settings_glayout.addWidget(label_msg, 1, 1, 1, -1)
# Settings tab - PIN
pin_label = QLabel(_("PIN Protection"))
pin_button = QPushButton()
pin_button.clicked.connect(set_pin)
settings_glayout.addWidget(pin_label, 2, 0)
settings_glayout.addWidget(pin_button, 2, 1)
pin_msg = QLabel(_("PIN protection is strongly recommended. "
"A PIN is your only protection against someone "
"stealing your Ganjacoins if they obtain physical "
"access to your {}.").format(plugin.device))
pin_msg.setWordWrap(True)
pin_msg.setStyleSheet("color: red")
settings_glayout.addWidget(pin_msg, 3, 1, 1, -1)
# Settings tab - Session Timeout
timeout_label = QLabel(_("Session Timeout"))
timeout_minutes = QLabel()
timeout_slider = QSlider(Qt.Horizontal)
timeout_slider.setRange(1, 60)
timeout_slider.setSingleStep(1)
timeout_slider.setTickInterval(5)
timeout_slider.setTickPosition(QSlider.TicksBelow)
timeout_slider.setTracking(True)
timeout_msg = QLabel(
_("Clear the session after the specified period "
"of inactivity. Once a session has timed out, "
"your PIN and passphrase (if enabled) must be "
"re-entered to use the device."))
timeout_msg.setWordWrap(True)
timeout_slider.setSliderPosition(config.get_session_timeout() // 60)
slider_moved()
timeout_slider.valueChanged.connect(slider_moved)
timeout_slider.sliderReleased.connect(slider_released)
settings_glayout.addWidget(timeout_label, 6, 0)
settings_glayout.addWidget(timeout_slider, 6, 1, 1, 3)
settings_glayout.addWidget(timeout_minutes, 6, 4)
settings_glayout.addWidget(timeout_msg, 7, 1, 1, -1)
settings_layout.addLayout(settings_glayout)
settings_layout.addStretch(1)
# Advanced tab
advanced_tab = QWidget()
advanced_layout = QVBoxLayout(advanced_tab)
advanced_glayout = QGridLayout()
# Advanced tab - clear PIN
clear_pin_button = QPushButton(_("Disable PIN"))
clear_pin_button.clicked.connect(clear_pin)
clear_pin_warning = QLabel(
_("If you disable your PIN, anyone with physical access to your "
"{} device can spend your Ganjacoins.").format(plugin.device))
clear_pin_warning.setWordWrap(True)
clear_pin_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(clear_pin_button, 0, 2)
advanced_glayout.addWidget(clear_pin_warning, 1, 0, 1, 5)
# Advanced tab - toggle passphrase protection
passphrase_button = QPushButton()
passphrase_button.clicked.connect(toggle_passphrase)
passphrase_msg = WWLabel(PASSPHRASE_HELP)
passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
passphrase_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(passphrase_button, 3, 2)
advanced_glayout.addWidget(passphrase_msg, 4, 0, 1, 5)
advanced_glayout.addWidget(passphrase_warning, 5, 0, 1, 5)
# Advanced tab - wipe device
wipe_device_button = QPushButton(_("Wipe Device"))
wipe_device_button.clicked.connect(wipe_device)
wipe_device_msg = QLabel(
_("Wipe the device, removing all data from it. The firmware "
"is left unchanged."))
wipe_device_msg.setWordWrap(True)
wipe_device_warning = QLabel(
_("Only wipe a device if you have the recovery seed written down "
"and the device wallet(s) are empty, otherwise the Ganjacoins "
"will be lost forever."))
wipe_device_warning.setWordWrap(True)
wipe_device_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(wipe_device_button, 6, 2)
advanced_glayout.addWidget(wipe_device_msg, 7, 0, 1, 5)
advanced_glayout.addWidget(wipe_device_warning, 8, 0, 1, 5)
advanced_layout.addLayout(advanced_glayout)
advanced_layout.addStretch(1)
tabs = QTabWidget(self)
tabs.addTab(info_tab, _("Information"))
tabs.addTab(settings_tab, _("Settings"))
tabs.addTab(advanced_tab, _("Advanced"))
dialog_vbox = QVBoxLayout(self)
dialog_vbox.addWidget(tabs)
dialog_vbox.addLayout(Buttons(CloseButton(self)))
# Update information
invoke_client(None)
| 40.965928 | 83 | 0.617749 | from functools import partial
import threading
from PyQt5.Qt import Qt
from PyQt5.Qt import QGridLayout, QInputDialog, QPushButton
from PyQt5.Qt import QVBoxLayout, QLabel
from electrum_ganja_gui.qt.util import *
from electrum_ganja.i18n import _
from electrum_ganja.plugins import hook, DeviceMgr
from electrum_ganja.util import PrintError, UserCancelled, bh2u
from electrum_ganja.wallet import Wallet, Standard_Wallet
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from .keepkey import KeepKeyPlugin, TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
PASSPHRASE_HELP_SHORT =_(
"Passphrases allow you to access new wallets, each "
"hidden behind a particular case-sensitive passphrase.")
PASSPHRASE_HELP = PASSPHRASE_HELP_SHORT + " " + _(
"You need to create a separate Electrum-Ganja wallet for each passphrase "
"you use as they each generate different addresses. Changing "
"your passphrase does not lose other wallets, each is still "
"accessible behind its own passphrase.")
RECOMMEND_PIN = _(
"You should enable PIN protection. Your PIN is the only protection "
"for your Ganjacoins if your device is lost or stolen.")
PASSPHRASE_NOT_PIN = _(
"If you forget a passphrase you will be unable to access any "
"Ganjacoins in the wallet behind it. A passphrase is not a PIN. "
"Only change this if you are sure you understand it.")
CHARACTER_RECOVERY = (
"Use the recovery cipher shown on your device to input your seed words. "
"The cipher changes with every keypress.\n"
"After at most 4 letters the device will auto-complete a word.\n"
"Press SPACE or the Accept Word button to accept the device's auto-"
"completed word and advance to the next one.\n"
"Press BACKSPACE to go back a character or word.\n"
"Press ENTER or the Seed Entered button once the last word in your "
"seed is auto-completed.")
class CharacterButton(QPushButton):
def __init__(self, text=None):
QPushButton.__init__(self, text)
def keyPressEvent(self, event):
event.setAccepted(False) # Pass through Enter and Space keys
class CharacterDialog(WindowModalDialog):
    """Window-modal dialog driving KeepKey's character-by-character seed
    recovery.

    A nested QEventLoop blocks get_char() until the user has supplied one
    character (or a control action: accept word, backspace, done, cancel);
    the result is communicated through self.data.
    """

    def __init__(self, parent):
        super(CharacterDialog, self).__init__(parent)
        self.setWindowTitle(_("KeepKey Seed Recovery"))
        self.character_pos = 0    # index (0-3) of the character being entered
        self.word_pos = 0         # index of the seed word being entered
        self.loop = QEventLoop()  # nested loop; exited from process_key / rejected
        self.word_help = QLabel()
        self.char_buttons = []
        vbox = QVBoxLayout(self)
        vbox.addWidget(WWLabel(CHARACTER_RECOVERY))
        hbox = QHBoxLayout()
        hbox.addWidget(self.word_help)
        # Four placeholder buttons, one per character slot shown to the user.
        for i in range(4):
            char_button = CharacterButton('*')
            char_button.setMaximumWidth(36)
            self.char_buttons.append(char_button)
            hbox.addWidget(char_button)
        # 32 == ord(' '): the Accept Word button behaves like pressing SPACE.
        self.accept_button = CharacterButton(_("Accept Word"))
        self.accept_button.clicked.connect(partial(self.process_key, 32))
        # Rejection (Cancel / window close) exits the loop with a non-zero code.
        self.rejected.connect(partial(self.loop.exit, 1))
        hbox.addWidget(self.accept_button)
        hbox.addStretch(1)
        vbox.addLayout(hbox)
        self.finished_button = QPushButton(_("Seed Entered"))
        self.cancel_button = QPushButton(_("Cancel"))
        self.finished_button.clicked.connect(partial(self.process_key,
                                                     Qt.Key_Return))
        self.cancel_button.clicked.connect(self.rejected)
        buttons = Buttons(self.finished_button, self.cancel_button)
        vbox.addSpacing(40)
        vbox.addLayout(buttons)
        self.refresh()
        self.show()

    def refresh(self):
        """Sync labels, button enablement and focus with the current
        word/character position."""
        self.word_help.setText("Enter seed word %2d:" % (self.word_pos + 1))
        self.accept_button.setEnabled(self.character_pos >= 3)
        # Finishing is only offered on the last word of a 12/18/24-word seed.
        self.finished_button.setEnabled((self.word_pos in (11, 17, 23)
                                         and self.character_pos >= 3))
        for n, button in enumerate(self.char_buttons):
            button.setEnabled(n == self.character_pos)
            if n == self.character_pos:
                button.setFocus()

    def is_valid_alpha_space(self, key):
        """Return True if `key` is acceptable at the current position:
        a letter, or a space once enough characters have been typed."""
        # Auto-completion requires at least 3 characters
        if key == ord(' ') and self.character_pos >= 3:
            return True
        # Firmware aborts protocol if the 5th character is non-space
        if self.character_pos >= 4:
            return False
        return (key >= ord('a') and key <= ord('z')
                or (key >= ord('A') and key <= ord('Z')))

    def process_key(self, key):
        """Translate a key code into self.data and, when it is meaningful,
        exit the nested event loop so get_char() can return."""
        self.data = None
        if key == Qt.Key_Return and self.finished_button.isEnabled():
            self.data = {'done': True}
        elif key == Qt.Key_Backspace and (self.word_pos or self.character_pos):
            self.data = {'delete': True}
        elif self.is_valid_alpha_space(key):
            self.data = {'character': chr(key).lower()}
        if self.data:
            self.loop.exit(0)

    def keyPressEvent(self, event):
        self.process_key(event.key())
        if not self.data:
            # Unhandled keys fall back to default dialog behaviour.
            QDialog.keyPressEvent(self, event)

    def get_char(self, word_pos, character_pos):
        """Block in the nested event loop until the user provides input for
        the given word/character position; the result is left in self.data
        (None if the dialog was cancelled)."""
        self.word_pos = word_pos
        self.character_pos = character_pos
        self.refresh()
        if self.loop.exec_():
            self.data = None  # User cancelled
class QtHandler(QtHandlerBase):
    """GUI-thread handler for KeepKey devices.

    Requests arriving on the device (keystore) thread are forwarded to the
    Qt main thread via signals; the device thread then blocks on self.done
    until the dialog code has produced a response.
    """

    char_signal = pyqtSignal(object)
    pin_signal = pyqtSignal(object)
    close_char_dialog_signal = pyqtSignal()

    def __init__(self, win, pin_matrix_widget_class, device):
        super(QtHandler, self).__init__(win, device)
        self.char_signal.connect(self.update_character_dialog)
        self.pin_signal.connect(self.pin_dialog)
        self.close_char_dialog_signal.connect(self._close_char_dialog)
        self.pin_matrix_widget_class = pin_matrix_widget_class
        self.character_dialog = None

    def get_char(self, msg):
        """Device thread: request one seed-recovery character.

        Blocks until the GUI thread sets self.done; returns the dict left
        in the character dialog (or None on cancellation)."""
        self.done.clear()
        self.char_signal.emit(msg)
        self.done.wait()
        data = self.character_dialog.data
        if not data or 'done' in data:
            # Recovery finished or was cancelled; tear the dialog down
            # on the GUI thread.
            self.close_char_dialog_signal.emit()
        return data

    def _close_char_dialog(self):
        # GUI thread: dispose of the character dialog if one exists.
        if self.character_dialog:
            self.character_dialog.accept()
            self.character_dialog = None

    def get_pin(self, msg):
        """Device thread: request a PIN via the scrambled matrix dialog and
        block until the user has entered it."""
        self.done.clear()
        self.pin_signal.emit(msg)
        self.done.wait()
        return self.response

    def pin_dialog(self, msg):
        """GUI thread: show the PIN-matrix dialog and store the result in
        self.response for the waiting device thread."""
        # Needed e.g. when resetting a device
        self.clear_dialog()
        dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
        matrix = self.pin_matrix_widget_class()
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(msg))
        vbox.addWidget(matrix)
        vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
        self.response = str(matrix.get_value())
        self.done.set()

    def update_character_dialog(self, msg):
        """GUI thread: lazily create the character dialog and advance it to
        the word/character position the device asked for."""
        if not self.character_dialog:
            self.character_dialog = CharacterDialog(self.top_level_window())
        self.character_dialog.get_char(msg.word_pos, msg.character_pos)
        self.done.set()
class QtPlugin(QtPluginBase):
    """Qt glue shared by KeepKey-style hardware-wallet plugins."""
    # Derived classes must provide the following class-static variables:
    #   icon_file
    #   pin_matrix_widget_class

    def create_handler(self, window):
        """Build the handler that mediates between device thread and GUI."""
        return QtHandler(window, self.pin_matrix_widget_class(), self.device)

    @hook
    def receive_menu(self, menu, addrs, wallet):
        """Add a 'Show on <device>' entry to the address context menu for
        single-address selections on standard wallets using this keystore."""
        if type(wallet) is not Standard_Wallet:
            return
        keystore = wallet.get_keystore()
        if type(keystore) == self.keystore_class and len(addrs) == 1:
            def show_address():
                # Run on the keystore thread: device I/O must not block the GUI.
                keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
            menu.addAction(_("Show on {}").format(self.device), show_address)

    def show_settings_dialog(self, window, keystore):
        """Open the per-device SettingsDialog for a chosen connected device."""
        device_id = self.choose_device(window, keystore)
        if device_id:
            SettingsDialog(window, self, keystore, device_id).exec_()

    def request_trezor_init_settings(self, wizard, method, device):
        """Run the wizard page that collects device-initialization settings.

        Returns a 4-tuple (item, label, pin, use_passphrases) where, for
        TIM_NEW/TIM_RECOVER, item is the seed-length radio index and pin is
        a bool; otherwise item is the entered text and pin is the PIN string.
        """
        vbox = QVBoxLayout()
        next_enabled = True
        label = QLabel(_("Enter a label to name your device:"))
        name = QLineEdit()
        hl = QHBoxLayout()
        hl.addWidget(label)
        hl.addWidget(name)
        hl.addStretch(1)
        vbox.addLayout(hl)

        def clean_text(widget):
            # Strip leading/trailing whitespace and collapse internal runs.
            text = widget.toPlainText().strip()
            return ' '.join(text.split())

        if method in [TIM_NEW, TIM_RECOVER]:
            gb = QGroupBox()
            hbox1 = QHBoxLayout()
            gb.setLayout(hbox1)
            # KeepKey recovery doesn't need a word count
            if method == TIM_NEW:
                vbox.addWidget(gb)
            gb.setTitle(_("Select your seed length:"))
            bg = QButtonGroup()
            for i, count in enumerate([12, 18, 24]):
                rb = QRadioButton(gb)
                rb.setText(_("{} words").format(count))
                bg.addButton(rb)
                bg.setId(rb, i)
                hbox1.addWidget(rb)
                rb.setChecked(True)  # last iteration leaves 24 words selected
            cb_pin = QCheckBox(_('Enable PIN protection'))
            cb_pin.setChecked(True)
        else:
            # TIM_MNEMONIC / xprv import: free-form text entry instead.
            text = QTextEdit()
            text.setMaximumHeight(60)
            if method == TIM_MNEMONIC:
                msg = _("Enter your BIP39 mnemonic:")
            else:
                msg = _("Enter the master private key beginning with xprv:")

                def set_enabled():
                    # Gate the Next button on the text being a valid xprv.
                    from electrum_ganja.keystore import is_xprv
                    wizard.next_button.setEnabled(is_xprv(clean_text(text)))
                text.textChanged.connect(set_enabled)
                next_enabled = False
            vbox.addWidget(QLabel(msg))
            vbox.addWidget(text)

        pin = QLineEdit()
        pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,9}')))
        pin.setMaximumWidth(100)
        hbox_pin = QHBoxLayout()
        hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
        hbox_pin.addWidget(pin)
        hbox_pin.addStretch(1)

        if method in [TIM_NEW, TIM_RECOVER]:
            vbox.addWidget(WWLabel(RECOMMEND_PIN))
            vbox.addWidget(cb_pin)
        else:
            vbox.addLayout(hbox_pin)

        passphrase_msg = WWLabel(PASSPHRASE_HELP_SHORT)
        passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
        passphrase_warning.setStyleSheet("color: red")
        cb_phrase = QCheckBox(_('Enable passphrases'))
        cb_phrase.setChecked(False)
        vbox.addWidget(passphrase_msg)
        vbox.addWidget(passphrase_warning)
        vbox.addWidget(cb_phrase)

        # Blocks until the user advances past this wizard page.
        wizard.exec_layout(vbox, next_enabled=next_enabled)

        if method in [TIM_NEW, TIM_RECOVER]:
            item = bg.checkedId()
            pin = cb_pin.isChecked()
        else:
            item = ' '.join(str(clean_text(text)).split())
            pin = str(pin.text())

        return (item, name.text(), pin, cb_phrase.isChecked())
class Plugin(KeepKeyPlugin, QtPlugin):
    """Concrete KeepKey plugin: pairs the generic KeepKeyPlugin logic with
    the Qt front-end and its device icons."""

    icon_paired = ":icons/keepkey.png"
    icon_unpaired = ":icons/keepkey_unpaired.png"

    @classmethod
    def pin_matrix_widget_class(cls):
        """Return the PIN-matrix widget class; imported lazily so keepkeylib
        is only needed when a KeepKey is actually used."""
        from keepkeylib.qt.pinmatrix import PinMatrixWidget
        return PinMatrixWidget
class SettingsDialog(WindowModalDialog):
    """Per-device settings dialog with Information / Settings / Advanced tabs.

    All device communication goes through the invoke_client() closure, which
    runs on the keystore thread; on success, update() repaints the widgets
    from the features object returned by the device.
    """

    def __init__(self, window, plugin, keystore, device_id):
        title = _("{} Settings").format(plugin.device)
        super(SettingsDialog, self).__init__(window, title)
        self.setMaximumWidth(540)

        devmgr = plugin.device_manager()
        config = devmgr.config
        handler = keystore.handler  # NOTE(review): not referenced below — confirm it is still needed
        thread = keystore.thread
        # Homescreen bitmap dimensions (rows x columns) used by change_homescreen.
        hs_rows, hs_cols = (64, 128)

        def invoke_client(method, *args, **kw_args):
            # Run `method` on the device client from the keystore thread and
            # refresh the dialog from the returned features on success.
            # method=None performs a pure refresh.
            unpair_after = kw_args.pop('unpair_after', False)

            def task():
                client = devmgr.client_by_id(device_id)
                if not client:
                    raise RuntimeError("Device not connected")
                if method:
                    getattr(client, method)(*args, **kw_args)
                if unpair_after:
                    devmgr.unpair_id(device_id)
                return client.features

            thread.add(task, on_success=update)

        def update(features):
            # Repaint every widget from the device's feature report.
            # NOTE: closes over widgets created further down in __init__;
            # it only runs after construction has completed.
            self.features = features
            set_label_enabled()
            bl_hash = bh2u(features.bootloader_hash)
            bl_hash = "\n".join([bl_hash[:32], bl_hash[32:]])  # wrap for display
            noyes = [_("No"), _("Yes")]
            endis = [_("Enable Passphrases"), _("Disable Passphrases")]
            disen = [_("Disabled"), _("Enabled")]
            setchange = [_("Set a PIN"), _("Change PIN")]
            version = "%d.%d.%d" % (features.major_version,
                                    features.minor_version,
                                    features.patch_version)
            coins = ", ".join(coin.coin_name for coin in features.coins)
            device_label.setText(features.label)
            pin_set_label.setText(noyes[features.pin_protection])
            passphrases_label.setText(disen[features.passphrase_protection])
            bl_hash_label.setText(bl_hash)
            label_edit.setText(features.label)
            device_id_label.setText(features.device_id)
            initialized_label.setText(noyes[features.initialized])
            version_label.setText(version)
            coins_label.setText(coins)
            clear_pin_button.setVisible(features.pin_protection)
            clear_pin_warning.setVisible(features.pin_protection)
            pin_button.setText(setchange[features.pin_protection])
            pin_msg.setVisible(not features.pin_protection)
            passphrase_button.setText(endis[features.passphrase_protection])
            language_label.setText(features.language)

        def set_label_enabled():
            # Only allow Apply when the edited label actually differs.
            label_apply.setEnabled(label_edit.text() != self.features.label)

        def rename():
            invoke_client('change_label', label_edit.text())

        def toggle_passphrase():
            # Confirm with the user, then flip passphrase protection.
            title = _("Confirm Toggle Passphrase Protection")
            currently_enabled = self.features.passphrase_protection
            if currently_enabled:
                msg = _("After disabling passphrases, you can only pair this "
                        "Electrum-Ganja wallet if it had an empty passphrase. "
                        "If its passphrase was not empty, you will need to "
                        "create a new wallet with the install wizard. You "
                        "can use this wallet again at any time by re-enabling "
                        "passphrases and entering its passphrase.")
            else:
                msg = _("Your current Electrum-Ganja wallet can only be used with "
                        "an empty passphrase. You must create a separate "
                        "wallet with the install wizard for other passphrases "
                        "as each one generates a new set of addresses.")
            msg += "\n\n" + _("Are you sure you want to proceed?")
            if not self.question(msg, title=title):
                return
            # Disabling unpairs the wallet, since its keys may no longer match.
            invoke_client('toggle_passphrase', unpair_after=currently_enabled)

        def change_homescreen():
            # Pick an image file, convert it to the device's 1-bit bitmap
            # format, and upload it.
            from PIL import Image
            dialog = QFileDialog(self, _("Choose Homescreen"))
            filename, __ = dialog.getOpenFileName()
            if filename:
                im = Image.open(str(filename))
                if im.size != (hs_cols, hs_rows):
                    raise Exception('Image must be 64 x 128 pixels')
                im = im.convert('1')  # 1-bit monochrome
                pix = im.load()
                img = ''
                for j in range(hs_rows):
                    for i in range(hs_cols):
                        img += '1' if pix[i, j] else '0'
                # Pack the bit string into bytes, 8 pixels per byte.
                img = ''.join(chr(int(img[i:i + 8], 2))
                              for i in range(0, len(img), 8))
                invoke_client('change_homescreen', img)

        def clear_homescreen():
            # NOTE(review): defined but not connected to any widget below — confirm intended.
            invoke_client('change_homescreen', '\x00')

        def set_pin():
            invoke_client('set_pin', remove=False)

        def clear_pin():
            invoke_client('set_pin', remove=True)

        def wipe_device():
            # Require explicit confirmation if the open wallet still holds funds.
            wallet = window.wallet
            if wallet and sum(wallet.get_balance()):
                title = _("Confirm Device Wipe")
                msg = _("Are you SURE you want to wipe the device?\n"
                        "Your wallet still has Ganjacoins in it!")
                if not self.question(msg, title=title,
                                     icon=QMessageBox.Critical):
                    return
            invoke_client('wipe_device', unpair_after=True)

        def slider_moved():
            mins = timeout_slider.sliderPosition()
            timeout_minutes.setText(_("%2d minutes") % mins)

        def slider_released():
            # Config stores seconds; the slider works in minutes.
            config.set_session_timeout(timeout_slider.sliderPosition() * 60)

        # --- Information tab ---
        info_tab = QWidget()
        info_layout = QVBoxLayout(info_tab)
        info_glayout = QGridLayout()
        info_glayout.setColumnStretch(2, 1)
        device_label = QLabel()
        pin_set_label = QLabel()
        passphrases_label = QLabel()
        version_label = QLabel()
        device_id_label = QLabel()
        bl_hash_label = QLabel()
        bl_hash_label.setWordWrap(True)
        coins_label = QLabel()
        coins_label.setWordWrap(True)
        language_label = QLabel()
        initialized_label = QLabel()
        rows = [
            (_("Device Label"), device_label),
            (_("PIN set"), pin_set_label),
            (_("Passphrases"), passphrases_label),
            (_("Firmware Version"), version_label),
            (_("Device ID"), device_id_label),
            (_("Bootloader Hash"), bl_hash_label),
            (_("Supported Coins"), coins_label),
            (_("Language"), language_label),
            (_("Initialized"), initialized_label),
        ]
        for row_num, (label, widget) in enumerate(rows):
            info_glayout.addWidget(QLabel(label), row_num, 0)
            info_glayout.addWidget(widget, row_num, 1)
        info_layout.addLayout(info_glayout)

        # --- Settings tab ---
        settings_tab = QWidget()
        settings_layout = QVBoxLayout(settings_tab)
        settings_glayout = QGridLayout()

        # Settings tab: device label
        label_msg = QLabel(_("Name this {}. If you have multiple devices "
                             "their labels help distinguish them.")
                           .format(plugin.device))
        label_msg.setWordWrap(True)
        label_label = QLabel(_("Device Label"))
        label_edit = QLineEdit()
        label_edit.setMinimumWidth(150)
        label_edit.setMaxLength(plugin.MAX_LABEL_LEN)
        label_apply = QPushButton(_("Apply"))
        label_apply.clicked.connect(rename)
        label_edit.textChanged.connect(set_label_enabled)
        settings_glayout.addWidget(label_label, 0, 0)
        settings_glayout.addWidget(label_edit, 0, 1, 1, 2)
        settings_glayout.addWidget(label_apply, 0, 3)
        settings_glayout.addWidget(label_msg, 1, 1, 1, -1)

        # Settings tab: PIN
        pin_label = QLabel(_("PIN Protection"))
        pin_button = QPushButton()
        pin_button.clicked.connect(set_pin)
        settings_glayout.addWidget(pin_label, 2, 0)
        settings_glayout.addWidget(pin_button, 2, 1)
        pin_msg = QLabel(_("PIN protection is strongly recommended. "
                           "A PIN is your only protection against someone "
                           "stealing your Ganjacoins if they obtain physical "
                           "access to your {}.").format(plugin.device))
        pin_msg.setWordWrap(True)
        pin_msg.setStyleSheet("color: red")
        settings_glayout.addWidget(pin_msg, 3, 1, 1, -1)

        # Settings tab: session timeout
        timeout_label = QLabel(_("Session Timeout"))
        timeout_minutes = QLabel()
        timeout_slider = QSlider(Qt.Horizontal)
        timeout_slider.setRange(1, 60)
        timeout_slider.setSingleStep(1)
        timeout_slider.setTickInterval(5)
        timeout_slider.setTickPosition(QSlider.TicksBelow)
        timeout_slider.setTracking(True)
        timeout_msg = QLabel(
            _("Clear the session after the specified period "
              "of inactivity. Once a session has timed out, "
              "your PIN and passphrase (if enabled) must be "
              "re-entered to use the device."))
        timeout_msg.setWordWrap(True)
        timeout_slider.setSliderPosition(config.get_session_timeout() // 60)
        slider_moved()  # initialise the minutes label
        timeout_slider.valueChanged.connect(slider_moved)
        timeout_slider.sliderReleased.connect(slider_released)
        settings_glayout.addWidget(timeout_label, 6, 0)
        settings_glayout.addWidget(timeout_slider, 6, 1, 1, 3)
        settings_glayout.addWidget(timeout_minutes, 6, 4)
        settings_glayout.addWidget(timeout_msg, 7, 1, 1, -1)
        settings_layout.addLayout(settings_glayout)
        settings_layout.addStretch(1)

        # --- Advanced tab ---
        advanced_tab = QWidget()
        advanced_layout = QVBoxLayout(advanced_tab)
        advanced_glayout = QGridLayout()

        # Advanced tab: disable PIN
        clear_pin_button = QPushButton(_("Disable PIN"))
        clear_pin_button.clicked.connect(clear_pin)
        clear_pin_warning = QLabel(
            _("If you disable your PIN, anyone with physical access to your "
              "{} device can spend your Ganjacoins.").format(plugin.device))
        clear_pin_warning.setWordWrap(True)
        clear_pin_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(clear_pin_button, 0, 2)
        advanced_glayout.addWidget(clear_pin_warning, 1, 0, 1, 5)

        # Advanced tab: toggle passphrase protection
        passphrase_button = QPushButton()
        passphrase_button.clicked.connect(toggle_passphrase)
        passphrase_msg = WWLabel(PASSPHRASE_HELP)
        passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
        passphrase_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(passphrase_button, 3, 2)
        advanced_glayout.addWidget(passphrase_msg, 4, 0, 1, 5)
        advanced_glayout.addWidget(passphrase_warning, 5, 0, 1, 5)

        # Advanced tab: wipe device
        wipe_device_button = QPushButton(_("Wipe Device"))
        wipe_device_button.clicked.connect(wipe_device)
        wipe_device_msg = QLabel(
            _("Wipe the device, removing all data from it. The firmware "
              "is left unchanged."))
        wipe_device_msg.setWordWrap(True)
        wipe_device_warning = QLabel(
            _("Only wipe a device if you have the recovery seed written down "
              "and the device wallet(s) are empty, otherwise the Ganjacoins "
              "will be lost forever."))
        wipe_device_warning.setWordWrap(True)
        wipe_device_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(wipe_device_button, 6, 2)
        advanced_glayout.addWidget(wipe_device_msg, 7, 0, 1, 5)
        advanced_glayout.addWidget(wipe_device_warning, 8, 0, 1, 5)
        advanced_layout.addLayout(advanced_glayout)
        advanced_layout.addStretch(1)

        tabs = QTabWidget(self)
        tabs.addTab(info_tab, _("Information"))
        tabs.addTab(settings_tab, _("Settings"))
        tabs.addTab(advanced_tab, _("Advanced"))
        dialog_vbox = QVBoxLayout(self)
        dialog_vbox.addWidget(tabs)
        dialog_vbox.addLayout(Buttons(CloseButton(self)))

        # Initial refresh: fetch features from the device and populate the UI.
        invoke_client(None)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.