content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
'''
BVH Parser Class
By Omid Alemi
Created: June 12, 2017
Based on: https://gist.github.com/johnfredcee/2007503
'''
import re
import numpy as np
from data import Joint, MocapData
class BVHScanner:
    '''
    A wrapper class for re.Scanner that tokenizes BVH file contents.

    scan() yields (TOKEN_TYPE, text) pairs for identifiers, numeric
    literals and braces; colons and whitespace are silently dropped.
    '''
    def __init__(self):
        def identifier(scanner, token):
            return 'IDENT', token

        def digit(scanner, token):
            return 'DIGIT', token

        def open_brace(scanner, token):
            return 'OPEN_BRACE', token

        def close_brace(scanner, token):
            return 'CLOSE_BRACE', token

        self.scanner = re.Scanner([
            (r'[a-zA-Z_]\w*', identifier),
            # Matches plain ints/floats, leading-dot floats (.34) and
            # scientific notation (4.56e-2). Earlier attempts:
            #   r'-*[0-9]+(\.[0-9]+)?'  -> fails on .34
            #   r'[-+]?[0-9]*\.?[0-9]+' -> fails on 4.56e-2
            (r'-*[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', digit),
            # NOTE: the original listed the close-brace rule twice; the
            # duplicate was redundant and has been removed.
            (r'}', close_brace),
            (r'{', open_brace),
            (r':', None),   # separators are discarded
            (r'\s+', None)  # whitespace is discarded
        ])

    def scan(self, stuff):
        '''Tokenize *stuff*; returns (token_list, unmatched_remainder).'''
        return self.scanner.scan(stuff)
class BVHParser():
    '''
    A class to parse a BVH file.

    Extracts the skeleton (joint hierarchy, offsets, channel layout) and
    the per-frame channel values into a MocapData object.
    '''
    def __init__(self, filename=None):
        # *filename* is accepted for interface compatibility; parsing is
        # done explicitly via parse().
        self.reset()

    def reset(self):
        '''Clear all parser state so the instance can be reused.'''
        self._skeleton = {}
        self.bone_context = []
        self._motion_channels = []
        self._motions = []
        self.current_token = 0
        self.framerate = 0.0
        self.root_name = ''

        self.scanner = BVHScanner()

        self.data = MocapData()

    def parse(self, filename):
        '''Parse the BVH file at *filename* and return a populated MocapData.'''
        self.reset()

        with open(filename, 'r') as bvh_file:
            raw_contents = bvh_file.read()
        tokens, remainder = self.scanner.scan(raw_contents)
        self._parse_hierarchy(tokens)
        self.current_token = self.current_token + 1
        self._parse_motion(tokens)

        self.data.skeleton = self._skeleton
        self.data.channel_names = self._motion_channels
        self.data.values = self._to_DataFrame()
        self.data.root_name = self.root_name
        self.data.framerate = self.framerate

        return self.data

    def _to_DataFrame(self):
        '''Returns all of the channels parsed from the file as a pandas DataFrame'''
        import pandas as pd
        # Frame times become a timedelta index; columns are "<joint>_<channel>".
        time_index = pd.to_timedelta([f[0] for f in self._motions], unit='s')
        frames = [f[1] for f in self._motions]
        channels = np.asarray([[channel[2] for channel in frame] for frame in frames])
        column_names = ['%s_%s'%(c[0], c[1]) for c in self._motion_channels]

        return pd.DataFrame(data=channels, index=time_index, columns=column_names)

    def _new_bone(self, parent, name):
        '''Create an empty bone record attached to *parent*.'''
        bone = {'parent': parent, 'channels': [], 'offsets': [],'children': []}
        return bone

    def _push_bone_context(self,name):
        '''Push *name* onto the bone-context stack (current parent joint).'''
        self.bone_context.append(name)

    def _get_bone_context(self):
        '''Return the current (topmost) bone name without popping it.'''
        return self.bone_context[len(self.bone_context)-1]

    def _pop_bone_context(self):
        '''Pop the topmost bone name and return the new top of the stack.'''
        self.bone_context = self.bone_context[:-1]
        return self.bone_context[len(self.bone_context)-1]

    def _read_offset(self, bvh, token_index):
        '''Read an OFFSET x y z triple; returns (offsets, next_index) or (None, None).'''
        if bvh[token_index] != ('IDENT', 'OFFSET'):
            return None, None
        token_index = token_index + 1
        offsets = [0.0] * 3
        for i in range(3):
            offsets[i] = float(bvh[token_index][1])
            token_index = token_index + 1
        return offsets, token_index

    def _read_channels(self, bvh, token_index):
        '''Read a CHANNELS count name... list; returns (channels, next_index) or (None, None).'''
        if bvh[token_index] != ('IDENT', 'CHANNELS'):
            return None, None
        token_index = token_index + 1
        channel_count = int(bvh[token_index][1])
        token_index = token_index + 1
        channels = [""] * channel_count
        for i in range(channel_count):
            channels[i] = bvh[token_index][1]
            token_index = token_index + 1
        return channels, token_index

    def _parse_joint(self, bvh, token_index):
        '''Recursively parse one JOINT (or End Site) block starting at *token_index*.

        Returns the index of the first token after the joint's closing
        brace, or None on a malformed block.
        '''
        end_site = False
        joint_id = bvh[token_index][1]
        token_index = token_index + 1
        joint_name = bvh[token_index][1]
        token_index = token_index + 1

        parent_name = self._get_bone_context()

        if (joint_id == "End"):
            # End sites get a synthetic "<parent>_Nub" name and no channels.
            joint_name = parent_name+ '_Nub'
            end_site = True
        joint = self._new_bone(parent_name, joint_name)
        if bvh[token_index][0] != 'OPEN_BRACE':
            # Fixed typo in the original message ("brance" -> "brace").
            print('Was expecting brace, got ', bvh[token_index])
            return None
        token_index = token_index + 1
        offsets, token_index = self._read_offset(bvh, token_index)
        joint['offsets'] = offsets
        if not end_site:
            channels, token_index = self._read_channels(bvh, token_index)
            joint['channels'] = channels
            for channel in channels:
                self._motion_channels.append((joint_name, channel))

        self._skeleton[joint_name] = joint
        self._skeleton[parent_name]['children'].append(joint_name)

        # Recurse into nested JOINT / End Site blocks.
        while (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'JOINT') or (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'End'):
            self._push_bone_context(joint_name)
            token_index = self._parse_joint(bvh, token_index)
            self._pop_bone_context()

        if bvh[token_index][0] == 'CLOSE_BRACE':
            return token_index + 1

        print('Unexpected token ', bvh[token_index])

    def _parse_hierarchy(self, bvh):
        '''Parse the HIERARCHY section, populating the skeleton and channel list.'''
        self.current_token = 0
        if bvh[self.current_token] != ('IDENT', 'HIERARCHY'):
            return None
        self.current_token = self.current_token + 1
        if bvh[self.current_token] != ('IDENT', 'ROOT'):
            return None
        self.current_token = self.current_token + 1
        if bvh[self.current_token][0] != 'IDENT':
            return None

        root_name = bvh[self.current_token][1]
        root_bone = self._new_bone(None, root_name)
        self.current_token = self.current_token + 2 #skipping open brace
        offsets, self.current_token = self._read_offset(bvh, self.current_token)
        channels, self.current_token = self._read_channels(bvh, self.current_token)
        root_bone['offsets'] = offsets
        root_bone['channels'] = channels
        self._skeleton[root_name] = root_bone
        self._push_bone_context(root_name)

        for channel in channels:
            self._motion_channels.append((root_name, channel))

        while bvh[self.current_token][1] == 'JOINT':
            self.current_token = self._parse_joint(bvh, self.current_token)

        self.root_name = root_name

    def _parse_motion(self, bvh):
        '''Parse the MOTION section: frame count, frame time and channel values.'''
        if bvh[self.current_token][0] != 'IDENT':
            print('Unexpected text')
            return None
        if bvh[self.current_token][1] != 'MOTION':
            print('No motion section')
            return None
        self.current_token = self.current_token + 1
        if bvh[self.current_token][1] != 'Frames':
            return None
        self.current_token = self.current_token + 1
        frame_count = int(bvh[self.current_token][1])
        self.current_token = self.current_token + 1
        if bvh[self.current_token][1] != 'Frame':
            return None
        self.current_token = self.current_token + 1
        if bvh[self.current_token][1] != 'Time':
            return None
        self.current_token = self.current_token + 1
        frame_rate = float(bvh[self.current_token][1])
        self.framerate = frame_rate
        self.current_token = self.current_token + 1

        frame_time = 0.0
        self._motions = [()] * frame_count
        for i in range(frame_count):
            channel_values = []
            for channel in self._motion_channels:
                channel_values.append((channel[0], channel[1], float(bvh[self.current_token][1])))
                self.current_token = self.current_token + 1
            self._motions[i] = (frame_time, channel_values)
            frame_time = frame_time + frame_rate
| app/resources/pymo/pymo/parsers.py | 8,255 | A class to parse a BVH file.
Extracts the skeleton and channel values
A wrapper class for re.Scanner
Returns all of the channels parsed from the file as a pandas DataFrame
BVH Parser Class
By Omid Alemi
Created: June 12, 2017
Based on: https://gist.github.com/johnfredcee/2007503
(r'-*[0-9]+(\.[0-9]+)?', digit), won't work for .34(r'[-+]?[0-9]*\.?[0-9]+', digit), won't work for 4.56e-2(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', digit),skipping open brace | 463 | en | 0.68317 |
import requests
import logging
import os
import selenium
import unittest
import time
import requests, re
from django.core.management.base import BaseCommand
from search.models import Product, Category, DetailProduct
from django.db import IntegrityError
from django.core.exceptions import MultipleObjectsReturned
from logging.handlers import RotatingFileHandler
from logging import handlers
from configparser import ConfigParser
from django.test import RequestFactory
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core import mail
from django.http import request, HttpRequest
from django.utils.http import base36_to_int, int_to_base36
from django.utils.http import urlsafe_base64_encode
from django.db.models.query_utils import Q
from django.utils.encoding import force_bytes
from django.contrib.auth import get_user_model
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
class Command(BaseCommand):
    # Django management command that drives a browser through the site's
    # login / my-products / legal-mentions / password-reset flows using
    # Selenium. Target browser and base URL depend on the ENV variable.
    help = "Tests Selenium"

    def __init__(self):
        # Local development: drive a local Firefox against the dev server.
        if os.environ.get("ENV") == "DEV":
            self.driver = webdriver.Firefox("/Users/david/Projets/selenium driver/")
            self.url = "http://127.0.0.1:8000/"
            self.driver.maximize_window()
        # CI: drive a remote Chrome session on BrowserStack against the
        # deployed instance.
        if os.environ.get("ENV") == "TRAVIS":
            self.BROWSERSTACK_URL = 'https://davidbarat1:FxhRcmmHYxhSpVrjeAWu@hub-cloud.browserstack.com/wd/hub'
            self.desired_cap = {
                'os' : 'Windows',
                'os_version' : '10',
                'browser' : 'Chrome',
                'browser_version' : '80',
                'name' : "P8 Test"
            }
            self.driver = webdriver.Remote(
                command_executor=self.BROWSERSTACK_URL,
                desired_capabilities=self.desired_cap)
            self.driver.maximize_window()
            self.url = "http://167.99.212.10/"
        # Test fixtures: search term and credentials used by the scenarios.
        self.search = "Nutella"
        self.user = "test@test.com"
        self.password = "007Test!"
        self.newpassword = "newpassword456"

    def handle(self, *args, **options):
        # Entry point invoked by Django; runs the enabled scenarios then
        # shuts the browser down.
        self.testMyProducts()
        self.testMentionsContacts()
        # self.testResetPassword()
        self.tearDown()

    def testResetPassword(self):
        # Scenario: log in, log out, request a password reset, then follow
        # the reset link (uid/token generated directly, bypassing email)
        # and submit a new password.
        # self.driver.maximize_window()
        self.driver.get(self.url)
        time.sleep(5)
        self.elem = self.driver.find_element_by_id("login")
        self.elem.send_keys(Keys.RETURN)
        time.sleep(5)
        self.elem = self.driver.find_element_by_id("id_username")
        self.elem.send_keys(self.user)
        self.elem = self.driver.find_element_by_id("id_password")
        self.elem.send_keys(self.password)
        self.elem.send_keys(Keys.RETURN)
        time.sleep(3)
        self.elem = self.driver.find_element_by_id("logout")
        self.elem.send_keys(Keys.RETURN)
        time.sleep(3)
        self.elem = self.driver.find_element_by_id("login")
        self.elem.send_keys(Keys.RETURN)
        time.sleep(5)
        self.elem = self.driver.find_element_by_id("resetpassword")
        self.elem.send_keys(Keys.RETURN)
        time.sleep(3)
        self.elem = self.driver.find_element_by_id("id_email")
        self.elem.send_keys(self.user)
        time.sleep(3)
        # Build the reset URL manually from the user's uid and a fresh
        # token, exactly as the password-reset email would.
        self.user_filter = User.objects.filter(Q(email=self.user))
        for self.user in self.user_filter:
            print(self.user)
            self.token = default_token_generator.make_token(self.user)
            print(self.token)
            self.uid = urlsafe_base64_encode(force_bytes(self.user.pk))
            print(self.uid)
        self.driver.get(self.url + "reset/%s/%s/" % (self.uid, self.token))
        time.sleep(3)
        self.driver.find_element_by_id("id_new_password1").send_keys(self.newpassword)
        self.driver.find_element_by_id("id_new_password2").send_keys(self.newpassword)
        self.elem = self.driver.find_element_by_id("id_new_password2")
        time.sleep(3)
        self.elem.send_keys(Keys.RETURN)
        time.sleep(3)
        self.driver.quit()

    def testMyProducts(self):
        # Scenario: open "my products", which redirects to login, and
        # authenticate with the fixture credentials.
        # self.driver.maximize_window()
        self.driver.get(self.url)
        self.elem = self.driver.find_element_by_id("myproducts")
        self.elem.send_keys(Keys.RETURN)
        time.sleep(5)
        self.elem = self.driver.find_element_by_id("id_username")
        self.elem.send_keys(self.user)
        self.elem = self.driver.find_element_by_id("id_password")
        self.elem.send_keys(self.password)
        self.elem.send_keys(Keys.RETURN)
        time.sleep(5)

    def testMentionsContacts(self):
        # Scenario: navigate to the legal-mentions page, then the contact
        # page.
        # self.driver.maximize_window()
        self.driver.get(self.url)
        self.elem = self.driver.find_element_by_id("mentions")
        self.elem.send_keys(Keys.RETURN)
        time.sleep(5)
        self.elem = self.driver.find_element_by_id("contact")
        self.elem.send_keys(Keys.RETURN)
        time.sleep(5)

    def tearDown(self):
        # Close the browser session.
        self.driver.quit()
| search/management/commands/test_selenium.py | 5,155 | self.testResetPassword() self.driver.maximize_window() self.driver.maximize_window() self.driver.maximize_window() | 114 | en | 0.179059 |
import asyncio
import datetime
import importlib
import itertools
import os
import random
import re
import shutil
import signal
import subprocess
import sys
import time
import zipfile
import discord
import psutil
from src import const
from src.algorithms import levenshtein_distance
from src.bc import DoNotUpdateFlag
from src.bot_cache import BotCache
from src.bot_instance import BotInstance
from src.config import Command, Config, GuildSettings, SecretConfig, User, bc
from src.embed import DiscordEmbed
from src.emoji import get_clock_emoji
from src.ff import FF
from src.info import BotInfo
from src.log import log
from src.mail import Mail
from src.markov import Markov
from src.message import Msg
from src.reminder import Reminder
from src.repl import Repl
from src.utils import Util
from src.voice import VoiceRoutine
class WalBot(discord.Client):
    '''Main Discord bot client.

    Wires together configuration, background tasks (reminders, voice,
    REPL, config autosave), message/command processing and the Markov
    model. Construction also registers plugins and shared state in `bc`.
    '''

    def __init__(self, name: str, config: Config, secret_config: SecretConfig, intents: discord.Intents) -> None:
        super().__init__(intents=intents)
        self.repl = None
        bc.instance_name = self.instance_name = name
        self.config = config
        self.secret_config = secret_config
        self.bot_cache = BotCache(True)
        # Background tasks started on the client's event loop.
        self.loop.create_task(self._process_reminders())
        self.loop.create_task(VoiceRoutine(self.bot_cache).start())
        self.loop.create_task(self._repl_routine())
        # Expose shared state/callbacks through the bot-context singleton.
        bc.config = self.config
        bc.commands = self.config.commands
        bc.background_loop = self.loop
        bc.latency = lambda: self.latency
        bc.change_status = self._change_status
        bc.change_presence = self.change_presence
        bc.close = self.close
        bc.secret_config = self.secret_config
        bc.info = BotInfo()
        bc.plugin_manager.register()
        bc.fetch_channel = self.fetch_channel
        if not bc.args.fast_start:
            log.debug("Started Markov model checks...")
            if bc.markov.check():
                log.info("Markov model has passed all checks")
            else:
                log.info("Markov model has not passed checks, but all errors were fixed")

    async def _bot_runner_task(self, *args, **kwargs):
        # Run the client and guarantee it is closed even on failure.
        try:
            await self.start(*args, **kwargs)
        finally:
            if not self.is_closed():
                await self.close()

    def run(self, *args, **kwargs):
        # Sightly patched implementation from discord.py discord.Client (parent) class
        # Reference: https://github.com/Rapptz/discord.py/blob/master/discord/client.py
        loop = self.loop
        try:
            loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
            loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
        except NotImplementedError:
            pass
        # BUG FIX: the original passed *kwargs (unpacking the dict's keys
        # as positional arguments); it must be **kwargs.
        asyncio.ensure_future(self._bot_runner_task(*args, **kwargs), loop=loop)
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            loop.stop()
            log.info('Received signal to terminate bot and event loop')
        log.info("Shutting down the bot...")
        tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()}
        for task in tasks:
            task.cancel()
        loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
        for task in tasks:
            if not task.cancelled():
                log.error("Asynchronous task cancel failed!")
        loop.run_until_complete(loop.shutdown_asyncgens())
        loop.run_until_complete(self._on_shutdown())
        loop.close()
        log.info("Bot is shut down!")

    async def _on_shutdown(self) -> None:
        # Stop the REPL, cancel background events and notify plugins.
        if self.repl is not None:
            self.repl.stop()
        for event in bc.background_events:
            event.cancel()
        bc.background_loop = None
        await bc.plugin_manager.broadcast_command("close")

    @Mail.send_exception_info_to_admin_emails_async
    async def _precompile(self) -> None:
        # Warm up functions that benefit from a first call (e.g. caches).
        log.debug("Started precompiling functions...")
        levenshtein_distance("", "")
        log.debug("Finished precompiling functions")

    async def _change_status(self, string: str, type_: discord.ActivityType) -> None:
        '''Set the bot's Discord presence/activity text.'''
        await self.change_presence(activity=discord.Activity(name=string, type=type_))

    async def _config_autosave(self) -> None:
        '''Periodically save the config, with a full backup every N saves.'''
        await self.wait_until_ready()
        index = 1
        while not self.is_closed():
            await asyncio.sleep(self.config.saving["period"] * 60)
            if index % self.config.saving["backup"]["period"] == 0:
                self.config.backup(const.CONFIG_PATH, const.MARKOV_PATH)
            self.config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH)
            index += 1

    async def _process_reminders_iteration(self) -> None:
        '''Single polling pass over all reminders.

        Sends due prereminders and reminders (channel embeds, DMs,
        e-mails), reschedules recurring reminders and drops expired ones.
        '''
        log.debug3("Reminder processing iteration has started")
        now = datetime.datetime.now().replace(second=0).strftime(const.REMINDER_DATETIME_FORMAT)
        to_remove = []
        to_append = []
        reminder_do_not_update_flag = False
        for key, rem in self.config.reminders.items():
            # Prereminders: advance warnings N minutes before the event.
            for i in range(len(rem.prereminders_list)):
                prereminder = rem.prereminders_list[i]
                used_prereminder = rem.used_prereminders_list[i]
                if prereminder == 0 or used_prereminder:
                    continue
                prereminder_time = (
                    datetime.datetime.now().replace(second=0) + datetime.timedelta(minutes=prereminder))
                if rem == prereminder_time.strftime(const.REMINDER_DATETIME_FORMAT):
                    channel = self.get_channel(rem.channel_id)
                    e = DiscordEmbed()
                    e.title(f"{prereminder} minutes left until reminder")
                    e.description(rem.message + "\n" + rem.notes)
                    e.color(random.randint(0x000000, 0xffffff))
                    e.timestamp(
                        datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(minutes=prereminder))
                    e.footer(text=rem.author)
                    await channel.send("", embed=e.get())
                    rem.used_prereminders_list[i] = True
            if rem == now:
                # Reminder is due right now: post the embed and notify
                # whisper/e-mail recipients.
                channel = self.get_channel(rem.channel_id)
                clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
                e = DiscordEmbed()
                e.title(f"{clock_emoji} You asked to remind")
                e.description(rem.message + "\n" + rem.notes)
                e.color(random.randint(0x000000, 0xffffff))
                e.timestamp(datetime.datetime.now(datetime.timezone.utc))
                e.footer(text=rem.author)
                await channel.send(' '.join(rem.ping_users if rem.ping_users else ""), embed=e.get())
                for user_id in rem.whisper_users:
                    await Msg.send_direct_message(
                        self.get_user(user_id), f"You asked to remind at {now} -> {rem.message}", False)
                if rem.email_users:
                    mail = Mail(self.secret_config)
                    mail.send(
                        rem.email_users,
                        f"Reminder: {rem.message}",
                        f"You asked to remind at {now} -> {rem.message}")
                if rem.repeat_after > 0:
                    # Recurring reminder: schedule the next occurrence.
                    new_time = datetime.datetime.now().replace(second=0, microsecond=0) + rem.get_next_event_delta()
                    new_time = new_time.strftime(const.REMINDER_DATETIME_FORMAT)
                    to_append.append(
                        Reminder(str(new_time), rem.message, rem.channel_id, rem.author, rem.time_created))
                    to_append[-1].repeat_after = rem.repeat_after
                    to_append[-1].repeat_interval_measure = rem.repeat_interval_measure
                    to_append[-1].prereminders_list = rem.prereminders_list
                    to_append[-1].used_prereminders_list = [False] * len(rem.prereminders_list)
                    to_append[-1].notes = rem.notes
                    log.debug2(f"Scheduled renew of recurring reminder - old id: {key}")
                to_remove.append(key)
            elif rem < now:
                # Reminder is in the past (missed while offline): drop it.
                log.debug2(f"Scheduled reminder with id {key} removal")
                to_remove.append(key)
            else:
                # Upcoming reminder: block bot auto-update if it (or any
                # of its prereminders) fires within the next ~5 minutes.
                prereminders_delay = 0
                if rem.prereminders_list:
                    prereminders_delay = max(rem.prereminders_list)
                if ((datetime.datetime.strptime(rem.time, const.REMINDER_DATETIME_FORMAT) - datetime.datetime.now())
                        < datetime.timedelta(minutes=(5 + prereminders_delay / 60))):
                    reminder_do_not_update_flag = True
        bc.do_not_update[DoNotUpdateFlag.REMINDER] = reminder_do_not_update_flag
        for key in to_remove:
            self.config.reminders.pop(key)
        for item in to_append:
            key = self.config.ids["reminder"]
            self.config.reminders[key] = item
            self.config.ids["reminder"] += 1
        log.debug3("Reminder processing iteration has finished")

    @Mail.send_exception_info_to_admin_emails_async
    async def _process_reminders(self) -> None:
        # Background loop: poll reminders at a fixed interval.
        await self.wait_until_ready()
        while not self.is_closed():
            await self._process_reminders_iteration()
            await asyncio.sleep(const.REMINDER_POLLING_INTERVAL)

    async def _repl_routine(self) -> None:
        # Start the REPL server on the configured port.
        self.repl = Repl(self.config.repl["port"])
        await self.repl.start()

    @Mail.send_exception_info_to_admin_emails_async
    async def on_ready(self) -> None:
        '''Discord event: connection established and guild data cached.'''
        self._load_plugins()
        log.info(
            f"Logged in as: {self.user.name} {self.user.id} ({self.__class__.__name__}), "
            f"instance: {self.instance_name}")
        self.bot_cache.update({
            "ready": True,
        })
        self.bot_cache.dump_to_file()
        bc.guilds = self.guilds
        # Ensure every guild the bot is in has a settings record.
        for guild in self.guilds:
            if guild.id not in self.config.guilds.keys():
                self.config.guilds[guild.id] = GuildSettings(guild.id)
        bc.bot_user = self.user
        self.loop.create_task(self._config_autosave())
        self.loop.create_task(self._precompile())

    def _load_plugins(self) -> None:
        # Register newly discovered plugins and start those marked autostart.
        for plugin_name in bc.plugin_manager.get_plugins_list():
            if plugin_name not in self.config.plugins.keys():
                self.config.plugins[plugin_name] = {
                    "autostart": False,
                }
        for plugin_name, plugin_state in self.config.plugins.items():
            if plugin_state["autostart"]:
                asyncio.create_task(bc.plugin_manager.send_command(plugin_name, "init"))

    @Mail.send_exception_info_to_admin_emails_async
    async def on_message(self, message: discord.Message) -> None:
        '''Discord event: filter the message, then route it to command or
        regular-message processing.'''
        await bc.plugin_manager.broadcast_command("on_message", message)
        if self.config.guilds[message.channel.guild.id].ignored:
            return
        bc.message_buffer.push(message)
        log.info(f"<{message.id}> {message.author} -> {message.content}")
        if message.author.id == self.user.id:
            return
        if isinstance(message.channel, discord.DMChannel):
            return
        if message.channel.guild.id is None:
            return
        if self.config.guilds[message.channel.guild.id].is_whitelisted:
            if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
                return
        if message.author.id not in self.config.users.keys():
            self.config.users[message.author.id] = User(message.author.id)
        if self.config.users[message.author.id].permission_level < 0:
            return
        if message.content.startswith(self.config.commands_prefix):
            await self._process_command(message)
        else:
            await self._process_regular_message(message)
            await self._process_repetitions(message)

    @Mail.send_exception_info_to_admin_emails_async
    async def on_message_edit(self, old_message: discord.Message, message: discord.Message) -> None:
        '''Discord event: apply the same filters as on_message, but only
        commands are re-processed on edit.'''
        if message.embeds != old_message.embeds:
            log.info(f"<{message.id}> (edit, embed update) {message.author} -> {message.content}")
            return
        if self.config.guilds[message.channel.guild.id].ignored:
            return
        bc.message_buffer.push(message)
        log.info(f"<{message.id}> (edit) {message.author} -> {message.content}")
        if message.author.id == self.user.id:
            return
        if isinstance(message.channel, discord.DMChannel):
            return
        if message.channel.guild.id is None:
            return
        if self.config.guilds[message.channel.guild.id].is_whitelisted:
            if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
                return
        if message.author.id not in self.config.users.keys():
            self.config.users[message.author.id] = User(message.author.id)
        if self.config.users[message.author.id].permission_level < 0:
            return
        if message.content.startswith(self.config.commands_prefix):
            await self._process_command(message)

    async def _process_repetitions(self, message: discord.Message) -> None:
        # If the last three channel messages are identical and none came
        # from the bot, join the repetition.
        m = tuple(bc.message_buffer.get(message.channel.id, i) for i in range(3))
        if (all(m) and m[0].content and m[0].content == m[1].content == m[2].content and
            (m[0].author.id != self.user.id and
             m[1].author.id != self.user.id and
             m[2].author.id != self.user.id)):
            await message.channel.send(m[0].content)

    async def _process_regular_message(self, message: discord.Message) -> None:
        '''Handle a non-command message: Markov responses/logging,
        configured text responses and emoji reactions, all gated by
        per-channel whitelists.'''
        channel_id = message.channel.id
        if isinstance(message.channel, discord.Thread):  # Inherit parent channel settings for threads
            channel_id = message.channel.parent_id
        if (self.user.mentioned_in(message) or self.user.id in [
                member.id for member in list(
                    itertools.chain(*[role.members for role in message.role_mentions]))]):
            if channel_id in self.config.guilds[message.channel.guild.id].markov_responses_whitelist:
                result = await self.config.disable_pings_in_response(message, bc.markov.generate())
                await message.channel.send(message.author.mention + ' ' + result)
        elif channel_id in self.config.guilds[message.channel.guild.id].markov_logging_whitelist:
            needs_to_be_added = True
            for ignored_prefix in bc.markov.ignored_prefixes.values():
                if message.content.startswith(ignored_prefix):
                    needs_to_be_added = False
                    break
            if needs_to_be_added:
                bc.markov.add_string(message.content)
        if channel_id in self.config.guilds[message.channel.guild.id].responses_whitelist:
            responses_count = 0
            for response in self.config.responses.values():
                if responses_count >= const.MAX_BOT_RESPONSES_ON_ONE_MESSAGE:
                    break
                if re.search(response.regex, message.content):
                    text = await Command.process_subcommands(
                        response.text, message, self.config.users[message.author.id])
                    await Msg.reply(message, text, False)
                    responses_count += 1
        if channel_id in self.config.guilds[message.channel.guild.id].reactions_whitelist:
            for reaction in self.config.reactions.values():
                if re.search(reaction.regex, message.content):
                    log.info("Added reaction " + reaction.emoji)
                    try:
                        await message.add_reaction(reaction.emoji)
                    except discord.HTTPException:
                        pass

    async def _process_command(self, message: discord.Message) -> None:
        '''Parse and dispatch a prefixed command, resolving aliases and
        enforcing a per-command execution time limit.'''
        command = message.content.split(' ')
        command = list(filter(None, command))
        command[0] = command[0][1:]  # strip the command prefix
        if not command[0]:
            return log.debug("Ignoring empty command")
        if command[0] not in self.config.commands.data.keys():
            if command[0] in self.config.commands.aliases.keys():
                command[0] = self.config.commands.aliases[command[0]]
            else:
                await message.channel.send(
                    f"Unknown command '{command[0]}', "
                    f"probably you meant '{self._suggest_similar_command(command[0])}'")
                return
        # Long-running interactive commands are exempt from the time limit.
        if command[0] not in (
            "poll",
            "timer",
            "stopwatch",
            "vqpush",
        ):
            timeout_error, _ = await Util.run_function_with_time_limit(
                self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id]),
                const.MAX_COMMAND_EXECUTION_TIME)
            if command[0] not in (
                "silent",
            ) and timeout_error:
                await message.channel.send(f"Command '{' '.join(command)}' took too long to execute")
        else:
            await self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id])

    def _suggest_similar_command(self, unknown_command: str) -> str:
        '''Return the known command or alias closest (by Levenshtein
        distance) to *unknown_command*.'''
        min_dist = 100000
        suggestion = ""
        for command in self.config.commands.data.keys():
            dist = levenshtein_distance(unknown_command, command)
            if dist < min_dist:
                suggestion = command
                min_dist = dist
        for command in self.config.commands.aliases.keys():
            dist = levenshtein_distance(unknown_command, command)
            if dist < min_dist:
                suggestion = command
                min_dist = dist
        return suggestion

    async def on_raw_message_edit(self, payload: discord.RawMessageUpdateEvent) -> None:
        # Raw gateway edit event; payload may lack author/content fields.
        try:
            log.info(f"<{payload.message_id}> (raw_edit) {payload.data['author']['username']}#"
                     f"{payload.data['author']['discriminator']} -> {payload.data['content']}")
        except KeyError:
            pass

    async def on_raw_message_delete(self, payload: discord.RawMessageDeleteEvent) -> None:
        # Raw gateway delete event.
        log.info(f"<{payload.message_id}> (delete)")
class DiscordBotInstance(BotInstance):
    # Process-level lifecycle manager for the Discord bot: start() reads
    # configs, constructs the (mini)bot and runs it; stop() signals a
    # running instance by pid from the cache file.

    def start(self, args, main_bot=True):
        # Check whether bot is already running
        bot_cache = BotCache(main_bot).parse()
        if bot_cache is not None:
            pid = bot_cache["pid"]
            if pid is not None and psutil.pid_exists(pid):
                return log.error("Bot is already running!")
        # Some variable initializations
        config = None
        secret_config = None
        bc.restart_flag = False
        bc.args = args
        # Handle --nohup flag
        if sys.platform in ("linux", "darwin") and args.nohup:
            fd = os.open(const.NOHUP_FILE_PATH, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
            log.info(f"Output is redirected to {const.NOHUP_FILE_PATH}")
            os.dup2(fd, sys.stdout.fileno())
            os.dup2(sys.stdout.fileno(), sys.stderr.fileno())
            os.close(fd)
            signal.signal(signal.SIGHUP, signal.SIG_IGN)
        # Selecting YAML parser
        bc.yaml_loader, bc.yaml_dumper = Util.get_yaml(verbose=True)
        # Saving application pd in order to safely stop it later
        BotCache(main_bot).dump_to_file()
        # Executing patch tool if it is necessary
        if args.patch:
            cmd = f"'{sys.executable}' '{os.path.dirname(__file__) + '/../tools/patch.py'}' all"
            log.info("Executing patch tool: " + cmd)
            # NOTE(review): subprocess.call is given a quoted command string
            # without shell=True — on POSIX this is interpreted as a single
            # program name and likely fails; confirm intended behavior.
            subprocess.call(cmd)
        # Read configuration files
        config = Util.read_config_file(const.CONFIG_PATH)
        if config is None:
            config = Config()
        secret_config = Util.read_config_file(const.SECRET_CONFIG_PATH)
        if secret_config is None:
            secret_config = SecretConfig()
        bc.markov = Util.read_config_file(const.MARKOV_PATH)
        if bc.markov is None and os.path.isdir("backup"):
            # Check available backups
            markov_backups = sorted(
                [x for x in os.listdir("backup") if x.startswith("markov_") and x.endswith(".zip")])
            if markov_backups:
                # Restore Markov model from backup
                with zipfile.ZipFile("backup/" + markov_backups[-1], 'r') as zip_ref:
                    zip_ref.extractall(".")
                log.info(f"Restoring Markov model from backup/{markov_backups[-1]}")
                shutil.move(markov_backups[-1][:-4], "markov.yaml")
                bc.markov = Util.read_config_file(const.MARKOV_PATH)
                if bc.markov is None:
                    bc.markov = Markov()
                    log.warning("Failed to restore Markov model from backup. Creating new Markov model...")
        if bc.markov is None:
            bc.markov = Markov()
            log.info("Created empty Markov model")
        # Check config versions
        ok = True
        ok &= Util.check_version(
            "discord.py", discord.__version__, const.DISCORD_LIB_VERSION,
            solutions=[
                "execute: python -m pip install -r requirements.txt",
            ])
        ok &= Util.check_version(
            "Config", config.version, const.CONFIG_VERSION,
            solutions=[
                "run patch tool",
                "remove config.yaml (settings will be lost!)",
            ])
        ok &= Util.check_version(
            "Markov config", bc.markov.version, const.MARKOV_CONFIG_VERSION,
            solutions=[
                "run patch tool",
                "remove markov.yaml (Markov model will be lost!)",
            ])
        ok &= Util.check_version(
            "Secret config", secret_config.version, const.SECRET_CONFIG_VERSION,
            solutions=[
                "run patch tool",
                "remove secret.yaml (your Discord authentication token will be lost!)",
            ])
        # Version mismatches are fatal only for the main bot instance.
        if main_bot and not ok:
            sys.exit(const.ExitStatus.CONFIG_FILE_ERROR)
        config.commands.update()
        # Checking authentication token
        if secret_config.token is None:
            secret_config = SecretConfig()
            if not FF.is_enabled("WALBOT_FEATURE_NEW_CONFIG"):
                secret_config.token = input("Enter your token: ")
        # Constructing bot instance
        if main_bot:
            intents = discord.Intents.all()
            walbot = WalBot(args.name, config, secret_config, intents=intents)
        else:
            walbot = importlib.import_module("src.minibot").MiniWalBot(args.name, config, secret_config, args.message)
        # Starting the bot
        try:
            walbot.run(secret_config.token)
        except discord.errors.PrivilegedIntentsRequired:
            log.error("Privileged Gateway Intents are not enabled! Shutting down the bot...")
        # After stopping the bot
        log.info("Bot is disconnected!")
        if main_bot:
            config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH, wait=True)
        BotCache(main_bot).remove()
        if bc.restart_flag:
            # Restart was requested: spawn a fresh bot process. On POSIX,
            # fork so the child survives this process exiting.
            cmd = f"'{sys.executable}' '{os.path.dirname(os.path.dirname(__file__)) + '/walbot.py'}' start"
            log.info("Calling: " + cmd)
            if sys.platform in ("linux", "darwin"):
                fork = os.fork()
                if fork == 0:
                    subprocess.call(cmd)
                elif fork > 0:
                    log.info("Stopping current instance of the bot")
                    sys.exit(const.ExitStatus.NO_ERROR)
            else:
                subprocess.call(cmd)

    def stop(self, _, main_bot=True):
        # Stop a running instance identified by the pid in the cache file.
        if not BotCache(main_bot).exists():
            return log.error("Could not stop the bot (cache file does not exist)")
        bot_cache = BotCache(main_bot).parse()
        pid = bot_cache["pid"]
        if pid is None:
            return log.error("Could not stop the bot (cache file does not contain pid)")
        if psutil.pid_exists(pid):
            if sys.platform == "win32":
                # Windows has no SIGINT delivery to another console process;
                # attach to its console and emit a Ctrl+C event instead.
                # Reference to the original solution:
                # https://stackoverflow.com/a/64357453
                import ctypes
                kernel = ctypes.windll.kernel32
                kernel.FreeConsole()
                kernel.AttachConsole(pid)
                kernel.SetConsoleCtrlHandler(None, 1)
                kernel.GenerateConsoleCtrlEvent(0, 0)
            else:
                os.kill(pid, signal.SIGINT)
            # Wait for the process to actually terminate before returning.
            while psutil.pid_exists(pid):
                log.debug("Bot is still running. Please, wait...")
                time.sleep(0.5)
            log.info("Bot is stopped!")
        else:
            log.error("Could not stop the bot (bot is not running)")
            BotCache(main_bot).remove()
| src/bot.py | 25,162 | Sightly patched implementation from discord.py discord.Client (parent) class Reference: https://github.com/Rapptz/discord.py/blob/master/discord/client.py Inherit parent channel settings for threads Check whether bot is already running Some variable initializations Handle --nohup flag Selecting YAML parser Saving application pd in order to safely stop it later Executing patch tool if it is necessary Read configuration files Check available backups Restore Markov model from backup Check config versions Checking authentication token Constructing bot instance Starting the bot After stopping the bot Reference to the original solution: https://stackoverflow.com/a/64357453 | 675 | en | 0.724693 |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
    ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
    ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
    ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
    ENABLE_ZMQ=0
ENABLE_COVERAGE=0

#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")

# Bold on/off escape codes for test names; only POSIX terminals get ANSI codes.
bold = ("","")
if (os.name == 'posix'):
    bold = ('\033[0m', '\033[1m')

# Sort CLI arguments: --coverage is handled here, other long options (and -h)
# are forwarded verbatim to each test script via passOn, everything else
# selects which tests to run (collected into opts).
for arg in sys.argv[1:]:
    if arg == '--coverage':
        ENABLE_COVERAGE = 1
    elif (p.match(arg) or arg == "-h"):
        passOn += " " + arg
    else:
        opts.add(arg)

#Set env vars
buildDir = BUILDDIR
if "LUMOD" not in os.environ:
    os.environ["LUMOD"] = buildDir + '/src/lumocashd' + EXEEXT
if "LUMOCLI" not in os.environ:
    os.environ["LUMOCLI"] = buildDir + '/src/lumocash-cli' + EXEEXT

# Windows tests are opt-in via -win; see upstream discussion:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
if EXEEXT == ".exe" and "-win" not in opts:
    print "Win tests currently disabled by default.  Use -win option to enable"
    sys.exit(0)

if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
    print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
    sys.exit(0)

# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
    try:
        import zmq
    except ImportError as e:
        print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
            "to run zmq tests, see dependency info in /qa/README.md.")
        raise e
#Tests
# Base suite: run by default, subject to the selection logic in runtests().
testScripts = [
    'bip68-112-113-p2p.py',
    'wallet.py',
    'listtransactions.py',
    'receivedby.py',
    'mempool_resurrect_test.py',
    'txn_doublespend.py --mineblock',
    'txn_clone.py',
    'getchaintips.py',
    'rawtransactions.py',
    'rest.py',
    'mempool_spendcoinbase.py',
    'mempool_reorg.py',
    'mempool_limit.py',
    'httpbasics.py',
    'multi_rpc.py',
    'zapwallettxes.py',
    'proxy_test.py',
    'merkle_blocks.py',
    'fundrawtransaction.py',
    'signrawtransactions.py',
    'walletbackup.py',
    'nodehandling.py',
    'reindex.py',
    'addressindex.py',
    'timestampindex.py',
    'spentindex.py',
    'decodescript.py',
    'p2p-fullblocktest.py', # NOTE: needs lumocash_hash to pass
    'blockchain.py',
    'disablewallet.py',
    'sendheaders.py', # NOTE: needs lumocash_hash to pass
    'keypool.py',
    'prioritise_transaction.py',
    'invalidblockrequest.py', # NOTE: needs lumocash_hash to pass
    'invalidtxrequest.py', # NOTE: needs lumocash_hash to pass
    'abandonconflict.py',
    'p2p-versionbits-warning.py',
]
# zmq test only when python-zmq is importable (verified above)
if ENABLE_ZMQ:
    testScripts.append('zmq_test.py')

# Extended suite: only run when -extended is given (slow or flaky tests).
testScriptsExt = [
    'bip9-softforks.py',
    'bip65-cltv.py',
    'bip65-cltv-p2p.py', # NOTE: needs lumocash_hash to pass
    'bip68-sequence.py',
    'bipdersig-p2p.py', # NOTE: needs lumocash_hash to pass
    'bipdersig.py',
    'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
    'getblocktemplate_proposals.py',
    'txn_doublespend.py',
    'txn_clone.py --mineblock',
    # 'pruning.py', # Prune mode is incompatible with -txindex.
    'forknotify.py',
    'invalidateblock.py',
    # 'rpcbind_test.py', #temporary, bug in libevent, see #6655
    'smartfees.py',
    'maxblocksinflight.py',
    'p2p-acceptblock.py', # NOTE: needs lumocash_hash to pass
    'mempool_packages.py',
    'maxuploadtarget.py',
    # 'replace-by-fee.py', # RBF is disabled in LumoCash
]
def runtests():
    """Run the selected base (and optionally extended) test scripts.

    Selection comes from the module-level `opts` set; long options collected
    in `passOn` are forwarded to every script. With coverage enabled, a
    temporary coverage directory is created, reported on, and cleaned up.
    """
    coverage = None
    if ENABLE_COVERAGE:
        coverage = RPCCoverage()
        print("Initializing coverage directory at %s\n" % coverage.dir)

    rpcTestDir = buildDir + '/qa/rpc-tests/'
    run_extended = '-extended' in opts
    cov_flag = coverage.flag if coverage else ''
    flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)

    # Base suite: a script runs when nothing specific was selected, when the
    # extended run was requested, or when it was named on the command line
    # (with or without the .py suffix).
    for script in testScripts:
        selected = (len(opts) == 0
                    or (len(opts) == 1 and "-win" in opts)
                    or run_extended
                    or script in opts
                    or re.sub(".py$", "", script) in opts)
        if selected:
            print("Running testscript %s%s%s ..." % (bold[1], script, bold[0]))
            started = time.time()
            subprocess.check_call(rpcTestDir + script + flags, shell=True)
            print("Duration: %s s\n" % (int(time.time() - started)))

            # exit if help is called so we print just one set of instructions
            if re.compile(" -h| --help").match(passOn):
                sys.exit(0)

    # Extended suite: only explicitly requested scripts or a full -extended run.
    for script in testScriptsExt:
        if (run_extended or script in opts
                or re.sub(".py$", "", script) in opts):
            print(
                "Running 2nd level testscript "
                + "%s%s%s ..." % (bold[1], script, bold[0]))
            started = time.time()
            subprocess.check_call(rpcTestDir + script + flags, shell=True)
            print("Duration: %s s\n" % (int(time.time() - started)))

    if coverage:
        coverage.report_rpc_coverage()
        print("Cleaning up coverage data")
        coverage.cleanup()
class RPCCoverage(object):
    """
    Coverage reporting utilities for pull-tester.

    Each test script subprocess writes the RPC commands it invoked into
    `coverage.*` files under a shared temporary directory; that directory
    also holds the full RPC command listing (`rpc_interface.txt`) produced
    from `bitcoin-cli help`. After the run, the union of invoked commands is
    diffed against the full listing to find uncovered RPC commands.

    See also: qa/rpc-tests/test_framework/coverage.py
    """
    def __init__(self):
        # Shared scratch directory; `flag` is passed to every test script.
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir %s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()

        if not uncovered:
            print("All RPC commands covered.")
        else:
            print("Uncovered RPC commands:")
            print("".join(("  - %s\n" % i) for i in sorted(uncovered)))

    def cleanup(self):
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `qa/rpc-tests/test-framework/coverage.py`
        reference_path = os.path.join(self.dir, 'rpc_interface.txt')
        if not os.path.isfile(reference_path):
            raise RuntimeError("No coverage reference found")

        with open(reference_path, 'r') as f:
            all_cmds = set(line.strip() for line in f)

        covered_cmds = set()
        for root, dirs, files in os.walk(self.dir):
            for filename in files:
                if filename.startswith('coverage.'):
                    with open(os.path.join(root, filename), 'r') as f:
                        covered_cmds.update(line.strip() for line in f)

        return all_cmds - covered_cmds
# Script entry point: run the configured RPC test suites.
if __name__ == '__main__':
    runtests()
| qa/pull-tester/rpc-tests.py | 8,739 | !/usr/bin/env python2 Copyright (c) 2014-2015 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.If imported values are not defined then set to zero (or disabled)Create a set to store arguments and create the passOn stringSet env vars https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9 https://github.com/bitcoin/bitcoin/pull/5677issuecomment-136646964 python-zmq may not be installed. Handle this gracefully and with some helpful infoTests NOTE: needs lumocash_hash to pass NOTE: needs lumocash_hash to pass NOTE: needs lumocash_hash to pass NOTE: needs lumocash_hash to pass NOTE: needs lumocash_hash to pass NOTE: needs lumocash_hash to pass FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651 'pruning.py', Prune mode is incompatible with -txindex. 'rpcbind_test.py', temporary, bug in libevent, see 6655 NOTE: needs lumocash_hash to pass 'replace-by-fee.py', RBF is disabled in LumoCashRun Tests exit if help is called so we print just one set of instructions Run Extended Tests This is shared from `qa/rpc-tests/test-framework/coverage.py` | 1,265 | en | 0.789271 |
'''
URL: https://leetcode.com/problems/maximum-nesting-depth-of-the-parentheses/
Difficulty: Easy
Description: Maximum Nesting Depth of the Parentheses
A string is a valid parentheses string (denoted VPS) if it meets one of the following:
It is an empty string "", or a single character not equal to "(" or ")",
It can be written as AB (A concatenated with B), where A and B are VPS's, or
It can be written as (A), where A is a VPS.
We can similarly define the nesting depth depth(S) of any VPS S as follows:
depth("") = 0
depth(C) = 0, where C is a string with a single character not equal to "(" or ")".
depth(A + B) = max(depth(A), depth(B)), where A and B are VPS's.
depth("(" + A + ")") = 1 + depth(A), where A is a VPS.
For example, "", "()()", and "()(()())" are VPS's (with nesting depths 0, 1, and 2), and ")(" and "(()" are not VPS's.
Given a VPS represented as string s, return the nesting depth of s.
Example 1:
Input: s = "(1+(2*3)+((8)/4))+1"
Output: 3
Explanation: Digit 8 is inside of 3 nested parentheses in the string.
Example 2:
Input: s = "(1)+((2))+(((3)))"
Output: 3
Example 3:
Input: s = "1+(2*3)/(2-1)"
Output: 1
Example 4:
Input: s = "1"
Output: 0
Constraints:
1 <= s.length <= 100
s consists of digits 0-9 and characters '+', '-', '*', '/', '(', and ')'.
It is guaranteed that parentheses expression s is a VPS.
'''
class Solution:
    def maxDepth(self, s):
        """Return the maximum nesting depth of a valid parentheses string.

        Tracks the running depth: '(' enters a level, ')' leaves one. Since
        the input is guaranteed to be a VPS, the running maximum observed on
        each '(' is the answer; no -inf sentinel or fallback return is needed.
        """
        depth = 0
        max_depth = 0
        for ch in s:
            if ch == '(':
                depth += 1
                # A new level is entered the moment '(' is seen.
                max_depth = max(max_depth, depth)
            elif ch == ')':
                depth -= 1
        return max_depth
| 1614 Maximum Nesting Depth of the Parentheses.py | 1,733 | URL: https://leetcode.com/problems/maximum-nesting-depth-of-the-parentheses/
Difficulty: Easy
Description: Maximum Nesting Depth of the Parentheses
A string is a valid parentheses string (denoted VPS) if it meets one of the following:
It is an empty string "", or a single character not equal to "(" or ")",
It can be written as AB (A concatenated with B), where A and B are VPS's, or
It can be written as (A), where A is a VPS.
We can similarly define the nesting depth depth(S) of any VPS S as follows:
depth("") = 0
depth(C) = 0, where C is a string with a single character not equal to "(" or ")".
depth(A + B) = max(depth(A), depth(B)), where A and B are VPS's.
depth("(" + A + ")") = 1 + depth(A), where A is a VPS.
For example, "", "()()", and "()(()())" are VPS's (with nesting depths 0, 1, and 2), and ")(" and "(()" are not VPS's.
Given a VPS represented as string s, return the nesting depth of s.
Example 1:
Input: s = "(1+(2*3)+((8)/4))+1"
Output: 3
Explanation: Digit 8 is inside of 3 nested parentheses in the string.
Example 2:
Input: s = "(1)+((2))+(((3)))"
Output: 3
Example 3:
Input: s = "1+(2*3)/(2-1)"
Output: 1
Example 4:
Input: s = "1"
Output: 0
Constraints:
1 <= s.length <= 100
s consists of digits 0-9 and characters '+', '-', '*', '/', '(', and ')'.
It is guaranteed that parentheses expression s is a VPS. | 1,351 | en | 0.848085 |
def remove_nan_entries(df, key_columns, verbose=True):
    """Drop rows whose value is missing in any of the given key columns.

    Args:
        df: pandas DataFrame to filter.
        key_columns: column names that must be non-missing for a row to survive.
        verbose: if True, print how many rows were pruned.

    Returns:
        The filtered DataFrame (the input is not modified).
    """
    n_row = len(df)
    for column in key_columns:
        # notna() catches NaN, None and NaT alike; the previous `x == x`
        # trick missed None in object-dtype columns (None == None is True).
        df = df[df[column].notna()]
    if verbose:
        print("Prune ({}/{}) rows.".format(n_row - len(df), n_row))
    return df
def parse_relationship_path(relationship_path):
    """Flatten a featuretools RelationshipPath into its entity/variable ids.

    Only the first relationship on the path is considered.
    """
    # TODO: get the relationship with a public function instead
    relationship = relationship_path._relationships_with_direction[0][1]
    ids = {}
    ids['parent_entity_id'] = relationship.parent_entity.id
    ids['parent_variable_id'] = relationship.parent_variable.id
    ids['child_entity_id'] = relationship.child_entity.id
    ids['child_variable_id'] = relationship.child_variable.id
    return ids
def get_forward_entities(entityset, entity_id):
    """Return entity ids reachable via forward relationships, BFS order.

    The starting entity is included first. Entities reachable through
    several paths appear once per path (duplicates are not removed, matching
    the original behavior).

    Args:
        entityset: featuretools EntitySet (anything exposing
            get_forward_entities(entity_id) -> iterable of (child_id, path)).
        entity_id: id of the entity to start from.

    Returns:
        list of entity ids in breadth-first order.
    """
    from collections import deque

    ids = []
    # deque gives O(1) popleft; the old list-slicing queue was O(n) per pop.
    queue = deque([entity_id])
    while queue:
        current_id = queue.popleft()
        ids.append(current_id)
        for child_id, _ in entityset.get_forward_entities(current_id):
            queue.append(child_id)
    return ids
def get_forward_attributes(entityset, target_entity, direct_id, interesting_ids=None):
    """Collect the attribute dicts of a record and of its forward (parent) records.

    Starting from row `direct_id` of `target_entity`, follows every forward
    relationship upward and prepends each visited record's values, so the
    returned list is ordered from the most distant ancestor down to the
    target record itself.

    Args:
        entityset: featuretools EntitySet.
        target_entity: entity id to start from.
        direct_id: index of the starting row in that entity's dataframe.
        interesting_ids: optional whitelist of entity ids; entities not in it
            are skipped (and their own ancestors are not visited either).

    Returns:
        list of {'entityId': ..., 'items': {column: value}} dicts; missing
        values are replaced by the string 'N/A'.
    """
    info = []
    entity_id_pipe = [(target_entity, direct_id)]
    while len(entity_id_pipe):
        entity_id, direct_id = entity_id_pipe.pop()
        if interesting_ids is not None and entity_id not in interesting_ids:
            continue
        df = entityset[entity_id].df
        # Prepend so ancestors end up before their descendants in the result.
        info = [{'entityId': entity_id, 'items': df.loc[direct_id].fillna('N/A').to_dict()}] + info
        for child_id, relationship_path in entityset.get_forward_entities(entity_id):
            relation = parse_relationship_path(relationship_path)
            # Follow the current row's foreign key up to the parent record.
            entity_id_pipe.append((child_id, df.loc[direct_id][relation['parent_variable_id']]))
    return info
def find_path(entityset, source_entity, target_entity):
    """Find a path of the source entity to the target_entity.

    Searches from the target outward over both backward and forward
    relationships (LIFO pop, i.e. depth-first), recording each node's
    predecessor, then replays the predecessor chain from the source.

    Returns:
        A list of paths of increasing length: paths[-1] is the full path
        [source_entity, ..., target_entity]; earlier entries are its
        prefixes.

    NOTE(review): if target_entity is unreachable from source_entity the
    replay loop raises KeyError on parent_dict — confirm callers only pass
    connected entities.
    """
    nodes_pipe = [target_entity]
    parent_dict = {target_entity: None}
    while len(nodes_pipe):
        # pop() from the end: depth-first exploration order.
        parent_node = nodes_pipe.pop()
        if parent_node == source_entity:
            break
        child_nodes = [e[0] for e in entityset.get_backward_entities(parent_node)] \
            + [e[0] for e in entityset.get_forward_entities(parent_node)]
        for child in child_nodes:
            if child not in parent_dict:
                parent_dict[child] = parent_node
                nodes_pipe.append(child)
    node = source_entity
    paths = [[node]]
    while node != target_entity:
        # Each predecessor link moves one step closer to target_entity.
        node = parent_dict[node]
        paths.append(paths[-1] + [node])
    return paths
def transfer_cutoff_times(entityset, cutoff_times, source_entity, target_entity,
                          reduce="latest"):
    """Re-index cutoff times from source_entity's index to target_entity's.

    Walks the relationship path between the two entities step by step. Going
    parent -> child duplicates each parent's cutoff time onto its children;
    going child -> parent collapses the children's times onto the parent
    according to `reduce`.

    Args:
        entityset: featuretools EntitySet containing both entities.
        cutoff_times: DataFrame indexed by source_entity's index with a
            `time` column.
        source_entity: entity id the cutoff_times currently refer to.
        target_entity: entity id to transfer the cutoff_times to.
        reduce: "latest" keeps the maximum time per parent, "earliest" the
            minimum ("earist" is accepted as a legacy misspelling).

    Returns:
        DataFrame of cutoff times indexed by target_entity's index.

    Raises:
        ValueError: if two adjacent entities on the path share no
            relationship, or `reduce` is an unknown option.
    """
    path = find_path(entityset, source_entity, target_entity)[-1]
    for i, source in enumerate(path[:-1]):
        target = path[i + 1]
        options = list(filter(lambda r: (r.child_entity.id == source
                                         and r.parent_entity.id == target)
                              or (r.parent_entity.id == source
                                  and r.child_entity.id == target),
                              entityset.relationships))
        if len(options) == 0:
            raise ValueError("No Relationship between {} and {}".format(source, target))
        r = options[0]
        if target == r.child_entity.id:
            # Transfer cutoff_times to "child", e.g., PATIENTS -> ADMISSIONS
            child_df_index = r.child_entity.df[r.child_variable.id].values
            cutoff_times = cutoff_times.loc[child_df_index]
            cutoff_times.index = r.child_entity.df.index
        elif source == r.child_entity.id:
            # Transfer cutoff_times to "parent", e.g., ADMISSIONS -> PATIENTS
            cutoff_times[r.child_variable.id] = r.child_entity.df[r.child_variable.id]
            if reduce == "latest":
                idx = cutoff_times.groupby(r.child_variable.id).time.idxmax().values
            elif reduce in ("earliest", "earist"):
                # "earist" kept for backward compatibility (historical typo).
                idx = cutoff_times.groupby(r.child_variable.id).time.idxmin().values
            else:
                raise ValueError("Unknown reduce option.")
            cutoff_times = cutoff_times.loc[idx]
            cutoff_times = cutoff_times.set_index(r.child_variable.id, drop=True)
    return cutoff_times
def get_records(entityset, subject_id, entity_id, time_index=None, cutoff_time=None):
    """Select one entity's rows for a subject, optionally truncated in time.

    Entities without a SUBJECT_ID column are treated as global tables and
    returned unfiltered by subject. When both time_index and cutoff_time are
    given, only records observed at or before the cutoff survive.
    """
    records = entityset[entity_id].df
    # select records by SUBJECT_ID (global tables pass through)
    if 'SUBJECT_ID' in records.columns:
        records = records[records['SUBJECT_ID'] == subject_id]
    # select records before or at the cutoff_time
    if cutoff_time is not None and time_index is not None:
        records = records[records[time_index] <= cutoff_time]
    # TODO filter records according to secondary time index
    return records
def get_item_dict(es):
    """Build {entity_id: {item_id: label}} lookup tables from the dictionaries.

    LABEVENTS labels come from D_LABITEMS; chart/surgery labels come from the
    D_ITEMS rows whose LINKSTO column points at the respective event table.
    """
    item_dict = {'LABEVENTS': es['D_LABITEMS'].df.loc[:, 'LABEL'].to_dict()}
    d_items = es['D_ITEMS'].df
    for entity_id in ('CHARTEVENTS', 'SURGERY_VITAL_SIGNS'):
        # TODO: Change 'LABEL' to 'LABEL_CN' for Chinese labels
        linked = d_items[d_items['LINKSTO'] == entity_id.lower()]
        item_dict[entity_id] = linked.loc[:, 'LABEL'].to_dict()
    return item_dict
| vbridge/utils/entityset_helpers.py | 5,296 | Find a path of the source entity to the target_entity.
TODO: get the relationship with a public function instead Transfer cutoff_times to "child", e.g., PATIENTS -> ADMISSIONS Transfer cutoff_times to "parent", e.g., ADMISSIONS -> PATIENTS select records by SUBJECT_ID select records before or at the cutoff_time TODO filter records according to secondary time index TODO: Change 'LABEL' to 'LABEL_CN' for Chinese labels | 422 | en | 0.778927 |
from django.test import TestCase
# Create your tests here.
class Account(TestCase):
    """Tests for the account app."""

    def test_register(self):
        # Placeholder smoke test: always passes. Replace with a real
        # registration-flow assertion when the feature is implemented.
        self.assertTrue(True)
| hhcms/apps/account/tests.py | 147 | Create your tests here. | 23 | en | 0.899389 |
import gc
import string
import random
class ActiveGarbageCollection:
    """Context manager that forces a garbage collection on entry and on exit.

    Prints how many unreachable objects were freed, prefixed with `title`,
    whenever a collection actually freed anything.
    """

    def __init__(self, title):
        # Explicit collection only makes sense with automatic gc enabled.
        assert gc.isenabled(), "Garbage collection should be enabled"
        self.title = title

    def __enter__(self):
        self._collect("start")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Runs regardless of whether the body raised.
        self._collect("completion")

    def _collect(self, step):
        # gc.collect() returns the number of unreachable objects found.
        n = gc.collect()
        if n > 0:
            print(f"{self.title}: freed {n} unreachable objects on {step}")
def is_corrupted(entity_max_eventid, last_eventid):
    """Tell whether the stored entities are inconsistent with the event log.

    :param entity_max_eventid: highest event id found on the entities
        (None when there are no entities)
    :param last_eventid: id of the most recent event (None when there are
        no events)
    :return: True when the data is corrupted
    """
    if entity_max_eventid is None:
        # No entities: fine both with and without events (a failed apply or
        # aborted upload leaves events without data, which is recoverable).
        return False
    if last_eventid is None:
        # Entities but no events: the data is corrupted.
        return True
    # Entities can never be newer than the events they were built from.
    return entity_max_eventid > last_eventid
def get_event_ids(storage):
    """Get the highest event id from the entities and the eventid of the most recent event

    :param storage: GOB (events + entities)
    :return: highest entity eventid and last eventid
    """
    with storage.get_session():
        return storage.get_entity_max_eventid(), storage.get_last_eventid()
def random_string(length):
    """Returns a random string of length :length: consisting of lowercase characters and digits

    :param length: number of characters to generate, must be positive
    :return: the random string
    """
    assert length > 0
    # string.digits replaces the hand-rolled ''.join(str(i) for i in range(10))
    characters = string.ascii_lowercase + string.digits
    return ''.join(random.choice(characters) for _ in range(length))
| src/gobupload/utils.py | 1,930 | Get the highest event id from the entities and the eventid of the most recent event
:param storage: GOB (events + entities)
:return:highest entity eventid and last eventid
Returns a random string of length :length: consisting of lowercase characters and digits
:param length:
:return:
no events, no entities events but no data (apply has failed or upload has been aborted) entities but no events (data is corrupted) entities and events, entities can never be newer than events | 480 | en | 0.904267 |
import tensorflow as tf
import cPickle as pickle
import rnn_model
import cnn_model
from dataloader import Dataloader
import os
import datetime
import numpy as np
import argparse
from cnn_model import unroll
def main():
    """Restore a trained model from a run directory and evaluate it.

    Writes eval_probabilities.npy, eval_targets.npy and eval_observations.npy
    into the run directory.
    """
    parser = argparse.ArgumentParser(description='Evaluate .')
    parser.add_argument('rundir', type=str, help='directory of tf checkpoint file')
    parser.add_argument('--model', type=str, help="Neural network architecture. 'lstm', 'rnn' or 'cnn' (default lstm)", default='lstm')
    parser.add_argument('--gpu', type=int, help="Select gpu (e.g. 0), via environment variable CUDA_VISIBLE_DEVICES (default None)", default=None)
    args = parser.parse_args()

    # GPU management
    allow_gpu_mem_growth = True
    gpu_memory_fraction = 1
    gpu_id = args.gpu
    if args.gpu is not None:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
        # BUG FIX: os.environ values must be strings; argparse parses --gpu
        # as int, so assigning it directly raised TypeError.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    dataloader = Dataloader(datafolder="data/eval", batchsize=500)

    # Load the hyper-parameters the model was trained with.
    with open(os.path.join(args.rundir, "args.pkl"), "rb") as f:
        modelargs = pickle.load(f)

    # Re-create the model graph with the same hyper-parameters.
    print("building model graph")
    if args.model in ["rnn", "lstm"]:
        # NOTE(review): gpu is hard-coded to 0 here while the cnn branch uses
        # gpu_id — confirm whether this asymmetry is intended.
        model = rnn_model.Model(n_input=modelargs["n_input"], n_classes=modelargs["n_classes"],
                                n_layers=modelargs["n_layers"], batch_size=dataloader.batchsize,
                                adam_lr=modelargs["adam_lr"], rnn_cell_type=args.model,
                                dropout_keep_prob=modelargs["dropout_keep_prob"],
                                n_cell_per_input=modelargs["n_cell_per_input"], gpu=0)
        evaluate = evaluate_rnn
    if args.model == "cnn":
        model = cnn_model.Model(n_input=modelargs["n_input"], n_classes=modelargs["n_classes"],
                                n_layers=modelargs["n_layers"], adam_lr=1e-3,
                                dropout_keep_prob=modelargs["dropout_keep_prob"],
                                n_cell_per_input=modelargs["n_cell_per_input"], gpu=gpu_id)
        evaluate = evaluate_cnn

    probabilities, targets, observations = evaluate(model, dataloader,
                                                    init_dir=args.rundir,
                                                    print_every=20,
                                                    gpu_memory_fraction=gpu_memory_fraction,
                                                    allow_gpu_mem_growth=allow_gpu_mem_growth)

    np.save(os.path.join(args.rundir, "eval_probabilities.npy"), probabilities)
    np.save(os.path.join(args.rundir, "eval_targets.npy"), targets)
    np.save(os.path.join(args.rundir, "eval_observations.npy"), observations)
def evaluate_rnn(model,
                 dataloader,
                 print_every=5,
                 init_dir=None,
                 allow_gpu_mem_growth=True,
                 gpu_memory_fraction=0.3):
    """
    This function initializes a model from the <init_dir> directory and gathers
    scores, targets and observation positions over one epoch of dataloader
    (usually test data).

    :param model: rnn_model object containing tensorflow graph
    :param dataloader: DataLoader object for loading batches
    :param print_every: console log frequency
    :param init_dir: checkpoint directory to restore the model from
    :param allow_gpu_mem_growth: dynamic growth of gpu vram
    :param gpu_memory_fraction: hard upper limit for gpu vram
    :returns probabilities <float> [all observations x n_classes] per-class scores per observation
    :returns targets <bool> [all observations x n_classes] reference data per observation
    :returns observations <int> [all_observations] position of observation in the sequence
        e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
    """
    saver = tf.train.Saver()

    # containers for output data
    total_cm = np.zeros((model.n_classes, model.n_classes))
    all_scores = np.array([])
    all_targets = np.array([])
    all_obs = np.array([])

    step = 0
    t_last = datetime.datetime.now()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = allow_gpu_mem_growth
    config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
    config.allow_soft_placement = True

    print("start")
    with tf.Session(config=config) as sess:
        sess.run([model.init_op])
        if init_dir is not None:
            if os.path.exists(init_dir):
                ckpt = tf.train.get_checkpoint_state(init_dir)
                print("restoring model from %s" % ckpt.model_checkpoint_path)
                saver.restore(sess, ckpt.model_checkpoint_path)

        # NOTE(review): starts at batch 1, i.e. skips one batch — confirm intended.
        for i in range(1, dataloader.num_batches):
            # step as number of features -> invariant to changes in batch size
            step += dataloader.batch_size

            s_db = datetime.datetime.now()
            X, y, seq_lengths = dataloader.next_batch()
            e_db = datetime.datetime.now()

            feed = {model.X: X, model.y_: y, model.seq_lengths: seq_lengths}

            cm, scores, targets, obs = sess.run([model.confusion_matrix, model.scores, model.targets, model.obs],
                                                feed_dict=feed)

            # accumulate flat arrays; reshaped to [n_obs, n_classes] on return
            all_obs = np.append(all_obs, obs)
            all_scores = np.append(all_scores, scores)
            all_targets = np.append(all_targets, targets)
            #total_cm += cm

            # timing / throughput bookkeeping for the progress log
            e_tr = datetime.datetime.now()
            dt_db = e_db - s_db
            dt_tr = e_tr - e_db
            field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()
            # approximate calculation time
            approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)
            eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)
            t_last = datetime.datetime.now()

            if i % print_every == 0:
                cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
                msg = "Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} " \
                      "(time: db {}ms; eval {}ms, {} feat/s, eta: {})".format(
                    i,
                    step,
                    dataloader.epoch,
                    dataloader.batch,
                    dataloader.num_batches,
                    cross_entropy,
                    int(dt_db.total_seconds() * 1000),
                    int(dt_tr.total_seconds() * 1000),
                    int(field_per_s),
                    eta.strftime("%d.%b %H:%M")
                )
                print(msg)

    return all_scores.reshape(-1, model.n_classes), \
           all_targets.reshape(-1, model.n_classes).astype(bool), \
           all_obs
def evaluate_cnn(model,
                 dataloader,
                 print_every=5,
                 init_dir=None,
                 allow_gpu_mem_growth=True,
                 gpu_memory_fraction=0.3):
    """
    This function initializes a model from the <init_dir> directory and gathers
    scores, targets and observation positions over one epoch of dataloader
    (usually test data).

    :param model: cnn_model object containing tensorflow graph
    :param dataloader: DataLoader object for loading batches
    :param print_every: console log frequency
    :param init_dir: checkpoint directory to restore the model from
    :param allow_gpu_mem_growth: dynamic growth of gpu vram
    :param gpu_memory_fraction: hard upper limit for gpu vram
    :returns probabilities <float> [all observations x n_classes] per-class scores per observation
    :returns targets <bool> [all observations x n_classes] reference data per observation
    :returns observations <int> [all_observations] position of observation in the sequence
        e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
    """
    saver = tf.train.Saver()

    # containers for output data
    all_scores = np.array([])
    all_targets = np.array([])
    all_obs = np.array([])

    step = 0
    t_last = datetime.datetime.now()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = allow_gpu_mem_growth
    config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
    config.allow_soft_placement = True

    print("start")
    with tf.Session(config=config) as sess:
        sess.run([model.init_op])
        if init_dir is not None:
            if os.path.exists(init_dir):
                ckpt = tf.train.get_checkpoint_state(init_dir)
                print("restoring model from %s" % ckpt.model_checkpoint_path)
                saver.restore(sess, ckpt.model_checkpoint_path)
                # restore step/epoch counters written during training
                with open(init_dir + "/steps.txt", "r") as f:
                    line = f.read()
                step_, epoch_ = line.split(" ")
                step = int(step_)
                dataloader.epoch = int(epoch_)

        for i in range(1, dataloader.num_batches):
            # step as number of features -> invariant to changes in batch size
            step += dataloader.batch_size

            s_db = datetime.datetime.now()
            X, y, seq_lengths = dataloader.next_batch()
            e_db = datetime.datetime.now()

            # Compute, per unrolled element, its position within the sequence.
            # TODO integrate in unroll function, but need to update also dependencies
            batch_size, max_seqlengths, n_input = X.shape
            ones = np.ones([batch_size, max_seqlengths])
            mask_ = np.arange(0, max_seqlengths) * ones < (seq_lengths * ones.T).T
            mask = mask_.reshape(-1)
            obs_ = np.arange(0, max_seqlengths) * ones
            obs = obs_.reshape(-1)[mask]

            # unroll data
            X, y = unroll(X, y, seq_lengths)

            feed = {model.X: X, model.y: y, model.batch_size: X.shape[0]}

            scores, targets = sess.run([model.scores, model.targets],
                                       feed_dict=feed)

            all_scores = np.append(all_scores, scores)
            all_targets = np.append(all_targets, targets)
            # BUG FIX: accumulate observation positions over all batches;
            # previously only the last batch's `obs` was returned.
            all_obs = np.append(all_obs, obs)

            # timing / throughput bookkeeping for the progress log
            e_tr = datetime.datetime.now()
            dt_db = e_db - s_db
            dt_tr = e_tr - e_db
            field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()
            # approximate calculation time
            approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)
            eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)
            t_last = datetime.datetime.now()

            if i % print_every == 0:
                cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
                msg = "Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} " \
                      "(time: db {}ms; eval {}ms, {} feat/s, eta: {})".format(
                    i,
                    step,
                    dataloader.epoch,
                    dataloader.batch,
                    dataloader.num_batches,
                    cross_entropy,
                    int(dt_db.total_seconds() * 1000),
                    int(dt_tr.total_seconds() * 1000),
                    int(field_per_s),
                    eta.strftime("%d.%b %H:%M")
                )
                print(msg)

    return all_scores.reshape(-1, model.n_classes), \
           all_targets.reshape(-1, model.n_classes).astype(bool), \
           all_obs
# Script entry point.
if __name__ == '__main__':
    main()
probabilities, and confusion matrices based on all data stored in
one epoch of dataloader (usually test data)
:param model: rnn_model object containing tensorflow graph
:param dataloader: DataLoader object for loading batches
:param print_every: console log frequency
:param allow_gpu_mem_growth: dynamic growth of gpu vram
:param gpu_memory_fraction: hard upper limit for gpu vram
:returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted
:returns probabilities <float> [all observations x n_classes] probabilities for each class per observation
:returns targets <bool> [all observations x n_classes] reference data for each class per observation
:returns observations <int> [all_observations]position of observation in the sequence
e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
This function initialized a model from the <init_from> directory and calculates
probabilities, and confusion matrices based on all data stored in
one epoch of dataloader (usually test data)
:param model: rnn_model object containing tensorflow graph
:param dataloader: DataLoader object for loading batches
:param print_every: console log frequency
:param allow_gpu_mem_growth: dynamic growth of gpu vram
:param gpu_memory_fraction: hard upper limit for gpu vram
:returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted
:returns probabilities <float> [all observations x n_classes] probabilities for each class per observation
:returns targets <bool> [all observations x n_classes] reference data for each class per observation
:returns observations <int> [all_observations]position of observation in the sequence
e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
see issue 152dataloader = Dataloader(conn=conn, batch_size=args.batchsize, sql_where=args.sqlwhere, debug=False, do_shuffle=False, do_init_shuffle=True, tablename=args.tablename)np.save(os.path.join(args.rundir, "eval_confusion_matrix.npy"), confusion_matrix) container for output data step as number of features -> invariant to changes in batch sizetotal_cm += cm approximate calculation time container for output data step as number of features -> invariant to changes in batch size unroll also index of observation. -> TODO integrate in unroll function, but need to update also dependencies approximate calculation time | 2,702 | en | 0.683857 |
#! /usr/bin/env python
# coding=utf-8
import os
import time
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from tqdm import tqdm
from core.dataset import Dataset
from core.yolov3 import YOLOV3
from core.config import cfg
class YoloTrain(object):
    """Two-stage trainer for YOLOv3 (TensorFlow 1.x graph mode).

    Stage 1 trains only the three detection heads (conv_sbbox / conv_mbbox /
    conv_lbbox) on top of restored backbone weights; stage 2 fine-tunes every
    trainable variable.  The learning rate uses linear warmup followed by
    cosine decay, and an exponential moving average is applied to the weights.
    """
    def __init__(self):  # pull the hyper-parameters out of the config file
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS  # NOTE: "FISRT" mirrors the (misspelled) config key
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150
        self.train_logdir = "./data/log/train"  # training-log directory
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        with tf.name_scope('define_input'):  # per-batch input placeholders
            self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')
        with tf.name_scope("define_loss"):  # build the network and the loss
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss
        with tf.name_scope('learn_rate'):  # warmup + cosine-decay schedule
            self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
            warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
                                       dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period,
                                      dtype=tf.float64, name='train_steps')
            # Linear ramp-up below warmup_steps, then cosine annealing from
            # learn_rate_init down to learn_rate_end.
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                                 (1 + tf.cos(
                                     (self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
            )
            global_step_update = tf.assign_add(self.global_step, 1.0)
            # The original (Chinese) note below explains the warmup rationale:
            # start with a very small learning rate so the loss does not blow
            # up to NaN early on, then increase it once training stabilises.
            '''
            warmup_steps作用:
            神经网络在刚开始训练的过程中容易出现loss=NaN的情况,为了尽量避免这个情况,因此初始的学习率设置得很低
            但是这又使得训练速度变慢了。因此,采用逐渐增大的学习率,从而达到既可以尽量避免出现nan,又可以等训练过程稳定了再增大训练速度的目的。
            '''
        with tf.name_scope("define_weight_decay"):  # EMA of weights: smoother, more robust final model
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())
        # Restore everything except the three detection heads; declared early
        # to keep the restored checkpoint small.
        with tf.name_scope('loader_and_saver'):
            variables_to_restore = [v for v in self.net_var if
                                    v.name.split('/')[0] not in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']]
            self.loader = tf.train.Saver(variables_to_restore)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
        with tf.name_scope("define_first_stage_train"):  # stage 1: optimise only the detection heads
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
                    self.first_stage_trainable_var_list.append(var)
            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                                                     var_list=self.first_stage_trainable_var_list)
            # Chain BN updates -> optimiser step + step counter -> EMA update
            # into a single no-op train handle.
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()
        with tf.name_scope("define_second_stage_train"):  # stage 2: release all layers
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                                                      var_list=second_stage_trainable_var_list)
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()
        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)
            logdir = "./data/log/"  # TensorBoard log directory (wiped on every run)
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)
    def train(self):
        """Run the two-stage training loop, checkpointing after every epoch."""
        self.sess.run(tf.global_variables_initializer())
        try:
            print('=> Restoring weights from: %s ... ' % self.initial_weight)
            self.loader.restore(self.sess, self.initial_weight)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # consider narrowing to `except Exception`.
        except:
            print('=> %s does not exist !!!' % self.initial_weight)
            print('=> Now it starts to train YOLOV3 from scratch ...')
            # No weights to warm-start from, so skip the head-only stage.
            self.first_stage_epochs = 0
        # Pick the train op according to the current stage.
        for epoch in range(1, 1 + self.first_stage_epochs + self.second_stage_epochs):
            if epoch <= self.first_stage_epochs:
                train_op = self.train_op_with_frozen_variables
            else:
                train_op = self.train_op_with_all_variables
            # tqdm renders the iterable as a progress bar on the console.
            pbar = tqdm(self.trainset)
            train_epoch_loss, test_epoch_loss = [], []
            for train_data in pbar:
                _, summary, train_step_loss, global_step_val = self.sess.run(
                    [train_op, self.write_op, self.loss, self.global_step], feed_dict={
                        self.input_data: train_data[0],
                        self.label_sbbox: train_data[1],
                        self.label_mbbox: train_data[2],
                        self.label_lbbox: train_data[3],
                        self.true_sbboxes: train_data[4],
                        self.true_mbboxes: train_data[5],
                        self.true_lbboxes: train_data[6],
                        self.trainable: True,
                    })
                train_epoch_loss.append(train_step_loss)
                self.summary_writer.add_summary(summary, global_step_val)
                pbar.set_description("train loss: %.2f" % train_step_loss)
            for test_data in self.testset:
                test_step_loss = self.sess.run(self.loss, feed_dict={
                    self.input_data: test_data[0],
                    self.label_sbbox: test_data[1],
                    self.label_mbbox: test_data[2],
                    self.label_lbbox: test_data[3],
                    self.true_sbboxes: test_data[4],
                    self.true_mbboxes: test_data[5],
                    self.true_lbboxes: test_data[6],
                    self.trainable: False,
                })
                test_epoch_loss.append(test_step_loss)
            train_epoch_loss, test_epoch_loss = np.mean(train_epoch_loss), np.mean(test_epoch_loss)
            ckpt_file = "./checkpoint/yolov3_train_loss=%.4f.ckpt" % train_epoch_loss
            log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            print("=> Epoch: %2d Time: %s Train loss: %.2f Test loss: %.2f Saving %s ..."
                  % (epoch, log_time, train_epoch_loss, test_epoch_loss, ckpt_file))
            self.saver.save(self.sess, ckpt_file, global_step=epoch)
# Script entry point: building the trainer constructs the TF graph, then run it.
if __name__ == '__main__':
    YoloTrain().train()
| train.py | 9,978 | ! /usr/bin/env python coding=utf-8 从config文件获取到一些变量 日志保存地址 定义输入层 定义损失函数 定义学习率 指数平滑,可以让算法在最后不那么震荡,结果更有鲁棒性 指定需要恢复的参数。层等信息, 位置提前,减少模型体积。 第一阶段训练,只训练指定层 第二阶段训练,释放所有层 日志保存地址 阶段学习率 tqdm is a visualization tool that displays an Iterable object in a progree bar | 253 | zh | 0.947621 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: david@reciprocitylabs.com
from collections import namedtuple
from flask import session
from flask.ext.login import current_user
from .user_permissions import UserPermissions
from ggrc.models import get_model
# Lightweight value type describing one (action, resource_type, context_id)
# permission triple.
Permission = namedtuple('Permission', 'action resource_type context_id')
# Cache: resource type name -> list of contributing type names, filled lazily
# by get_contributing_resource_types() below.
_contributing_resource_types = {}
# Return a list of resource types using the same context space.
# This is needed because permissions may be given for, e.g., "Contract", but
# the restriction on join is done knowing only "Directive".
def get_contributing_resource_types(resource_type):
  """Return resource types that share a context space with *resource_type*.

  The result always contains *resource_type* itself plus the class names of
  all of its SQLAlchemy subclasses.  Results are memoised in the module-level
  ``_contributing_resource_types`` cache.
  """
  cached = _contributing_resource_types.get(resource_type)
  if cached is not None:
    return cached
  contributing = [resource_type]
  resource_model = get_model(resource_type)
  if resource_model:
    manager = resource_model._sa_class_manager
    for subclass_manager in manager.subclass_managers(True):
      contributing.append(subclass_manager.class_.__name__)
  _contributing_resource_types[resource_type] = contributing
  return contributing
class DefaultUserPermissionsProvider(object):
  """Factory handing out ``DefaultUserPermissions`` objects."""

  def __init__(self, settings):
    # Settings are accepted for interface compatibility but unused here.
    pass

  def permissions_for(self, user):
    """Return the permissions object used to check *user*'s access."""
    return DefaultUserPermissions()
class DefaultUserPermissions(UserPermissions):
  """Permission checks backed by the ``permissions`` blob in the session.

  A missing or ``None`` permissions blob is treated as "allow everything".
  """

  # Pseudo-permission of the super user; context_id 0 stands for all contexts.
  ADMIN_PERMISSION = Permission(
      '__GGRC_ADMIN__',
      '__GGRC_ALL__',
      0,
      )

  def _admin_permission_for_context(self, context_id):
    """Return the admin pseudo-permission scoped to *context_id*."""
    return Permission(
        self.ADMIN_PERMISSION.action,
        self.ADMIN_PERMISSION.resource_type,
        context_id)

  def _permission_match(self, permission, permissions):
    """True when *permission* appears in the *permissions* mapping."""
    by_action = permissions.get(permission.action, {})
    contexts = by_action.get(permission.resource_type, [])
    return permission.context_id in contexts

  def _is_allowed(self, permission):
    """Check *permission* directly, then via the admin pseudo-permissions."""
    permissions = session.get('permissions')
    if permissions is None:
      # No permission data in the session -- allow (legacy behaviour).
      return True
    candidates = (
        permission,
        self.ADMIN_PERMISSION,
        self._admin_permission_for_context(permission.context_id),
        )
    return any(self._permission_match(p, permissions) for p in candidates)

  def is_allowed_create(self, resource_type, context_id):
    """Whether the user may create *resource_type* objects in the context."""
    return self._is_allowed(Permission('create', resource_type, context_id))

  def is_allowed_read(self, resource_type, context_id):
    """Whether the user may read *resource_type* objects in the context."""
    return self._is_allowed(Permission('read', resource_type, context_id))

  def is_allowed_update(self, resource_type, context_id):
    """Whether the user may update *resource_type* objects in the context."""
    return self._is_allowed(Permission('update', resource_type, context_id))

  def is_allowed_delete(self, resource_type, context_id):
    """Whether the user may delete *resource_type* objects in the context."""
    return self._is_allowed(Permission('delete', resource_type, context_id))

  def _get_contexts_for(self, action, resource_type):
    """List context ids in which *action* on *resource_type* is permitted.

    ``None`` means "no restriction": either there is no permission data in
    the session, or the user is an admin for every context.
    """
    # FIXME: (Security) When applicable, we should explicitly assert that no
    # permissions are expected (e.g. that every user has ADMIN_PERMISSION).
    permissions = session.get('permissions')
    if permissions is None:
      return None
    if self._permission_match(self.ADMIN_PERMISSION, permissions):
      return None
    # Collect contexts granted on the resource type itself or on any type
    # sharing its context space (e.g. superclasses such as "Directive").
    contexts = []
    for rtype in get_contributing_resource_types(resource_type):
      contexts.extend(permissions.get(action, {}).get(rtype, ()))
    # Contexts in which the user is an ADMIN count for every action.
    admin_contexts = permissions \
        .get(self.ADMIN_PERMISSION.action, {}) \
        .get(self.ADMIN_PERMISSION.resource_type, ())
    contexts.extend(admin_contexts)
    return contexts

  def create_contexts_for(self, resource_type):
    """All contexts in which the user has create permission."""
    return self._get_contexts_for('create', resource_type)

  def read_contexts_for(self, resource_type):
    """All contexts in which the user has read permission."""
    return self._get_contexts_for('read', resource_type)

  def update_contexts_for(self, resource_type):
    """All contexts in which the user has update permission."""
    return self._get_contexts_for('update', resource_type)

  def delete_contexts_for(self, resource_type):
    """All contexts in which the user has delete permission."""
    return self._get_contexts_for('delete', resource_type)
| src/ggrc/rbac/permissions_provider.py | 5,103 | All contexts in which the user has create permission.
All contexts in which the user has delete permission.
Whether or not the user is allowed to create a resource of the specified
type in the context.
Whether or not the user is allowed to delete a resource of the specified
type in the context.
Whether or not the user is allowed to read a resource of the specified
type in the context.
Whether or not the user is allowed to update a resource of the specified
type in the context.
All contexts in which the user has read permission.
All contexts in which the user has update permission.
Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file> Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> Created By: david@reciprocitylabs.com Maintained By: david@reciprocitylabs.com Return a list of resource types using the same context space. This is needed because permissions may be given for, e.g., "Contract", but the restriction on join is done knowing only "Directive". super user, context_id 0 indicates all contexts FIXME: (Security) When applicable, we should explicitly assert that no permissions are expected (e.g. that every user has ADMIN_PERMISSION). Get the list of contexts for a given resource type and any superclasses Extend with the list of all contexts for which the user is an ADMIN | 1,352 | en | 0.867623 |
"""
Django settings for session_words project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u@cj5-77l85mz0t186p6@1c(d607sgv(0t5lm!4h$ok8to&h@v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.main',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'session_words.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'session_words.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| Django/session_words/session_words/settings.py | 3,135 | Django settings for session_words project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/1.11/ref/settings/databases Password validation https://docs.djangoproject.com/en/1.11/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/1.11/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/1.11/howto/static-files/ | 1,003 | en | 0.643611 |
import unittest
import tfexpt
import expt
from tensorlog import matrixdb
from tensorlog import program
from tensorlog import dataset
class TestNative(unittest.TestCase):
    """End-to-end accuracy/timing check of the native Tensorlog grid expt."""

    def setUp(self):
        self.n, self.maxD, self.epochs = 16, 8, 20
        self.factFile, train_file, test_file = expt.genInputs(self.n)
        self.db = matrixdb.MatrixDB.loadFile(self.factFile)
        self.prog = program.Program.loadRules("grid.ppr", self.db)
        self.trainData = dataset.Dataset.loadExamples(self.prog.db, train_file)
        self.testData = dataset.Dataset.loadExamples(self.prog.db, test_file)

    def testIt(self):
        acc, loss = expt.accExpt(self.prog, self.trainData, self.testData,
                                 self.n, self.maxD, self.epochs)
        print('acc', acc)
        self.assertTrue(acc >= 0.85)
        for elapsed in expt.timingExpt(self.prog):
            print('time', elapsed)
            self.assertTrue(elapsed < 0.05)
class TestAccTF(unittest.TestCase):
    """Accuracy check of the TensorFlow-compiled grid experiment."""

    def setUp(self):
        self.n, self.maxD, self.epochs = 16, 8, 20
        self.factFile, self.trainFile, self.testFile = expt.genInputs(self.n)
        self.tlog, self.trainData, self.testData = tfexpt.setup_tlog(
            self.maxD, self.factFile, self.trainFile, self.testFile)

    def testIt(self):
        accuracy = tfexpt.trainAndTest(self.tlog, self.trainData,
                                       self.testData, self.epochs)
        print('acc', accuracy)
        self.assertTrue(accuracy >= 0.85)
if __name__ == "__main__":
unittest.main()
| datasets/grid/testexpt.py | 1,474 | (self.factFile,self.trainFile,self.testFile) = ('inputs/g16.cfacts','inputs/g16-train.exam','inputs/g16-test.exam') | 115 | en | 0.233345 |
import hashlib
import random
from typing import Tuple, Dict
from self_driving.beamng_config import BeamNGConfig
from self_driving.beamng_evaluator import BeamNGEvaluator
from core.member import Member
from self_driving.catmull_rom import catmull_rom
from self_driving.road_bbox import RoadBoundingBox
from self_driving.road_polygon import RoadPolygon
from self_driving.edit_distance_polyline import iterative_levenshtein
# Type aliases for road node tuples (4 floats per control/sample node; the
# exact component meaning is defined by the road generator -- TODO confirm).
Tuple4F = Tuple[float, float, float, float]
Tuple2F = Tuple[float, float]
class BeamNGMember(Member):
    """A road individual returned by the RoadGenerator.

    A member is defined by its ``control_nodes`` and the denser
    ``sample_nodes`` interpolated from them; evaluation state
    (``distance_to_boundary``, ``simulation``) is filled in lazily.
    """
    # Class-wide counter used to derive a unique, human-readable name.
    counter = 0

    def __init__(self, control_nodes: Tuple4F, sample_nodes: Tuple4F, num_spline_nodes: int,
                 road_bbox: RoadBoundingBox):
        super().__init__()
        BeamNGMember.counter += 1
        self.name = f'mbr{str(BeamNGMember.counter)}'
        self.name_ljust = self.name.ljust(7)
        self.control_nodes = control_nodes
        self.sample_nodes = sample_nodes
        self.num_spline_nodes = num_spline_nodes
        self.road_bbox = road_bbox
        self.config: BeamNGConfig = None
        self.problem: 'BeamNGProblem' = None
        self._evaluator: BeamNGEvaluator = None
        # BUG FIX: ``simulation`` was previously only assigned inside
        # evaluate(), so needs_evaluation() raised AttributeError on members
        # whose distance_to_boundary was set without a simulation run (e.g.
        # restored via from_dict).  Initialise it defensively here (harmless
        # if the Member base class already defines it).
        self.simulation = None

    def clone(self):
        """Return a fresh copy sharing config/problem references; the clone
        keeps the boundary distance but has no simulation attached."""
        res = BeamNGMember(list(self.control_nodes), list(self.sample_nodes), self.num_spline_nodes, self.road_bbox)
        res.config = self.config
        res.problem = self.problem
        res.distance_to_boundary = self.distance_to_boundary
        return res

    def to_dict(self) -> dict:
        """Serialise the member to plain Python types (JSON-friendly)."""
        return {
            'control_nodes': self.control_nodes,
            'sample_nodes': self.sample_nodes,
            'num_spline_nodes': self.num_spline_nodes,
            'road_bbox_size': self.road_bbox.bbox.bounds,
            'distance_to_boundary': self.distance_to_boundary
        }

    @classmethod
    def from_dict(cls, dict: Dict):
        """Rebuild a member previously serialised with ``to_dict``."""
        road_bbox = RoadBoundingBox(dict['road_bbox_size'])
        res = BeamNGMember([tuple(t) for t in dict['control_nodes']],
                           [tuple(t) for t in dict['sample_nodes']],
                           dict['num_spline_nodes'], road_bbox)
        res.distance_to_boundary = dict['distance_to_boundary']
        return res

    def evaluate(self):
        """Run the problem's evaluator on this member if still needed."""
        if self.needs_evaluation():
            self.simulation = self.problem._get_evaluator().evaluate([self])
            print('eval mbr', self)
        # assert not self.needs_evaluation()

    def needs_evaluation(self):
        """True until both a boundary distance and a simulation exist."""
        return self.distance_to_boundary is None or self.simulation is None

    def clear_evaluation(self):
        """Invalidate the cached fitness so the member gets re-evaluated."""
        self.distance_to_boundary = None

    def is_valid(self):
        """Valid when the sampled road polygon is well-formed and the interior
        control nodes stay inside the allowed bounding box."""
        return (RoadPolygon.from_nodes(self.sample_nodes).is_valid() and
                self.road_bbox.contains(RoadPolygon.from_nodes(self.control_nodes[1:-1])))

    def distance(self, other: 'BeamNGMember'):
        """Edit distance between the two roads' sampled polylines."""
        # TODO: evaluate frechet_dist(self.sample_nodes, other.sample_nodes)
        # as an alternative metric.
        return iterative_levenshtein(self.sample_nodes, other.sample_nodes)

    def to_tuple(self):
        """Return the (x, y) barycenter of the control nodes."""
        import numpy as np
        barycenter = np.mean(self.control_nodes, axis=0)[:2]
        return barycenter

    def mutate(self) -> 'BeamNGMember':
        """Mutate this member in place and invalidate its fitness."""
        RoadMutator(self, lower_bound=-int(self.problem.config.MUTATION_EXTENT), upper_bound=int(self.problem.config.MUTATION_EXTENT)).mutate()
        self.distance_to_boundary = None
        return self

    def __repr__(self):
        # b=... shows the boundary distance ('na' when unset, '+' when
        # positive); h=... is a short hash of the control nodes.
        eval_boundary = 'na'
        if self.distance_to_boundary:
            eval_boundary = str(self.distance_to_boundary)
            if self.distance_to_boundary > 0:
                eval_boundary = '+' + eval_boundary
            eval_boundary = '~' + eval_boundary
        eval_boundary = eval_boundary[:7].ljust(7)
        h = hashlib.sha256(str([tuple(node) for node in self.control_nodes]).encode('UTF-8')).hexdigest()[-5:]
        return f'{self.name_ljust} h={h} b={eval_boundary}'
class RoadMutator:
    """Applies small integer displacements to a road's control nodes until a
    valid mutated road is obtained."""
    NUM_UNDO_ATTEMPTS = 20

    def __init__(self, road: BeamNGMember, lower_bound=-2, upper_bound=2):
        self.road = road
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def mutate_gene(self, index, xy_prob=0.5) -> Tuple[int, int]:
        """Displace one coordinate of control node *index* and re-sample the
        road spline.  Returns (coordinate, delta) so the change can be undone."""
        gene = list(self.road.control_nodes[index])
        # Pick a non-zero displacement in [lower_bound, upper_bound).
        candidate_mut_values = [i for i in range(self.lower_bound, self.upper_bound) if i != 0]
        mut_value = random.choice(candidate_mut_values)
        # Mutate y with probability xy_prob, otherwise x.
        if random.random() < xy_prob:
            c = 1
        else:
            c = 0
        gene[c] += mut_value
        self.road.control_nodes[index] = tuple(gene)
        self.road.sample_nodes = catmull_rom(self.road.control_nodes, self.road.num_spline_nodes)
        return c, mut_value

    def undo_mutation(self, index, c, mut_value):
        """Revert a displacement produced by ``mutate_gene``."""
        gene = list(self.road.control_nodes[index])
        gene[c] -= mut_value
        self.road.control_nodes[index] = tuple(gene)
        self.road.sample_nodes = catmull_rom(self.road.control_nodes, self.road.num_spline_nodes)

    def mutate(self, num_undo_attempts=10):
        """Mutate one control node, retrying genes/values until the road is
        valid.  Raises ValueError when every candidate gene fails."""
        backup_nodes = list(self.road.control_nodes)
        attempted_genes = set()
        n = len(self.road.control_nodes) - 2
        seglength = 3
        # random.randint() below is inclusive on both ends, so there are
        # n - 2*seglength + 1 candidate indices.  BUG FIX: the original used
        # n - 2*seglength and therefore always gave up with one candidate
        # gene untried.
        candidate_length = n - (2 * seglength) + 1
        assert candidate_length > 0

        def next_gene_index() -> int:
            # Return a not-yet-attempted index in [seglength, n-seglength],
            # or -1 when all candidates have been tried.
            if len(attempted_genes) == candidate_length:
                return -1
            i = random.randint(seglength, n - seglength)
            while i in attempted_genes:
                i = random.randint(seglength, n - seglength)
            attempted_genes.add(i)
            return i

        gene_index = next_gene_index()
        while gene_index != -1:
            c, mut_value = self.mutate_gene(gene_index)
            attempt = 0
            is_valid = self.road.is_valid()
            while not is_valid and attempt < num_undo_attempts:
                self.undo_mutation(gene_index, c, mut_value)
                c, mut_value = self.mutate_gene(gene_index)
                attempt += 1
                is_valid = self.road.is_valid()
            if is_valid:
                break
            # BUG FIX: revert the last failed attempt so an invalid change to
            # this gene does not linger while other genes are tried (the
            # original left the road invalid here, which could trip the
            # asserts below even after a later, otherwise-valid mutation).
            self.undo_mutation(gene_index, c, mut_value)
            gene_index = next_gene_index()
            if gene_index == -1:
                raise ValueError("No gene can be mutated")
        assert self.road.is_valid()
        assert self.road.control_nodes != backup_nodes
assert not self.needs_evaluation()TODOreturn frechet_dist(self.sample_nodes, other.sample_nodes)return frechet_dist(self.sample_nodes[0::3], other.sample_nodes[0::3]) Choose the mutation extentmut_value = random.randint(self.lower_bound, self.upper_bound) Avoid to choose 0if mut_value == 0: mut_value += 1 Select coordinate to mutate i = random.randint(3, n - 3) while i in attempted_genes: i = random.randint(3, n-3) | 485 | en | 0.500883 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import numpy as np
from arch.api.proto.feature_scale_meta_pb2 import ScaleMeta
from arch.api.proto.feature_scale_param_pb2 import ScaleParam
from arch.api.proto.feature_scale_param_pb2 import ColumnScaleParam
from arch.api.utils import log_utils
from federatedml.feature.feature_scale.base_scale import BaseScale
from federatedml.statistic.statics import MultivariateStatisticalSummary
LOGGER = log_utils.getLogger()
class StandardScale(BaseScale):
    """
    Standardize features by removing the mean and scaling to unit variance. The standard score of a sample x is calculated as:
    z = (x - u) / s, where u is the mean of the training samples, and s is the standard deviation of the training samples
    """
    def __init__(self, params):
        super().__init__(params)
        # Which statistics to apply; when disabled, mean=0 / std=1 are used.
        self.with_mean = params.with_mean
        self.with_std = params.with_std
        self.mean = None
        self.std = None

    def set_param(self, mean, std):
        """Inject externally computed per-column mean/std (e.g. from a peer)."""
        self.mean = mean
        self.std = std

    @staticmethod
    def __scale_with_column_range(data, column_upper, column_lower, mean, std, process_cols_list):
        """Clip each scaled column to [column_lower, column_upper], then
        standardize; results are rounded to 6 decimal places."""
        for i in process_cols_list:
            value = data.features[i]
            if value > column_upper[i]:
                value = column_upper[i]
            elif value < column_lower[i]:
                value = column_lower[i]
            data.features[i] = np.around((value - mean[i]) / std[i], 6)
        return data

    @staticmethod
    def __scale(data, mean, std, process_cols_list):
        """Standardize the selected columns of one instance (no clipping)."""
        for i in process_cols_list:
            data.features[i] = np.around((data.features[i] - mean[i]) / std[i], 6)
        return data

    def fit(self, data):
        """
        Apply standard scale for input data
        Parameters
        ----------
        data: data_instance, input data
        Returns
        ----------
        data:data_instance, data after scale
        mean: list, each column mean value
        std: list, each column standard deviation
        """
        self.column_min_value, self.column_max_value = self._get_min_max_value(data)
        self.scale_column_idx = self._get_scale_column_idx(data)
        self.header = self._get_header(data)
        self.data_shape = self._get_data_shape(data)
        # fit column value if larger than parameter upper or less than parameter lower
        data = self.fit_feature_range(data)
        if not self.with_mean and not self.with_std:
            # Neither statistic requested: identity transform (z = x).
            self.mean = [0 for _ in range(self.data_shape)]
            self.std = [1 for _ in range(self.data_shape)]
        else:
            self.summary_obj = MultivariateStatisticalSummary(data, -1)
            if self.with_mean:
                self.mean = self.summary_obj.get_mean()
                self.mean = [self.mean[key] for key in self.header]
            else:
                self.mean = [0 for _ in range(self.data_shape)]
            if self.with_std:
                self.std = self.summary_obj.get_std_variance()
                self.std = [self.std[key] for key in self.header]
                # Guard against division by ~zero for constant columns.
                for i, value in enumerate(self.std):
                    if np.abs(value - 0) < 1e-6:
                        self.std[i] = 1
            else:
                self.std = [1 for _ in range(self.data_shape)]
        f = functools.partial(self.__scale, mean=self.mean, std=self.std, process_cols_list=self.scale_column_idx)
        fit_data = data.mapValues(f)
        return fit_data

    def transform(self, data):
        """
        Transform input data using standard scale with fit results
        Parameters
        ----------
        data: data_instance, input data
        Returns
        ----------
        transform_data:data_instance, data after transform
        """
        # Unlike fit(), transform clips to the min/max range seen at fit time.
        f = functools.partial(self.__scale_with_column_range, column_upper=self.column_max_value,
                              column_lower=self.column_min_value,
                              mean=self.mean, std=self.std, process_cols_list=self.scale_column_idx)
        transform_data = data.mapValues(f)
        return transform_data

    def __get_meta(self):
        """Build the protobuf ScaleMeta describing how scaling was configured."""
        if self.header:
            scale_column = [self.header[i] for i in self.scale_column_idx]
        else:
            # No header available: synthesise "col_<i>" names.
            scale_column = ["_".join(["col", str(i)]) for i in self.scale_column_idx]
        if not self.data_shape:
            self.data_shape = -1
        meta_proto_obj = ScaleMeta(method="standard_scale",
                                   area=self.area,
                                   scale_column=scale_column,
                                   feat_upper=self._get_upper(self.data_shape),
                                   feat_lower=self._get_lower(self.data_shape),
                                   with_mean=self.with_mean,
                                   with_std=self.with_std
                                   )
        return meta_proto_obj

    def __get_param(self, need_run):
        """Build the protobuf ScaleParam with per-column fitted statistics."""
        column_scale_param_dict = {}
        if self.header:
            for i, header in enumerate(self.header):
                if i in self.scale_column_idx:
                    param_obj = ColumnScaleParam(column_upper=self.column_max_value[i],
                                                 column_lower=self.column_min_value[i],
                                                 mean=self.mean[i],
                                                 std=self.std[i])
                    column_scale_param_dict[header] = param_obj
        param_proto_obj = ScaleParam(col_scale_param=column_scale_param_dict,
                                     header=self.header,
                                     need_run=need_run)
        return param_proto_obj

    def export_model(self, need_run):
        """Return the {meta, param} protobuf pair for model persistence."""
        meta_obj = self.__get_meta()
        param_obj = self.__get_param(need_run)
        result = {
            self.model_meta_name: meta_obj,
            self.model_param_name: param_obj
        }
        return result
| federatedml/feature/feature_scale/standard_scale.py | 6,596 | Standardize features by removing the mean and scaling to unit variance. The standard score of a sample x is calculated as:
z = (x - u) / s, where u is the mean of the training samples, and s is the standard deviation of the training samples
Apply standard scale for input data
Parameters
----------
data: data_instance, input data
Returns
----------
data:data_instance, data after scale
mean: list, each column mean value
std: list, each column standard deviation
Transform input data using standard scale with fit results
Parameters
----------
data: data_instance, input data
Returns
----------
transform_data:data_instance, data after transform
Copyright 2019 The FATE Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. fit column value if larger than parameter upper or less than parameter lower | 1,313 | en | 0.727556 |
# -*- coding: utf-8 -*
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.conf.urls import url
from iam.contrib.django.dispatcher.dispatchers import (
DjangoBasicResourceApiDispatcher,
success_response,
)
from iam.resource.utils import get_filter_obj, get_page_obj
from auth.bkiam import IAM_REGISTERED_SYSTEM
from auth.bkiam import resources
from auth.bkiam.backend import iam
from auth.bkiam.resources import BKDataResourceProvider
class BKDataDjangoBasicResourceApiDispatcher(DjangoBasicResourceApiDispatcher):
    """IAM resource-API dispatcher with a provider pre-search hook.

    Extends the stock dispatcher so a resource provider may expose an
    optional ``pre_search_instance`` callable that is invoked (with the
    same arguments) before ``list_instance`` runs.
    """

    def _dispatch_search_instance(self, request, data, request_id):
        """Handle a search_instance callback and return a success response."""
        options = self._get_options(request)

        search_filter = get_filter_obj(data.get("filter"), ["parent", "keyword"])
        page = get_page_obj(data.get("page"))
        provider = self._provider[data["type"]]

        # Let the provider adjust filter/page before the actual listing.
        hook = getattr(provider, "pre_search_instance", None)
        if hook and callable(hook):
            hook(search_filter, page, **options)

        listing = provider.list_instance(search_filter, page, **options)
        return success_response(listing.to_dict(), request_id)
def register_resources(dispatcher, resources_module):
    """Register every concrete resource provider found in *resources_module*.

    A class is registered when its name ends with ``ResourceProvider``, it
    subclasses :class:`BKDataResourceProvider`, and it declares a non-None
    ``resource_type``.

    :param dispatcher: dispatcher instance providers are registered on.
    :param resources_module: module object scanned for provider classes.
    """
    # Bug fix: the original scanned dir(resources) — the module-level global —
    # instead of the resources_module parameter it was given.
    for attr_name in dir(resources_module):
        if not attr_name.endswith("ResourceProvider"):
            continue
        resource_class = getattr(resources_module, attr_name)
        if issubclass(resource_class, BKDataResourceProvider) and resource_class.resource_type is not None:
            dispatcher.register(resource_class.resource_type, resource_class())
# Wire up the IAM callback dispatcher: register every provider declared in
# ``auth.bkiam.resources`` on a single dispatcher instance.
dispatcher = BKDataDjangoBasicResourceApiDispatcher(iam, IAM_REGISTERED_SYSTEM)
register_resources(dispatcher, resources)
# BK-IAM calls back on this endpoint to search/list resource instances.
urlpatterns = [url(r"^resource/api/v1/$", dispatcher.as_view([]), name="iamApi")]
| src/api/auth/bkiam/urls.py | 3,088 | Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*- coding: utf-8 -* | 1,359 | en | 0.852174 |
# Problem: https://neps.academy/problem/443 (Hora da Corrida, SBC 2019)
import math


def checkpoint_counts(total):
    """Return the plaque reached at 10%, 20%, ..., 90% of the race.

    ``total`` is laps * plaques-per-lap.  Each partial is rounded up;
    ``math.ceil`` on a non-negative float is exactly the original
    ``int()``-equality / ``+1`` ceiling logic.  The multiplier is built from
    the string "0.d" (as the original did) so float behaviour is unchanged.
    """
    return [math.ceil(total * float("0." + str(d))) for d in range(1, 10)]


def main():
    """Read 'laps plaques' from stdin and print the nine partials."""
    voltas, placas = input().split()
    total = int(voltas) * int(placas)
    for count in checkpoint_counts(total):
        print(count, end=' ')


if __name__ == "__main__":
    # Guarded so importing this module no longer blocks on stdin.
    main()
| Python/Hora da Corrida - SBC 2019.py | 431 | link (https://neps.academy/problem/443) | 39 | en | 0.541387 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: adds the SpatialDatasetService model.

    NOTE(review): the ``b'...'`` default/choice literals are Python-2-era
    artifacts of makemigrations; left untouched because applied migrations
    must stay byte-stable.
    """

    # Must run after the previous tethys_datasets migration.
    dependencies = [
        ('tethys_datasets', '0002_auto_20150119_1756'),
    ]

    operations = [
        migrations.CreateModel(
            name='SpatialDatasetService',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=30)),
                ('engine', models.CharField(default=b'tethys_dataset_services.engines.GeoServerSpatialDatasetEngine', max_length=200, choices=[(b'tethys_dataset_services.engines.GeoServerSpatialDatasetEngine', b'GeoServer')])),
                ('endpoint', models.CharField(max_length=1024)),
                ('apikey', models.CharField(max_length=100, blank=True)),
                ('username', models.CharField(max_length=100, blank=True)),
                ('password', models.CharField(max_length=100, blank=True)),
            ],
            options={
                'verbose_name': 'Spatial Dataset Service',
                'verbose_name_plural': 'Spatial Dataset Services',
            },
            bases=(models.Model,),
        ),
    ]
| tethys_datasets/migrations/0003_spatialdatasetservice.py | 1,282 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
# Resume training of the saved cell-image classifier for five more epochs.
# The model is assumed to have been compiled before it was saved, so
# load_model restores the optimizer state as well — TODO confirm.

# Training images get augmentation; validation images are only rescaled.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    'cell_images/train',
    target_size=(100, 100),
    batch_size=32,
    class_mode='binary',
)
validation_generator = test_datagen.flow_from_directory(
    'cell_images/test',
    target_size=(100, 100),
    batch_size=32,
    class_mode='binary',
)

# Pick up the previously trained model and continue training it.
model = keras.models.load_model("model.h5")

model.fit_generator(
    train_generator,
    steps_per_epoch=8000,
    epochs=5,
    validation_data=validation_generator,
    validation_steps=800,
)

# Save under a new name so the original checkpoint is preserved.
model.save("model_2.h5")
| train_more.py | 1,877 | model.compile(optimizer = 'Adam',loss = 'binary_crossentropy',metrics = ['accuracy'])checkpoints = ModelCheckpoint("checkpoints/weights.{epoch:02d}.h5", save_weights_only = False, verbose = 1)step_size_train = train_generator.n//train_generator.batch_sizecallbacks = [checkpoints]) | 363 | en | 0.5415 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Interface to the Stanford Part-of-speech and Named-Entity Taggers
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Nitin Madnani <nmadnani@ets.org>
# Rami Al-Rfou' <ralrfou@cs.stonybrook.edu>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A module for interfacing with the Stanford taggers.
Tagger models need to be downloaded from http://nlp.stanford.edu/software
and the STANFORD_MODELS environment variable set (a colon-separated
list of paths).
For more details see the documentation for StanfordPOSTagger and StanfordNERTagger.
"""
import os
import tempfile
from subprocess import PIPE
import warnings
from nltk.internals import find_file, find_jar, config_java, java, _java_options, find_jars_within_path
from nltk.tag.api import TaggerI
from nltk import compat
_stanford_url = 'http://nlp.stanford.edu/software'
class StanfordTagger(TaggerI):
    """
    An interface to Stanford taggers. Subclasses must define:

    - ``_cmd`` property: A property that returns the command that will be
      executed.
    - ``_SEPARATOR``: Class constant that represents that character that
      is used to separate the tokens from their tags.
    - ``_JAR`` file: Class constant that represents the jar file name.
    """

    _SEPARATOR = ''  # overridden by subclasses (e.g. '_' or '/')
    _JAR = ''        # overridden by subclasses with the tool's jar name

    def __init__(self, model_filename, path_to_jar=None, encoding='utf8', verbose=False, java_options='-mx1000m'):
        # Subclasses set _JAR; instantiating the base class directly is
        # almost certainly a mistake, so warn rather than fail.
        if not self._JAR:
            warnings.warn('The StanfordTagger class is not meant to be '
                    'instantiated directly. Did you mean StanfordPOSTagger or StanfordNERTagger?')
        self._stanford_jar = find_jar(
                self._JAR, path_to_jar,
                searchpath=(), url=_stanford_url,
                verbose=verbose)

        self._stanford_model = find_file(model_filename,
                env_vars=('STANFORD_MODELS',), verbose=verbose)

        # Adding logging jar files to classpath.  Note this replaces the
        # single jar path with a *tuple* of every jar next to it.
        stanford_dir = os.path.split(self._stanford_jar)[0]
        self._stanford_jar = tuple(find_jars_within_path(stanford_dir))

        self._encoding = encoding
        self.java_options = java_options

    @property
    def _cmd(self):
        """Java argv tokens for the tagger; must be provided by subclasses."""
        raise NotImplementedError

    def tag(self, tokens):
        """Tag a single tokenized sentence; returns a list of (token, tag)."""
        # This function should return list of tuple rather than list of list
        return sum(self.tag_sents([tokens]), [])

    def tag_sents(self, sentences):
        """Tag many tokenized sentences in one Java subprocess invocation."""
        encoding = self._encoding
        default_options = ' '.join(_java_options)
        # Apply this tagger's JVM options for the duration of the call.
        config_java(options=self.java_options, verbose=False)

        # Create a temporary input file
        _input_fh, self._input_file_path = tempfile.mkstemp(text=True)

        cmd = list(self._cmd)
        cmd.extend(['-encoding', encoding])

        # Write the actual sentences to the temporary input file
        # (one sentence per line, tokens separated by spaces).
        _input_fh = os.fdopen(_input_fh, 'wb')
        _input = '\n'.join((' '.join(x) for x in sentences))
        if isinstance(_input, compat.text_type) and encoding:
            _input = _input.encode(encoding)
        _input_fh.write(_input)
        _input_fh.close()

        # Run the tagger and get the output
        stanpos_output, _stderr = java(cmd, classpath=self._stanford_jar,
                stdout=PIPE, stderr=PIPE)
        stanpos_output = stanpos_output.decode(encoding)

        # Delete the temporary file
        os.unlink(self._input_file_path)

        # Return java configurations to their default values
        config_java(options=default_options, verbose=False)

        return self.parse_output(stanpos_output, sentences)

    def parse_output(self, text, sentences = None):
        """Convert raw tagger output into per-sentence (token, tag) lists."""
        # Output the tagged sentences
        tagged_sentences = []
        for tagged_sentence in text.strip().split("\n"):
            sentence = []
            for tagged_word in tagged_sentence.strip().split():
                word_tags = tagged_word.strip().split(self._SEPARATOR)
                # Everything before the last separator is the token; the
                # final piece is the tag.
                sentence.append((''.join(word_tags[:-1]), word_tags[-1]))
            tagged_sentences.append(sentence)
        return tagged_sentences
class StanfordPOSTagger(StanfordTagger):
    """
    A part-of-speech tagger backed by the Stanford POS Tagger.

    Construct it with:
     - the path to a model trained on training data
     - (optionally) the path to the stanford tagger jar file; if omitted,
       the jar must be available via the CLASSPATH environment variable
     - (optionally) the encoding of the training data (default: UTF-8)

    Example:

        >>> from nltk.tag import StanfordPOSTagger
        >>> st = StanfordPOSTagger('english-bidirectional-distsim.tagger') # doctest: +SKIP
        >>> st.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP
        [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')]
    """

    _SEPARATOR = '_'
    _JAR = 'stanford-postagger.jar'

    def __init__(self, *args, **kwargs):
        super(StanfordPOSTagger, self).__init__(*args, **kwargs)

    @property
    def _cmd(self):
        # Tokenization is disabled: callers supply pre-tokenized input.
        return [
            'edu.stanford.nlp.tagger.maxent.MaxentTagger',
            '-model', self._stanford_model,
            '-textFile', self._input_file_path,
            '-tokenize', 'false',
            '-outputFormatOptions', 'keepEmptySentences',
        ]
class StanfordNERTagger(StanfordTagger):
    """
    A named-entity tagger backed by the Stanford NER tool.

    Construct it with:
     - the path to a model trained on training data
     - (optionally) the path to the stanford tagger jar file; if omitted,
       the jar must be available via the CLASSPATH environment variable
     - (optionally) the encoding of the training data (default: UTF-8)

    Example:

        >>> from nltk.tag import StanfordNERTagger
        >>> st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz') # doctest: +SKIP
        >>> st.tag('Rami Eid is studying at Stony Brook University in NY'.split()) # doctest: +SKIP
        [('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'),
        ('at', 'O'), ('Stony', 'ORGANIZATION'), ('Brook', 'ORGANIZATION'),
        ('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'LOCATION')]
    """

    _SEPARATOR = '/'
    _JAR = 'stanford-ner.jar'
    _FORMAT = 'slashTags'

    def __init__(self, *args, **kwargs):
        super(StanfordNERTagger, self).__init__(*args, **kwargs)

    @property
    def _cmd(self):
        # WhitespaceTokenizer with tokenizeNLs=false: the input is already
        # tokenized, so Stanford's own tokenizer must not re-split it.
        return [
            'edu.stanford.nlp.ie.crf.CRFClassifier',
            '-loadClassifier', self._stanford_model,
            '-textFile', self._input_file_path,
            '-outputFormat', self._FORMAT,
            '-tokenizerFactory', 'edu.stanford.nlp.process.WhitespaceTokenizer',
            '-tokenizerOptions', '\"tokenizeNLs=false\"',
        ]

    def parse_output(self, text, sentences):
        """Split slashTags output back into per-sentence (token, tag) lists."""
        if self._FORMAT != 'slashTags':
            raise NotImplementedError

        # Flatten the tagger output into one long stream of (token, tag).
        flat = []
        for line in text.strip().split("\n"):
            for chunk in line.strip().split():
                pieces = chunk.strip().split(self._SEPARATOR)
                flat.append((''.join(pieces[:-1]), pieces[-1]))

        # Re-chunk the stream so the result mirrors the input sentences.
        grouped = []
        offset = 0
        for sent in sentences:
            grouped.append(flat[offset:offset + len(sent)])
            offset += len(sent)
        return grouped
| env/lib/python3.6/site-packages/nltk/tag/stanford.py | 7,818 | A class for Named-Entity Tagging with Stanford Tagger. The input is the paths to:
- a model trained on training data
- (optionally) the path to the stanford tagger jar file. If not specified here,
then this jar file must be specified in the CLASSPATH envinroment variable.
- (optionally) the encoding of the training data (default: UTF-8)
Example:
>>> from nltk.tag import StanfordNERTagger
>>> st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz') # doctest: +SKIP
>>> st.tag('Rami Eid is studying at Stony Brook University in NY'.split()) # doctest: +SKIP
[('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'),
('at', 'O'), ('Stony', 'ORGANIZATION'), ('Brook', 'ORGANIZATION'),
('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'LOCATION')]
A class for pos tagging with Stanford Tagger. The input is the paths to:
- a model trained on training data
- (optionally) the path to the stanford tagger jar file. If not specified here,
then this jar file must be specified in the CLASSPATH envinroment variable.
- (optionally) the encoding of the training data (default: UTF-8)
Example:
>>> from nltk.tag import StanfordPOSTagger
>>> st = StanfordPOSTagger('english-bidirectional-distsim.tagger') # doctest: +SKIP
>>> st.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP
[('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')]
An interface to Stanford taggers. Subclasses must define:
- ``_cmd`` property: A property that returns the command that will be
executed.
- ``_SEPARATOR``: Class constant that represents that character that
is used to separate the tokens from their tags.
- ``_JAR`` file: Class constant that represents the jar file name.
A module for interfacing with the Stanford taggers.
Tagger models need to be downloaded from http://nlp.stanford.edu/software
and the STANFORD_MODELS environment variable set (a colon-separated
list of paths).
For more details see the documentation for StanfordPOSTagger and StanfordNERTagger.
-*- coding: utf-8 -*- Natural Language Toolkit: Interface to the Stanford Part-of-speech and Named-Entity Taggers Copyright (C) 2001-2017 NLTK Project Author: Nitin Madnani <nmadnani@ets.org> Rami Al-Rfou' <ralrfou@cs.stonybrook.edu> URL: <http://nltk.org/> For license information, see LICENSE.TXT Adding logging jar files to classpath This function should return list of tuple rather than list of list Create a temporary input file Write the actual sentences to the temporary input file Run the tagger and get the output Delete the temporary file Return java configurations to their default values Output the tagged sentences Adding -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerOptions tokenizeNLs=false for not using stanford Tokenizer Joint together to a big list Separate it according to the input | 2,993 | en | 0.635709 |
##############################################################################
#
# Copyright 2019 Leap Beyond Emerging Technologies B.V. (unless otherwise stated)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
"""
Docker step by step building blocks:
generate docker image, prepare model, and build model
"""
import logging
import os.path as osp
import subprocess
from jinja2 import Environment, PackageLoader
from ..utils import get_model_tag_and_version
from .. import __version__ as catwalk_version
logger = logging.getLogger(__name__)
def build_prep(model_path=".", server_config=None, server_port=9090):
    """Prepares the model to be Dockerised by generating a dockerimage.

    Renders ``Dockerfile`` and ``.dockerignore`` from the packaged Jinja2
    templates into *model_path*.

    :param model_path: directory containing the model (default: cwd).
    :param server_config: server config value rendered into the templates;
        the literal string "false" is substituted when omitted.
    :param server_port: port the model server will listen on.
    """
    model_path = osp.abspath(model_path)
    model_tag, model_version = get_model_tag_and_version(model_path)

    context = {
        "catwalk_version": catwalk_version,
        "model_tag": model_tag,
        "model_version": model_version,
        "server_config": server_config if server_config is not None else "false",
        "server_port": server_port,
    }

    env = Environment(loader=PackageLoader("catwalk", "templates"))
    for target in ("Dockerfile", ".dockerignore"):
        # Template names drop the leading dot:
        # ".dockerignore" -> "dockerignore.j2".
        template_name = (target + ".j2").lstrip(".")
        rendered = env.get_template(template_name).render(**context)
        with open(osp.join(model_path, target), "w") as fp:
            fp.write(rendered)
        logger.info("Wrote " + target)
def build(model_path=".", docker_registry=None, push=True, no_cache=False):  # pragma: no cover
    """Builds the model into a Dockerised model server image.

    :param model_path: directory containing the model (default: cwd).
    :param docker_registry: optional registry prefix for the image name.
    :param push: when True, push the built image after building.
    :param no_cache: when True, pass --no-cache to docker build.
    :returns: exit code of the last docker command (always 0 on return,
        since failures raise).
    :raises subprocess.CalledProcessError: if a docker command fails
        (both invocations run with ``check=True``).
    """
    # Bug fix: model_path was abspath'd twice; once is enough.
    model_path = osp.abspath(model_path)
    model_tag, model_version = get_model_tag_and_version(model_path)

    # Compose the fully-qualified tag, e.g. "registry/model:version".
    image_name_parts = [model_tag]
    if docker_registry is not None:
        image_name_parts.insert(0, docker_registry)
    docker_tag = "/".join(image_name_parts) + ":" + model_version

    # Perform the docker build.
    cmd = ["docker", "build", model_path, "-t", docker_tag]
    if no_cache:
        cmd += ["--no-cache"]
    logger.info(" ".join(cmd))
    # check=True raises on a non-zero exit, so the old
    # "if returncode != 0" branch was unreachable and has been removed.
    subprocess.run(cmd, check=True)
    logger.info("Successfully built " + docker_tag)

    if not push:
        return 0

    # Perform the docker push.
    cmd = ["docker", "push", docker_tag]
    logger.info(" ".join(cmd))
    result = subprocess.run(cmd, check=True)
    return result.returncode
| catwalk/cicd/build_steps.py | 3,289 | Builds the model into a Dockerised model server image.
Prepares the model to be Dockerised by generating a dockerimage
Docker step by step building blocks:
generate docker image, prepare model, and build model
Copyright 2019 Leap Beyond Emerging Technologies B.V. (unless otherwise stated) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pragma: no cover Setup Perform the docker build Perform the docker push | 901 | en | 0.838173 |
#!/usr/bin/env python
#
# Copyright (c) 2014 Google, Inc
#
# SPDX-License-Identifier: GPL-2.0+
#
# Intel microcode update tool
from optparse import OptionParser
import os
import re
import struct
import sys
MICROCODE_DIR = 'arch/x86/dts/microcode'
class Microcode:
    """Holds information about the microcode for a particular model of CPU.

    Attributes:
        name: Name of the CPU this microcode is for, including any version
            information (e.g. 'm12206a7_00000029')
        model: Model code string (this is cpuid(1).eax, e.g. '206a7')
        words: List of integer words containing the microcode. The first 16
            words are the public header.
    """
    def __init__(self, name, data):
        self.name = name
        # Flatten the text chunks and parse every comma-separated value
        # (base auto-detected, so 0x... hex works); empty fields are skipped.
        self.words = [int(token, 0)
                      for token in ''.join(data).split(',')
                      if token.strip()]
        # The model number lives in the 4th word of the public header.
        self.model = '%x' % self.words[3]
def ParseFile(fname):
    """Parse a micrcode.dat file and return the component parts

    Args:
        fname: Filename to parse

    Returns:
        3-Tuple:
            date: String containing date from the file's header
            license_text: List of text lines for the license file
            microcodes: Dict of Microcode objects from the file, keyed by
                lower-case name
    """
    # Bug fix: these patterns are now raw strings — '\*' in a plain string
    # is an invalid escape (SyntaxWarning on modern Python) even though re
    # happened to receive the intended pattern.
    re_date = re.compile(r'/\* *(.* [0-9]{4}) *\*/$')
    re_license = re.compile(r'/[^-*+] *(.*)$')
    re_name = re.compile(r'/\* *(.*)\.inc *\*/', re.IGNORECASE)
    microcodes = {}
    license_text = []
    date = ''
    data = []
    name = None
    with open(fname) as fd:
        for line in fd:
            line = line.rstrip()
            m_date = re_date.match(line)
            m_license = re_license.match(line)
            m_name = re_name.match(line)
            if m_name:
                # A new '/* <name>.inc */' marker: flush the microcode
                # collected so far and start gathering the next one.
                if name:
                    microcodes[name] = Microcode(name, data)
                name = m_name.group(1).lower()
                data = []
            elif m_license:
                license_text.append(m_license.group(1))
            elif m_date:
                date = m_date.group(1)
            else:
                data.append(line)
    # Flush the final microcode, which runs to end-of-file.
    if name:
        microcodes[name] = Microcode(name, data)
    return date, license_text, microcodes
def ParseHeaderFiles(fname_list):
    """Parse a list of microcode header files and return the component parts

    Args:
        fname_list: List of files to parse

    Returns:
        date: String containing date from the file's header (always ''
            here: header files carry no date line)
        license_text: List of text lines for the license file
        microcodes: Dict of Microcode objects keyed by lower-case name
    """
    microcodes = {}
    license_text = []
    date = ''
    for fname in fname_list:
        # The microcode name is the file's basename without its extension.
        name = os.path.splitext(os.path.basename(fname).lower())[0]
        data = []
        in_license = False
        past_license = False
        with open(fname) as fd:
            for line in fd:
                line = line.rstrip()
                # '/*' opens the license block, '*/' closes it.
                if line.startswith('/*'):
                    in_license = True
                    continue
                if line.startswith('*/'):
                    past_license = True
                    continue
                if in_license and not past_license:
                    # Skip blank lines inside the license block.
                    if line:
                        license_text.append(line)
                    continue
                # Data line: keep every comma-terminated word, dropping
                # whatever follows the final comma.
                data.extend(word + ',' for word in line.split(',')[:-1])
        microcodes[name] = Microcode(name, data)
    return date, license_text, microcodes
def List(date, microcodes, model):
    """List the available microcode chunks

    Args:
        date: Date of the microcode file
        microcodes: Dict of Microcode objects indexed by name
        model: Model string to search for, or None to list all models
    """
    # Bug fix: parenthesised print() works identically on Python 2 and 3
    # for a single argument; the bare print statement broke Python 3.
    print('Date: %s' % date)
    if model:
        mcode_list, tried = FindMicrocode(microcodes, model.lower())
        print('Matching models %s:' % (', '.join(tried)))
    else:
        print('All models:')
        mcode_list = [microcodes[m] for m in microcodes.keys()]
    for mcode in mcode_list:
        print('%-20s: model %s' % (mcode.name, mcode.model))
def FindMicrocode(microcodes, model):
    """Find all the microcode chunks which match the given model.

    The model is something like 306a9 (the value returned in eax from
    cpuid(1) when running on Intel CPUs), but a partial match is allowed:
    the last one or two characters may be dropped so that many families
    can share the same microcode.

    If the model name is ambiguous, a list of matches is returned.

    Args:
        microcodes: Dict of Microcode objects indexed by name
        model: String containing model name to find

    Returns:
        Tuple:
            List of matching Microcode objects
            List of abbreviations we tried
    """
    # An exact microcode *name* short-circuits the fuzzy model search.
    exact = microcodes.get(model)
    if exact:
        return [exact], []

    tried = []
    matches = []
    for trim in range(3):
        # Try the full model first, then with 1 and 2 trailing chars cut.
        abbrev = model[:-trim] if trim else model
        tried.append(abbrev)
        matches = [mcode for mcode in microcodes.values()
                   if mcode.model.startswith(abbrev)]
        if matches:
            break
    return matches, tried
def CreateFile(date, license_text, mcodes, outfile):
    """Create a microcode file in U-Boot's .dtsi format

    Args:
        date: String containing date of original microcode file
        license_text: List of text lines for the license file
        mcodes: Microcode objects to write (normally only 1)
        outfile: Filename to write to ('-' for stdout; empty/None selects
            the default MICROCODE_DIR/<name>.dtsi path)
    """
    out = '''/*%s
 * ---
 * This is a device tree fragment. Use #include to add these properties to a
 * node.
 *
 * Date: %s
 */

compatible = "intel,microcode";
intel,header-version = <%d>;
intel,update-revision = <%#x>;
intel,date-code = <%#x>;
intel,processor-signature = <%#x>;
intel,checksum = <%#x>;
intel,loader-revision = <%d>;
intel,processor-flags = <%#x>;

/* The first 48-bytes are the public header which repeats the above data */
data = <%s
\t>;'''
    words = ''
    add_comments = len(mcodes) > 1
    for mcode in mcodes:
        if add_comments:
            words += '\n/* %s */' % mcode.name
        for i in range(len(mcode.words)):
            # Newline every 4 words.
            if not (i & 3):
                words += '\n'
            val = mcode.words[i]
            # Change each word so it will be little-endian in the FDT.
            # This data is needed before RAM is available on some platforms
            # so we cannot do an endianness swap on boot.
            val = struct.unpack("<I", struct.pack(">I", val))[0]
            words += '\t%#010x' % val

    # Use the first microcode for the headers.
    mcode = mcodes[0]

    # Take care to avoid adding a space before a tab.
    text = ''
    for line in license_text:
        if line[0] == '\t':
            text += '\n *' + line
        else:
            text += '\n * ' + line
    args = [text, date]
    args += [mcode.words[i] for i in range(7)]
    args.append(words)
    # Bug fix: the Python-2-only 'print x' / 'print >> stream, x' forms are
    # replaced with equivalents that behave identically on Python 2 and 3.
    if outfile == '-':
        print(out % tuple(args))
    else:
        if not outfile:
            if not os.path.exists(MICROCODE_DIR):
                sys.stderr.write("Creating directory '%s'\n" % MICROCODE_DIR)
                os.makedirs(MICROCODE_DIR)
            outfile = os.path.join(MICROCODE_DIR, mcode.name + '.dtsi')
        sys.stderr.write("Writing microcode for '%s' to '%s'\n" % (
            ', '.join([mcode.name for mcode in mcodes]), outfile))
        with open(outfile, 'w') as fd:
            fd.write(out % tuple(args) + '\n')
def MicrocodeTool():
    """Run the microcode tool: parse options and dispatch to a command."""
    commands = 'create,license,list'.split(',')
    parser = OptionParser()
    parser.add_option('-d', '--mcfile', type='string', action='store',
                    help='Name of microcode.dat file')
    parser.add_option('-H', '--headerfile', type='string', action='append',
                    help='Name of .h file containing microcode')
    parser.add_option('-m', '--model', type='string', action='store',
                    help="Model name to extract ('all' for all)")
    parser.add_option('-M', '--multiple', type='string', action='store',
                    help="Allow output of multiple models")
    parser.add_option('-o', '--outfile', type='string', action='store',
                    help='Filename to use for output (- for stdout), default is'
                    ' %s/<name>.dtsi' % MICROCODE_DIR)
    parser.usage += """ command

Process an Intel microcode file (use -h for help). Commands:

   create     Create microcode .dtsi file for a model
   list       List available models in microcode file
   license    Print the license

Typical usage:

   ./tools/microcode-tool -d microcode.dat -m 306a create

This will find the appropriate file and write it to %s.""" % MICROCODE_DIR

    (options, args) = parser.parse_args()
    if not args:
        parser.error('Please specify a command')
    cmd = args[0]
    if cmd not in commands:
        parser.error("Unknown command '%s'" % cmd)
    # Bug fix: the original compared options.mcfile against *itself*
    # ((not not options.mcfile) != (not not options.mcfile)), so this
    # mutual-exclusion check could never fire.
    if options.headerfile and options.mcfile:
        parser.error("You must specify either header files or a microcode file, not both")
    if options.headerfile:
        date, license_text, microcodes = ParseHeaderFiles(options.headerfile)
    elif options.mcfile:
        date, license_text, microcodes = ParseFile(options.mcfile)
    else:
        parser.error('You must specify a microcode file (or header files)')

    if cmd == 'list':
        List(date, microcodes, options.model)
    elif cmd == 'license':
        # print() with one argument behaves the same on Python 2 and 3.
        print('\n'.join(license_text))
    elif cmd == 'create':
        if not options.model:
            parser.error('You must specify a model to create')
        model = options.model.lower()
        if options.model == 'all':
            options.multiple = True
            # list(...) so the result is indexable on Python 3 as well.
            mcode_list = list(microcodes.values())
            tried = []
        else:
            mcode_list, tried = FindMicrocode(microcodes, model)
        if not mcode_list:
            parser.error("Unknown model '%s' (%s) - try 'list' to list" %
                         (model, ', '.join(tried)))
        if not options.multiple and len(mcode_list) > 1:
            parser.error("Ambiguous model '%s' (%s) matched %s - try 'list' "
                         "to list or specify a particular file" %
                         (model, ', '.join(tried),
                          ', '.join([m.name for m in mcode_list])))
        CreateFile(date, license_text, mcode_list, options.outfile)
    else:
        parser.error("Unknown command '%s'" % cmd)
# Run the tool only when executed as a script, not on import.
if __name__ == "__main__":
    MicrocodeTool()
| qemu_mode/qemu-2.10.0/roms/u-boot/tools/microcode-tool.py | 11,074 | !/usr/bin/env python Copyright (c) 2014 Google, Inc SPDX-License-Identifier: GPL-2.0+ Intel microcode update tool Convert data into a list of hex words The model is in the 4rd hex word Ignore blank line Omit anything after the last comma Allow a full name to be used Change each word so it will be little-endian in the FDT This data is needed before RAM is available on some platforms so we cannot do an endianness swap on boot. Use the first microcode for the headers Take care to avoid adding a space before a tab | 520 | en | 0.719714 |
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2007-2008 Trolltech ASA. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## Licensees holding a valid Qt License Agreement may use this file in
## accordance with the rights, responsibilities and obligations
## contained therein. Please consult your licensing agreement or
## contact sales@trolltech.com if any conditions of this licensing
## agreement are not clear to you.
##
## Further information about Qt licensing is available at:
## http://www.trolltech.com/products/qt/licensing.html or by
## contacting info@trolltech.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import sys
from PySide import QtCore, QtGui
# Phonon is an optional Qt module: if this PySide build lacks it, show a
# message box (instead of an unhandled traceback) and exit cleanly.
try:
    from PySide.phonon import Phonon
except ImportError:
    # A QApplication must exist before any QWidget (the message box) can
    # be shown.
    app = QtGui.QApplication(sys.argv)
    QtGui.QMessageBox.critical(None, "Phonon Capabilities",
            "Your Qt installation does not have Phonon support.",
            QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
            QtGui.QMessageBox.NoButton)
    sys.exit(1)
class Window(QtGui.QWidget):
    """Displays the Phonon backend's audio devices, MIME types and effects."""

    def __init__(self):
        # Bug fix: the original called super(QtGui.QWidget, self).__init__(),
        # which starts the MRO walk *after* QWidget and therefore skips
        # QWidget's own initializer.  Start the walk from this class.
        super(Window, self).__init__()

        self.setupUi()
        self.updateWidgets()

        # Refresh the views whenever the backend reports a capability or
        # output-device change.
        notifier = Phonon.BackendCapabilities.notifier()
        notifier.capabilitiesChanged.connect(self.updateWidgets)
        notifier.availableAudioOutputDevicesChanged.connect(self.updateWidgets)

    def updateWidgets(self):
        """Repopulate the device, MIME-type and effect views."""
        # Output devices.
        devices = Phonon.BackendCapabilities.availableAudioOutputDevices()
        model = Phonon.AudioOutputDeviceModel(devices)
        self.devicesListView.setModel(model)

        # MIME types.
        self.mimeListWidget.clear()
        for mimeType in Phonon.BackendCapabilities.availableMimeTypes():
            item = QtGui.QListWidgetItem(self.mimeListWidget)
            item.setText(mimeType)

        # Effects, with each effect's parameters nested beneath it.
        self.effectsTreeWidget.clear()
        for effect in Phonon.BackendCapabilities.availableAudioEffects():
            item = QtGui.QTreeWidgetItem(self.effectsTreeWidget)
            item.setText(0, "Effect")
            item.setText(1, effect.name())
            item.setText(2, effect.description())

            # Effects parameters.
            for parameter in Phonon.Effect(effect, self).parameters():
                defaultValue = parameter.defaultValue()
                minimumValue = parameter.minimumValue()
                maximumValue = parameter.maximumValue()

                valueString = "%s / %s / %s" % (defaultValue, minimumValue, maximumValue)

                parameterItem = QtGui.QTreeWidgetItem(item)
                parameterItem.setText(0, "Parameter")
                parameterItem.setText(1, parameter.name())
                parameterItem.setText(2, parameter.description())
                parameterItem.setText(3, str(parameter.type()))
                parameterItem.setText(4, valueString)

        # Fixed widths for the two text-heavy columns; size the rest to fit.
        for i in range(self.effectsTreeWidget.columnCount()):
            if i == 0:
                self.effectsTreeWidget.setColumnWidth(0, 150)
            elif i == 2:
                self.effectsTreeWidget.setColumnWidth(2, 350)
            else:
                self.effectsTreeWidget.resizeColumnToContents(i)

    def setupUi(self):
        """Build the window layout around the backend group box."""
        self.setupBackendBox()

        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.backendBox)

        self.setLayout(layout)
        self.setWindowTitle("Backend Capabilities Example")

    def setupBackendBox(self):
        """Create the group box holding the three capability views."""
        self.devicesLabel = QtGui.QLabel("Available Audio Devices:")
        self.devicesListView = QtGui.QListView()

        self.mimeTypesLabel = QtGui.QLabel("Supported MIME Types:")
        self.mimeListWidget = QtGui.QListWidget()

        self.effectsLabel = QtGui.QLabel("Available Audio Effects:")

        headerLabels = ("Type", "Name", "Description", "Value Type",
                "Default/Min/Max Values")
        self.effectsTreeWidget = QtGui.QTreeWidget()
        self.effectsTreeWidget.setHeaderLabels(headerLabels)
        self.effectsTreeWidget.setColumnCount(5)

        layout = QtGui.QGridLayout()
        layout.addWidget(self.devicesLabel, 0, 0)
        layout.addWidget(self.devicesListView, 1, 0)
        layout.addWidget(self.mimeTypesLabel, 0, 1)
        layout.addWidget(self.mimeListWidget, 1, 1)
        layout.addWidget(self.effectsLabel, 2, 0)
        layout.addWidget(self.effectsTreeWidget, 3, 0, 2, 2)
        layout.setRowStretch(3, 100)

        self.backendBox = QtGui.QGroupBox("Backend Capabilities")
        self.backendBox.setLayout(layout)
if __name__ == '__main__':
    # NOTE(review): requires a module-level `import sys` — confirm it exists
    # above this chunk.
    app = QtGui.QApplication(sys.argv)
    app.setApplicationName("Phonon Capabilities Example")
    window = Window()
    window.show()
    # exec_() blocks until the last window closes; its return code becomes
    # the process exit status.
    sys.exit(app.exec_())
| src/python/Lib/site-packages/PySide/examples/phonon/capabilities.py | 5,051 | !/usr/bin/env python Copyright (C) 2007-2008 Trolltech ASA. All rights reserved. This file is part of the example classes of the Qt Toolkit. Licensees holding a valid Qt License Agreement may use this file in accordance with the rights, responsibilities and obligations contained therein. Please consult your licensing agreement or contact sales@trolltech.com if any conditions of this licensing agreement are not clear to you. Further information about Qt licensing is available at: http://www.trolltech.com/products/qt/licensing.html or by contacting info@trolltech.com. This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. Output devices. MIME types. Effects. Effects parameters. | 776 | en | 0.843279 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for loading data from a Google service account file."""
import io
import json
import six
from google.auth import crypt
def from_dict(data, require=None):
    """Validates a dictionary containing Google service account data.
    Creates and returns a :class:`google.auth.crypt.Signer` instance from the
    private key specified in the data.
    Args:
        data (Mapping[str, str]): The service account data
        require (Sequence[str]): List of keys required to be present in the
            info.
    Returns:
        google.auth.crypt.Signer: A signer created from the private key in the
            service account file.
    Raises:
        ValueError: if the data was in the wrong format, or if one of the
            required keys is missing.
    """
    keys_needed = set(require if require is not None else [])
    # Iterating a mapping yields its keys in both Python 2 and 3, so the
    # six.iterkeys() indirection is unnecessary here.
    missing = keys_needed.difference(data)
    if missing:
        raise ValueError(
            "Service account info was not in the expected format, missing "
            "fields {}.".format(", ".join(missing))
        )
    # Create a signer.
    signer = crypt.RSASigner.from_service_account_info(data)
    return signer
def from_filename(filename, require=None):
    """Reads a Google service account JSON file and returns its parsed info.

    Args:
        filename (str): The path to the service account .json file.
        require (Sequence[str]): List of keys required to be present in the
            info.

    Returns:
        Tuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified
            info and a signer instance.
    """
    with io.open(filename, "r", encoding="utf-8") as json_file:
        info = json.load(json_file)
    signer = from_dict(info, require=require)
    return info, signer
| google/auth/_service_account_info.py | 2,359 | Validates a dictionary containing Google service account data.
Creates and returns a :class:`google.auth.crypt.Signer` instance from the
private key specified in the data.
Args:
data (Mapping[str, str]): The service account data
require (Sequence[str]): List of keys required to be present in the
info.
Returns:
google.auth.crypt.Signer: A signer created from the private key in the
service account file.
Raises:
ValueError: if the data was in the wrong format, or if one of the
required keys is missing.
Reads a Google service account JSON file and returns its parsed info.
Args:
filename (str): The path to the service account .json file.
require (Sequence[str]): List of keys required to be present in the
info.
Returns:
Tuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified
info and a signer instance.
Helper functions for loading data from a Google service account file.
Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Create a signer. | 1,531 | en | 0.788716 |
"""
ASGI config for infosafe project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'infosafe.settings')
application = get_asgi_application()
| infosafe/asgi.py | 393 | ASGI config for infosafe project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ | 214 | en | 0.720323 |
from rest_framework import generics
from rest_framework.exceptions import NotFound
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from workprogramsapp.expertise.models import UserExpertise, ExpertiseComments, Expertise
from workprogramsapp.expertise.serializers import UserExpertiseSerializer, CommentSerializer, ExpertiseSerializer
from workprogramsapp.permissions import IsMemberOfExpertise, IsRpdDeveloperOrReadOnly, IsMemberOfUserExpertise, \
IsExpertiseMaster, IsWorkProgramMemberOfExpertise
from workprogramsapp.workprogram_additions.models import UserStructuralUnit
class UserExpertiseListView(generics.ListAPIView):
    """
    List all expertise information for the current expert (the request
    user's token determines which expertises they participate in).
    To fetch a single expertise for the user, pass its id in the URL.
    """
    queryset = UserExpertise.objects.all()
    serializer_class = UserExpertiseSerializer
    permission_classes = [IsMemberOfExpertise]

    def get_queryset(self, *args, **kwargs):
        # `self.kwargs` is already a dict-like mapping; the original wrapped
        # it in dict() for every request, which is a needless copy.
        if 'pk' in self.kwargs:
            return UserExpertise.objects.filter(expertise=self.kwargs['pk'], expert=self.request.user)
        return UserExpertise.objects.filter(expert=self.request.user)
class UserExpertiseCreateView(generics.CreateAPIView):
    """
    Create a user-expertise record (an expert's participation in an
    expertise).
    """
    queryset = UserExpertise.objects.all()
    serializer_class = UserExpertiseSerializer
    permission_classes = [IsMemberOfExpertise]
class ExpertiseCommentsView(generics.ListAPIView):
    """
    View for reading expertise comments.
    Comments are retrieved for the expertise id given in the URL; the
    optional ``block`` query parameter narrows a GET request to a single
    comment block.
    """
    queryset = ExpertiseComments.objects.all()
    serializer_class = CommentSerializer
    permission_classes = [IsMemberOfExpertise]

    def get_queryset(self, *args, **kwargs):
        # Guard clause: without an expertise id in the URL, return everything.
        if 'pk' not in self.kwargs:
            return ExpertiseComments.objects.all()
        comments = ExpertiseComments.objects.filter(user_expertise__expertise=self.kwargs['pk'])
        # Fetch the query parameter once (the original read it twice) and
        # compare with `is not None` rather than `!= None`.
        block = self.request.query_params.get('block')
        if block is not None:
            comments = comments.filter(comment_block=block)
        return comments
class ExpertiseCommentCreateView(generics.CreateAPIView):
    """
    Create a comment on an expertise.
    """
    queryset = ExpertiseComments.objects.all()
    serializer_class = CommentSerializer
    permission_classes = [IsMemberOfExpertise]
class ExpertiseWorkProgramView(generics.RetrieveAPIView):
    # TODO: Why is this view needed at all?
    """
    Returns the expertise associated with the given work program id.
    """
    queryset = Expertise.objects.all()
    serializer_class = ExpertiseSerializer
    permission_classes = [IsWorkProgramMemberOfExpertise, IsRpdDeveloperOrReadOnly]

    def get_object(self):
        # The URL pk is the *work program's* id, not the expertise's own pk,
        # hence the custom lookup instead of the default get_object().
        try:
            return Expertise.objects.get(work_program__id=self.kwargs['pk'])
        except Expertise.DoesNotExist:
            raise NotFound()
class ExpertiseListView(generics.ListAPIView):
    """
    List expertises visible to the current user:
    - expertise masters see everything;
    - structural-unit leaders/deputies see their unit's work programs plus
      expertises they take part in;
    - everyone else sees only expertises they take part in.
    """
    queryset = Expertise.objects.all()
    serializer_class = ExpertiseSerializer
    permission_classes = [IsMemberOfUserExpertise]

    def list(self, request, **kwargs):
        # Scope the queryset by the user's role instead of using
        # `self.queryset` directly.
        if request.user.groups.filter(name="expertise_master"):
            queryset = Expertise.objects.all()
        elif UserStructuralUnit.objects.filter(user=request.user, status__in=["leader", "deputy"]):
            queryset = Expertise.objects.filter(
                work_program__structural_unit__user_in_structural_unit__user=request.user,
                work_program__structural_unit__user_in_structural_unit__status__in=["leader", "deputy"]).distinct() | \
                       Expertise.objects.filter(expertse_users_in_rpd__expert=request.user).distinct()
        else:
            queryset = Expertise.objects.filter(expertse_users_in_rpd__expert=request.user)
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True)
        # BUG FIX: the original returned the literal string "newdata" and
        # computed dict(serializer.data[0]) — which is unused and raises
        # IndexError on an empty queryset. Return the serialized data.
        return Response(serializer.data)
class ExpertiseViewById(generics.RetrieveAPIView):
    """
    Retrieve a single expertise by its id (expertise masters only).
    """
    queryset = Expertise.objects.all()
    serializer_class = ExpertiseSerializer
    permission_classes = [IsExpertiseMaster]
class ExpertiseCreateView(generics.CreateAPIView):
    """
    Create an expertise.
    Automatically adds the creating user as the expertise leader
    (see the serializer for the creation details).
    """
    queryset = Expertise.objects.all()
    serializer_class = ExpertiseSerializer
    permission_classes = [IsRpdDeveloperOrReadOnly]
class ChangeExpertiseView(generics.UpdateAPIView):
    """
    Edit an expertise.
    """
    queryset = Expertise.objects.all()
    serializer_class = ExpertiseSerializer
    permission_classes = [IsExpertiseMaster]
class ChangeUserExpertiseView(generics.UpdateAPIView):
    """
    Edit an individual user's expertise record.
    """
    queryset = UserExpertise.objects.all()
    serializer_class = UserExpertiseSerializer
    permission_classes = [IsMemberOfUserExpertise]
class DeleteUserExpertise(generics.DestroyAPIView):
    """
    Delete an individual user's expertise record.
    (The original docstring said "edit", but this is a DestroyAPIView.)
    """
    queryset = UserExpertise.objects.all()
    serializer_class = UserExpertiseSerializer
    permission_classes = [IsExpertiseMaster]
| application/workprogramsapp/expertise/views.py | 6,570 | Редактирование экспертизы
Редактирование экспертизы отдельного пользователя
Редактирование экспертизы отдельного пользователя
создание коммента к экспертизе
View для получения и отправки комментариев
Комментарии можно получить или отправить, указав в адресе id экспертизы,
При желании можно в параметрах указать блок комментариев для GET-запроса
Создание экспертизы
Автоматически добавляет пользователя-создателя как лидера экспертизы
(Подробней о создании экспертизы см. сериализатор)
ссылка выдает все экспертизы связанные с id рабочей программы
создание экспертизы
Вывод всей информации об экспертизе для эксперта (автоматически по токену пользователя выдает экспертизы, в которых он учавствует):
Если нужна опредленная экспертиза от пользователя, то надо указать ее id
TODO: Зачем вообще эта вьюха нужна? Note the use of `get_queryset()` instead of `self.queryset` | 870 | ru | 0.996548 |
import datetime
from ..errors import NaiveDateTimeNotAllowed
from ..ewsdatetime import EWSDateTime
from ..util import create_element, set_xml_value, xml_text_to_value, peek, TNS, MNS
from ..version import EXCHANGE_2010
from .common import EWSService
class GetServerTimeZones(EWSService):
    """
    MSDN: https://msdn.microsoft.com/en-us/library/office/dd899371(v=exchg.150).aspx

    Fetches timezone definitions (periods, transitions and transition
    groups) from the Exchange server.
    """
    SERVICE_NAME = 'GetServerTimeZones'
    element_container_name = '{%s}TimeZoneDefinitions' % MNS

    def call(self, timezones=None, return_full_timezone_data=False):
        """Call the service. `timezones` is an optional iterable of timezone
        objects (only their `ms_id` is sent); `return_full_timezone_data`
        requests complete definitions from the server."""
        # This service only exists on Exchange 2010 and later.
        if self.protocol.version.build < EXCHANGE_2010:
            raise NotImplementedError('%s is only supported for Exchange 2010 servers and later' % self.SERVICE_NAME)
        return self._get_elements(payload=self.get_payload(
            timezones=timezones,
            return_full_timezone_data=return_full_timezone_data
        ))

    def get_payload(self, timezones, return_full_timezone_data):
        """Build the XML request body for the service call."""
        payload = create_element(
            'm:%s' % self.SERVICE_NAME,
            attrs=dict(ReturnFullTimeZoneData='true' if return_full_timezone_data else 'false'),
        )
        if timezones is not None:
            # `peek` distinguishes an empty iterable from a non-empty one
            # without losing the first element.
            is_empty, timezones = peek(timezones)
            if not is_empty:
                tz_ids = create_element('m:Ids')
                for timezone in timezones:
                    tz_id = set_xml_value(create_element('t:Id'), timezone.ms_id, version=self.protocol.version)
                    tz_ids.append(tz_id)
                payload.append(tz_ids)
        return payload

    def _get_elements_in_container(self, container):
        """Yield one (id, name, periods, transitions, transitions_groups)
        tuple per timezone definition in the response container."""
        for timezonedef in container:
            tz_id = timezonedef.get('Id')
            tz_name = timezonedef.get('Name')
            tz_periods = self._get_periods(timezonedef)
            tz_transitions_groups = self._get_transitions_groups(timezonedef)
            tz_transitions = self._get_transitions(timezonedef)
            yield (tz_id, tz_name, tz_periods, tz_transitions, tz_transitions_groups)

    @staticmethod
    def _get_periods(timezonedef):
        """Parse the Periods element into a dict mapping
        (year, period_type) -> {'name': ..., 'bias': timedelta}."""
        tz_periods = {}
        periods = timezonedef.find('{%s}Periods' % TNS)
        for period in periods.findall('{%s}Period' % TNS):
            # Convert e.g. "trule:Microsoft/Registry/W. Europe Standard Time/2006-Daylight" to (2006, 'Daylight')
            p_year, p_type = period.get('Id').rsplit('/', 1)[1].split('-')
            tz_periods[(int(p_year), p_type)] = dict(
                name=period.get('Name'),
                bias=xml_text_to_value(period.get('Bias'), datetime.timedelta)
            )
        return tz_periods

    @staticmethod
    def _get_transitions_groups(timezonedef):
        """Parse TransitionsGroups into a dict mapping group id -> list of
        transition dicts (plain 'Transition' entries carry only a 'to' key;
        'RecurringDayTransition' entries also carry offset/month/weekday/
        occurrence)."""
        from ..recurrence import WEEKDAY_NAMES
        tz_transitions_groups = {}
        transitiongroups = timezonedef.find('{%s}TransitionsGroups' % TNS)
        if transitiongroups is not None:
            for transitiongroup in transitiongroups.findall('{%s}TransitionsGroup' % TNS):
                tg_id = int(transitiongroup.get('Id'))
                tz_transitions_groups[tg_id] = []
                for transition in transitiongroup.findall('{%s}Transition' % TNS):
                    # Apply same conversion to To as for period IDs
                    to_year, to_type = transition.find('{%s}To' % TNS).text.rsplit('/', 1)[1].split('-')
                    tz_transitions_groups[tg_id].append(dict(
                        to=(int(to_year), to_type),
                    ))
                for transition in transitiongroup.findall('{%s}RecurringDayTransition' % TNS):
                    # Apply same conversion to To as for period IDs
                    to_year, to_type = transition.find('{%s}To' % TNS).text.rsplit('/', 1)[1].split('-')
                    occurrence = xml_text_to_value(transition.find('{%s}Occurrence' % TNS).text, int)
                    if occurrence == -1:
                        # See TimeZoneTransition.from_xml()
                        occurrence = 5
                    tz_transitions_groups[tg_id].append(dict(
                        to=(int(to_year), to_type),
                        offset=xml_text_to_value(transition.find('{%s}TimeOffset' % TNS).text, datetime.timedelta),
                        iso_month=xml_text_to_value(transition.find('{%s}Month' % TNS).text, int),
                        iso_weekday=WEEKDAY_NAMES.index(transition.find('{%s}DayOfWeek' % TNS).text) + 1,
                        occurrence=occurrence,
                    ))
        return tz_transitions_groups

    @staticmethod
    def _get_transitions(timezonedef):
        """Parse Transitions into a dict mapping transition-group id to the
        date the group takes effect (None for the initial transition)."""
        tz_transitions = {}
        transitions = timezonedef.find('{%s}Transitions' % TNS)
        if transitions is not None:
            for transition in transitions.findall('{%s}Transition' % TNS):
                to = transition.find('{%s}To' % TNS)
                if to.get('Kind') != 'Group':
                    raise ValueError('Unexpected "Kind" XML attr: %s' % to.get('Kind'))
                tg_id = xml_text_to_value(to.text, int)
                tz_transitions[tg_id] = None
            for transition in transitions.findall('{%s}AbsoluteDateTransition' % TNS):
                to = transition.find('{%s}To' % TNS)
                if to.get('Kind') != 'Group':
                    raise ValueError('Unexpected "Kind" XML attr: %s' % to.get('Kind'))
                tg_id = xml_text_to_value(to.text, int)
                try:
                    t_date = xml_text_to_value(transition.find('{%s}DateTime' % TNS).text, EWSDateTime).date()
                except NaiveDateTimeNotAllowed as e:
                    # We encountered a naive datetime. Don't worry. we just need the date
                    t_date = e.args[0].date()
                tz_transitions[tg_id] = t_date
        return tz_transitions
| exchangelib/services/get_server_time_zones.py | 5,814 | MSDN: https://msdn.microsoft.com/en-us/library/office/dd899371(v=exchg.150).aspx
Convert e.g. "trule:Microsoft/Registry/W. Europe Standard Time/2006-Daylight" to (2006, 'Daylight') Apply same conversion to To as for period IDs Apply same conversion to To as for period IDs See TimeZoneTransition.from_xml() We encountered a naive datetime. Don't worry. we just need the date | 376 | en | 0.763411 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for the Embedding Projector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import io
import json
import os
import numpy as np
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.tensorboard.backend import application
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.plugins.projector import projector_plugin
class ProjectorAppTest(test.TestCase):
  """Integration tests exercising the projector plugin's HTTP routes."""

  def setUp(self):
    self.log_dir = self.get_temp_dir()

  def testRunsWithValidCheckpoint(self):
    # A checkpoint in the log dir is reported as the '.' run.
    self._GenerateProjectorTestData()
    self._SetupWSGIApp()
    run_json = self._GetJson('/data/plugin/projector/runs')
    self.assertEqual(run_json, ['.'])

  def testRunsWithNoCheckpoint(self):
    self._SetupWSGIApp()
    run_json = self._GetJson('/data/plugin/projector/runs')
    self.assertEqual(run_json, [])

  def testRunsWithInvalidModelCheckpointPath(self):
    # A checkpoint state file pointing at a missing checkpoint must be
    # ignored rather than reported as a run.
    checkpoint_file = os.path.join(self.log_dir, 'checkpoint')
    f = open(checkpoint_file, 'w')
    f.write('model_checkpoint_path: "does_not_exist"\n')
    f.write('all_model_checkpoint_paths: "does_not_exist"\n')
    f.close()
    self._SetupWSGIApp()
    run_json = self._GetJson('/data/plugin/projector/runs')
    self.assertEqual(run_json, [])

  def testInfoWithValidCheckpoint(self):
    self._GenerateProjectorTestData()
    self._SetupWSGIApp()
    info_json = self._GetJson('/data/plugin/projector/info?run=.')
    self.assertItemsEqual(info_json['embeddings'], [{
        'tensorShape': [1, 2],
        'tensorName': 'var1'
    }, {
        'tensorShape': [10, 10],
        'tensorName': 'var2'
    }, {
        'tensorShape': [100, 100],
        'tensorName': 'var3'
    }])

  def testTensorWithValidCheckpoint(self):
    self._GenerateProjectorTestData()
    self._SetupWSGIApp()
    url = '/data/plugin/projector/tensor?run=.&name=var1'
    tensor_bytes = self._Get(url).data
    # BUG FIX: np.fromstring on binary data is deprecated; np.frombuffer is
    # the supported equivalent for decoding raw bytes.
    tensor = np.reshape(np.frombuffer(tensor_bytes, dtype='float32'), [1, 2])
    expected_tensor = np.array([[6, 6]], dtype='float32')
    self.assertTrue(np.array_equal(tensor, expected_tensor))

  def _SetupWSGIApp(self):
    """Build a TensorBoard WSGI app over self.log_dir and wrap it in a
    werkzeug test client stored as self.server."""
    multiplexer = event_multiplexer.EventMultiplexer(
        size_guidance=application.DEFAULT_SIZE_GUIDANCE,
        purge_orphaned_data=True)
    projector = projector_plugin.ProjectorPlugin()
    projector.get_plugin_apps(multiplexer, self.log_dir)
    plugins = {'projector': projector}
    wsgi_app = application.TensorBoardWSGIApp(
        self.log_dir, plugins, multiplexer, reload_interval=0)
    self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)

  def _Get(self, path):
    return self.server.get(path)

  def _GetJson(self, path):
    """GET `path` and decode the (possibly gzip-compressed) JSON body."""
    response = self.server.get(path)
    data = response.data
    if response.headers.get('Content-Encoding') == 'gzip':
      data = gzip.GzipFile('', 'rb', 9, io.BytesIO(data)).read()
    return json.loads(data.decode('utf-8'))

  def _GenerateProjectorTestData(self):
    """Write a projector config and a V1 checkpoint with three variables."""
    config_path = os.path.join(self.log_dir, 'projector_config.pbtxt')
    config = ProjectorConfig()
    embedding = config.embeddings.add()
    # Add an embedding by its canonical tensor name.
    embedding.tensor_name = 'var1:0'
    config_pbtxt = text_format.MessageToString(config)
    with gfile.GFile(config_path, 'w') as f:
      f.write(config_pbtxt)
    # Write a checkpoint with some dummy variables.
    with ops.Graph().as_default():
      sess = session.Session()
      checkpoint_path = os.path.join(self.log_dir, 'model')
      variable_scope.get_variable(
          'var1', [1, 2], initializer=init_ops.constant_initializer(6.0))
      variable_scope.get_variable('var2', [10, 10])
      variable_scope.get_variable('var3', [100, 100])
      sess.run(variables.global_variables_initializer())
      saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
      saver.save(sess, checkpoint_path)
if __name__ == '__main__':
  # Delegate discovery and execution to the TensorFlow test runner.
  test.main()
| tensorflow/tensorboard/plugins/projector/projector_plugin_test.py | 5,280 | Integration tests for the Embedding Projector.
Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Add an embedding by its canonical tensor name. Write a checkpoint with some dummy variables. | 802 | en | 0.838456 |
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def compute_lr(target_lr, n_epochs, train_set_size, batch_size, warmup):
    """Build a slanted-triangular learning-rate schedule.

    The rate rises linearly from 0 to ``target_lr`` over the first
    ``warmup`` fraction of training, then decays linearly back to 0.

    :param target_lr: peak learning rate
    :param n_epochs: total number of epochs (the schedule covers n_epochs - 1)
    :param train_set_size: number of training samples
    :param batch_size: samples per batch
    :param warmup: warm-up phase as a fraction of total progress (0..1)
    :return: list with one learning rate per optimizer step
    """
    steps_per_epoch = int(np.ceil(train_set_size / batch_size))
    total_steps = (n_epochs - 1) * steps_per_epoch
    schedule = []
    for step in range(total_steps):
        progress = float(step) / total_steps
        if progress < warmup:
            scale = progress / warmup
        else:
            scale = max((progress - 1.) / (warmup - 1.), 0.)
        schedule.append(scale * target_lr)
    return schedule
def load_train_log(directories, num_epochs, target_lr, **kwargs):
    """Load and concatenate per-epoch pickled loss frames from several runs.

    For each run directory the files ``loss_ep1.pkl`` .. ``loss_ep{N-1}.pkl``
    are read (missing files are skipped) and an ``lr`` column is attached
    from the schedule that compute_lr() produces for that run.

    :param directories: run directories, one per training run
    :param num_epochs: epochs per run, aligned with ``directories``
    :param target_lr: peak learning rate per run, aligned with ``directories``
    :param kwargs: forwarded to compute_lr (train_set_size, batch_size, warmup)
    :return: single concatenated DataFrame with a fresh index
    """
    frames = []
    for directory, epochs, peak_lr in zip(directories, num_epochs, target_lr):
        candidates = ('{}/loss_ep{}.pkl'.format(directory, epoch)
                      for epoch in range(1, epochs))
        existing = [path for path in candidates if os.path.exists(path)]
        frame = pd.concat([pd.read_pickle(path) for path in existing])
        # Truncate the schedule in case fewer steps were logged than planned.
        frame['lr'] = compute_lr(target_lr=peak_lr, n_epochs=epochs, **kwargs)[0:len(frame)]
        frames.append(frame)
    return pd.concat(frames).reset_index(drop=True)
def plot_loss_against_lr(loss, wnd_size=6000):
    """Plot the rolling-mean loss (blue, left axis) and learning rate
    (red, right axis) on a shared x-axis.

    :param loss: DataFrame with 'loss' and 'lr' columns, as produced by
        load_train_log
    :param wnd_size: rolling-window size used to smooth both curves
    """
    fig = plt.figure(figsize=(11.69, 8.27))  # A4 landscape, in inches
    ax1 = fig.add_subplot(111)
    ax1.set_xlabel('time')
    ax1.set_ylabel('loss', color='b')
    ax1.plot(loss.loss.rolling(wnd_size).mean(), color='b')
    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
    ax2.set_ylabel('learning rate', color='r')
    ax2.plot(loss.lr.rolling(wnd_size).mean(), 'r')
| qurator/sbb_ned/models/evaluation.py | 1,376 | instantiate a second axes that shares the same x-axis | 53 | en | 0.86361 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'monetdbe'
copyright = '2021, MonetDB Solutions'
author = 'Niels Nes'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    # No extensions enabled; add e.g. 'sphinx.ext.autodoc' here when needed.
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# This is needed to keep readthedocs happy
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| documentation/monetdbe/conf.py | 1,957 | Configuration file for the Sphinx documentation builder. This file only contains a selection of the most common options. For a full list see the documentation: https://www.sphinx-doc.org/en/master/usage/configuration.html -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys sys.path.insert(0, os.path.abspath('.')) -- Project information ----------------------------------------------------- The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. This is needed to keep readthedocs happy -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". | 1,634 | en | 0.691947 |
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
from datetime import datetime, timedelta
from awscli.formatter import get_formatter
from awscli.arguments import CustomArgument
from awscli.customizations.commands import BasicCommand
from awscli.customizations.datapipeline import translator
from awscli.customizations.datapipeline.createdefaultroles \
import CreateDefaultRoles
from awscli.customizations.datapipeline.listrunsformatter \
import ListRunsFormatter
# CLI help strings for the custom datapipeline arguments registered below.
DEFINITION_HELP_TEXT = """\
The JSON pipeline definition. If the pipeline definition
is in a file you can use the file://<filename> syntax to
specify a filename.
"""
PARAMETER_OBJECTS_HELP_TEXT = """\
The JSON parameter objects. If the parameter objects are
in a file you can use the file://<filename> syntax to
specify a filename. You can optionally provide these in
pipeline definition as well. Parameter objects provided
on command line would replace the one in definition.
"""
PARAMETER_VALUES_HELP_TEXT = """\
The JSON parameter values. If the parameter values are
in a file you can use the file://<filename> syntax to
specify a filename. You can optionally provide these in
pipeline definition as well. Parameter values provided
on command line would replace the one in definition.
"""
INLINE_PARAMETER_VALUES_HELP_TEXT = """\
The JSON parameter values. You can specify these as
key-value pairs in the key=value format. Multiple parameters
are separated by a space. For list type parameter values
you can use the same key name and specify each value as
a key value pair. e.g. arrayValue=value1 arrayValue=value2
"""
class DocSectionNotFoundError(Exception):
    """Raised when the "Output" section cannot be located in the generated docs."""
    pass
class ParameterDefinitionError(Exception):
    """Raised when a pipeline parameter definition is invalid.

    The original message is kept on ``self.msg``; the exception text is the
    message wrapped in an "Error in parameter:" prefix.
    """

    def __init__(self, msg):
        super(ParameterDefinitionError, self).__init__(
            "Error in parameter: %s\n" % msg)
        self.msg = msg
def register_customizations(cli):
    """Attach the datapipeline CLI customizations to the CLI event system."""
    cli.register(
        'building-argument-table.datapipeline.put-pipeline-definition',
        add_pipeline_definition)
    cli.register(
        'building-argument-table.datapipeline.activate-pipeline',
        activate_pipeline_definition)
    cli.register(
        'after-call.datapipeline.GetPipelineDefinition',
        translate_definition)
    cli.register(
        'building-command-table.datapipeline',
        register_commands)
    # register_last so the doc rewrite runs after the default doc handlers
    # have produced the output section we rewrite.
    cli.register_last(
        'doc-output.datapipeline.get-pipeline-definition',
        document_translation)
def register_commands(command_table, session, **kwargs):
    """Add the custom list-runs and create-default-roles subcommands."""
    command_table['list-runs'] = ListRunsCommand(session)
    command_table['create-default-roles'] = CreateDefaultRoles(session)
def document_translation(help_command, **kwargs):
    """Rewrite the get-pipeline-definition "Output" doc section.

    Pops generated writes off the doc until the Output header is found,
    then re-emits the header followed by a pointer to the pipeline
    definition file syntax documentation.
    """
    # Remove all the writes until we get to the output.
    # I don't think this is the ideal way to do this, we should
    # improve our plugin/doc system to make this easier.
    doc = help_command.doc
    section_header = '======\nOutput\n======'
    popped = ''
    while popped != section_header:
        try:
            popped = doc.pop_write()
        except IndexError:
            # This should never happen, but in the rare case that it does
            # we should be raising something with a helpful error message.
            raise DocSectionNotFoundError(
                'Could not find the "output" section for the command: %s'
                % help_command)
    doc.write(section_header)
    doc.write(
        '\nThe output of this command is the pipeline definition, which'
        ' is documented in the '
        '`Pipeline Definition File Syntax '
        '<http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/'
        'dp-writing-pipeline-definition.html>`__')
def add_pipeline_definition(argument_table, **kwargs):
    """Swap put-pipeline-definition's raw args for friendlier ones."""
    definition_arg = PipelineDefinitionArgument(
        'pipeline-definition', required=True,
        help_text=DEFINITION_HELP_TEXT)
    argument_table['pipeline-definition'] = definition_arg

    objects_arg = ParameterObjectsArgument(
        'parameter-objects', required=False,
        help_text=PARAMETER_OBJECTS_HELP_TEXT)
    argument_table['parameter-objects'] = objects_arg

    uri_arg = ParameterValuesArgument(
        'parameter-values-uri',
        required=False,
        help_text=PARAMETER_VALUES_HELP_TEXT)
    argument_table['parameter-values-uri'] = uri_arg

    # Inline parameters need nargs='+' so an argument model accepts a list.
    inline_arg = ParameterValuesInlineArgument(
        'parameter-values',
        required=False,
        nargs='+',
        help_text=INLINE_PARAMETER_VALUES_HELP_TEXT)
    argument_table['parameter-values'] = inline_arg

    # The pipeline-objects is no longer needed required because
    # a user can provide a pipeline-definition instead.
    # get-pipeline-definition also displays the output in the
    # translated format.
    del argument_table['pipeline-objects']
def activate_pipeline_definition(argument_table, **kwargs):
    """Add the parameter-value arguments to activate-pipeline."""
    uri_arg = ParameterValuesArgument(
        'parameter-values-uri', required=False,
        help_text=PARAMETER_VALUES_HELP_TEXT)
    argument_table['parameter-values-uri'] = uri_arg

    # Inline parameters need nargs='+' so an argument model accepts a list.
    inline_arg = ParameterValuesInlineArgument(
        'parameter-values',
        required=False,
        nargs='+',
        help_text=INLINE_PARAMETER_VALUES_HELP_TEXT,
    )
    argument_table['parameter-values'] = inline_arg
def translate_definition(parsed, **kwargs):
    # after-call handler for GetPipelineDefinition.
    # NOTE(review): the return value is discarded, so api_to_definition
    # presumably rewrites ``parsed`` in place — confirm in the translator
    # module.
    translator.api_to_definition(parsed)
def convert_described_objects(api_describe_objects, sort_key_func=None):
    """Flatten DescribeObjects results into plain dicts.

    Each API object carries a ``fields`` list such as
    ``{u'key': u'@sphere', u'stringValue': u'INSTANCE'}``; every entry
    becomes a ``{"@sphere": "INSTANCE"}`` style key/value on the result,
    alongside ``@id`` and ``name``. ``refValue`` is used when
    ``stringValue`` is absent.
    """
    results = []
    for described in api_describe_objects:
        flattened = {
            '@id': described['id'],
            'name': described['name'],
        }
        flattened.update(
            (field['key'], field.get('stringValue', field.get('refValue')))
            for field in described['fields'])
        results.append(flattened)
    if sort_key_func is not None:
        results.sort(key=sort_key_func)
    return results
class QueryArgBuilder(object):
    """
    Convert CLI arguments to Query arguments used by QueryObject.
    """

    def __init__(self, current_time=None):
        # Allow a fixed timestamp to be injected (useful for testing);
        # otherwise use "now" in UTC.
        self.current_time = (
            datetime.utcnow() if current_time is None else current_time)

    def build_query(self, parsed_args):
        """Build the query dict from parsed CLI arguments."""
        selectors = []
        no_intervals = (parsed_args.start_interval is None and
                        parsed_args.schedule_interval is None)
        if no_intervals:
            # Default window: the four days leading up to the current time.
            end_dt = self.current_time
            start_dt = end_dt - timedelta(days=4)
            selectors.append(self._between(
                '@actualStartTime',
                start_dt.strftime('%Y-%m-%dT%H:%M:%S'),
                end_dt.strftime('%Y-%m-%dT%H:%M:%S')))
        else:
            self._build_schedule_times(selectors, parsed_args)
        if parsed_args.status is not None:
            self._build_status(selectors, parsed_args)
        return {'selectors': selectors}

    @staticmethod
    def _between(field_name, start, end):
        # Shared shape for BETWEEN selectors.
        return {
            'fieldName': field_name,
            'operator': {
                'type': 'BETWEEN',
                'values': [start, end]
            }
        }

    def _build_schedule_times(self, selectors, parsed_args):
        if parsed_args.start_interval is not None:
            selectors.append(self._between(
                '@actualStartTime',
                parsed_args.start_interval[0],
                parsed_args.start_interval[1]))
        if parsed_args.schedule_interval is not None:
            selectors.append(self._between(
                '@scheduledStartTime',
                parsed_args.schedule_interval[0],
                parsed_args.schedule_interval[1]))

    def _build_status(self, selectors, parsed_args):
        # Statuses are matched uppercased, as the API expects.
        selectors.append({
            'fieldName': '@status',
            'operator': {
                'type': 'EQ',
                'values': [status.upper() for status in parsed_args.status]
            }
        })
class PipelineDefinitionArgument(CustomArgument):
    """--pipeline-definition: takes the definition file as a JSON string."""

    def add_to_params(self, parameters, value):
        """Translate the definition JSON into API request parameters."""
        if value is None:
            return
        definition = json.loads(value)
        api_objects = translator.definition_to_api_objects(definition)
        parameter_objects = translator.definition_to_api_parameters(definition)
        parameter_values = translator.definition_to_parameter_values(definition)
        parameters['pipelineObjects'] = api_objects
        # Parameter objects/values found in the definition only apply when
        # the user has not supplied them through a dedicated argument.
        if parameter_objects is not None \
                and 'parameterObjects' not in parameters:
            parameters['parameterObjects'] = parameter_objects
        if parameter_values is not None \
                and 'parameterValues' not in parameters:
            parameters['parameterValues'] = parameter_values
class ParameterObjectsArgument(CustomArgument):
    """--parameter-objects: takes parameter objects as a JSON string."""

    def add_to_params(self, parameters, value):
        if value is None:
            return
        document = json.loads(value)
        parameters['parameterObjects'] = (
            translator.definition_to_api_parameters(document))
class ParameterValuesArgument(CustomArgument):
    """--parameter-values-uri: takes parameter values as a JSON string."""

    def add_to_params(self, parameters, value):
        """Translate values JSON into the parameterValues API parameter.

        Raises if inline parameter values were already provided, since
        the two forms are mutually exclusive.
        """
        if value is None:
            return
        if parameters.get('parameterValues', None) is not None:
            raise Exception(
                "Only parameter-values or parameter-values-uri is allowed"
            )
        document = json.loads(value)
        parameters['parameterValues'] = (
            translator.definition_to_parameter_values(document))
class ParameterValuesInlineArgument(CustomArgument):
    """--parameter-values: accepts inline ``key=value`` parameter values."""

    def add_to_params(self, parameters, value):
        """Parse ``key=value`` tokens into the parameterValues parameter.

        Repeated keys accumulate into a flat list so list-typed
        parameters can be given as ``key=a key=b key=c`` (the help text
        documents this usage). Raises if parameter-values-uri was
        already provided, and ParameterDefinitionError for a token
        without ``=``.
        """
        if value is None:
            return
        if parameters.get('parameterValues', None) is not None:
            raise Exception(
                "Only parameter-values or parameter-values-uri is allowed"
            )
        parameter_object = {}
        # Split each token at the first '=' so values may contain '='.
        for argument in value:
            try:
                key, argument_value = argument.split('=', 1)
            except ValueError:
                raise ParameterDefinitionError(
                    "Invalid inline parameter format: %s" % argument
                )
            if key in parameter_object:
                existing = parameter_object[key]
                if isinstance(existing, list):
                    # Third or later occurrence: extend the list.
                    # (BUGFIX: previously this nested lists, producing
                    # [[v1, v2], v3] for three occurrences of a key.)
                    existing.append(argument_value)
                else:
                    # Second occurrence: promote the scalar to a list.
                    parameter_object[key] = [existing, argument_value]
            else:
                parameter_object[key] = argument_value
        parsed = {'values': parameter_object}
        parameter_values = translator.definition_to_parameter_values(parsed)
        parameters['parameterValues'] = parameter_values
class ListRunsCommand(BasicCommand):
    """Custom ``datapipeline list-runs`` command.

    Queries a pipeline for its run instances, optionally filtered by
    status and by actual/scheduled start-time intervals, and prints them
    with the selected formatter.
    """
    NAME = 'list-runs'
    DESCRIPTION = (
        'Lists the times the specified pipeline has run. '
        'You can optionally filter the complete list of '
        'results to include only the runs you are interested in.')
    # Argument definitions consumed by BasicCommand to build the CLI table.
    ARG_TABLE = [
        {'name': 'pipeline-id', 'help_text': 'The identifier of the pipeline.',
         'action': 'store', 'required': True, 'cli_type_name': 'string', },
        {'name': 'status',
         'help_text': (
             'Filters the list to include only runs in the '
             'specified statuses. '
             'The valid statuses are as follows: waiting, pending, cancelled, '
             'running, finished, failed, waiting_for_runner, '
             'and waiting_on_dependencies. You can combine statuses as a '
             'comma-separated list. For example: '
             '<code>--status pending,waiting_on_dependencies</code>'),
         'action': 'store'},
        {'name': 'start-interval',
         'help_text': (
             'Filters the list to include only runs that started '
             'within the specified interval.'),
         'action': 'store', 'required': False, 'cli_type_name': 'string', },
        {'name': 'schedule-interval',
         'help_text': (
             'Filters the list to include only runs that are scheduled to '
             'start within the specified interval.'),
         'action': 'store', 'required': False, 'cli_type_name': 'string', },
    ]
    # NOTE(review): 'shutting_down' is accepted here but is not mentioned in
    # the status help text above — confirm whether the help should list it.
    VALID_STATUS = ['waiting', 'pending', 'cancelled', 'running',
                    'finished', 'failed', 'waiting_for_runner',
                    'waiting_on_dependencies', 'shutting_down']

    def _run_main(self, parsed_args, parsed_globals, **kwargs):
        """Entry point: create the client, normalize args, list the runs."""
        self._set_client(parsed_globals)
        self._parse_type_args(parsed_args)
        self._list_runs(parsed_args, parsed_globals)

    def _set_client(self, parsed_globals):
        # This is called from _run_main and is used to ensure that we have
        # a service/endpoint object to work with.
        self.client = self._session.create_client(
            'datapipeline',
            region_name=parsed_globals.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl)

    def _parse_type_args(self, parsed_args):
        """Split comma-separated CLI strings into stripped lists, in place."""
        # TODO: give good error messages!
        # Parse the start/schedule times.
        # Parse the status csv.
        if parsed_args.start_interval is not None:
            parsed_args.start_interval = [
                arg.strip() for arg in
                parsed_args.start_interval.split(',')]
        if parsed_args.schedule_interval is not None:
            parsed_args.schedule_interval = [
                arg.strip() for arg in
                parsed_args.schedule_interval.split(',')]
        if parsed_args.status is not None:
            parsed_args.status = [
                arg.strip() for arg in
                parsed_args.status.split(',')]
            self._validate_status_choices(parsed_args.status)

    def _validate_status_choices(self, statuses):
        """Raise ValueError for any status not in VALID_STATUS."""
        for status in statuses:
            if status not in self.VALID_STATUS:
                raise ValueError("Invalid status: %s, must be one of: %s" %
                                 (status, ', '.join(self.VALID_STATUS)))

    def _list_runs(self, parsed_args, parsed_globals):
        """Query matching run ids, describe them, and print the result."""
        query = QueryArgBuilder().build_query(parsed_args)
        object_ids = self._query_objects(parsed_args.pipeline_id, query)
        objects = self._describe_objects(parsed_args.pipeline_id, object_ids)[
            'pipelineObjects']
        # Sort by scheduled start time, then name, for a stable display order.
        converted = convert_described_objects(
            objects,
            sort_key_func=lambda x: (x.get('@scheduledStartTime'),
                                     x.get('name')))
        formatter = self._get_formatter(parsed_globals)
        formatter(self.NAME, converted)

    def _describe_objects(self, pipeline_id, object_ids):
        """Fetch the full object descriptions for the given ids."""
        parsed = self.client.describe_objects(
            pipelineId=pipeline_id, objectIds=object_ids)
        return parsed

    def _query_objects(self, pipeline_id, query):
        """Return all INSTANCE object ids matching ``query`` (paginated)."""
        paginator = self.client.get_paginator('query_objects').paginate(
            pipelineId=pipeline_id,
            sphere='INSTANCE', query=query)
        parsed = paginator.build_full_result()
        return parsed['ids']

    def _get_formatter(self, parsed_globals):
        # With no explicit --output, use the custom run-listing formatter.
        output = parsed_globals.output
        if output is None:
            return ListRunsFormatter(parsed_globals)
        else:
            return get_formatter(output, parsed_globals)
| dist/awscli/customizations/datapipeline/__init__.py | 16,503 | Convert CLI arguments to Query arguments used by QueryObject.
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Remove all the writes until we get to the output. I don't think this is the ideal way to do this, we should improve our plugin/doc system to make this easier. This should never happen, but in the rare case that it does we should be raising something with a helpful error message. Need to use an argument model for inline parameters to accept a list The pipeline-objects is no longer needed required because a user can provide a pipeline-definition instead. get-pipeline-definition also displays the output in the translated format. Need to use an argument model for inline parameters to accept a list We need to take a field list that looks like this: {u'key': u'@sphere', u'stringValue': u'INSTANCE'}, into {"@sphere": "INSTANCE}. We convert the fields list into a field dict. If no intervals are specified, default to a start time of 4 days ago and an end time of right now. Use Parameter objects and values from def if not already provided break string into = point This is called from _run_main and is used to ensure that we have a service/endpoint object to work with. TODO: give good error messages! Parse the start/schedule times. Parse the status csv. | 1,764 | en | 0.834377 |
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class ProtectionInfo(object):
    """Implementation of the 'ProtectionInfo' model.

    dataLocation defines data location related information.

    Attributes:
        end_time_usecs (long|int): end time for object retention.
        location (string): location of the object.
        policy_id (string): id of the policy.
        protection_job_id (long|int): id of the protection job.
        protection_job_name (string): name of the protection job which
            protects this object.
        retention_period (long|int): retention period.
        start_time_usecs (long|int): start time for object retention.
        storage_domain (string): storage domain name.
        total_snapshots (long|int): total number of snapshots.
    """

    # Maps each Python attribute name to its JSON (API) property name.
    _names = {
        "end_time_usecs": 'endTimeUsecs',
        "location": 'location',
        "policy_id": 'policyId',
        "protection_job_id": 'protectionJobId',
        "protection_job_name": 'protectionJobName',
        "retention_period": 'retentionPeriod',
        "start_time_usecs": 'startTimeUsecs',
        "storage_domain": 'storageDomain',
        "total_snapshots": 'totalSnapshots'
    }

    def __init__(self,
                 end_time_usecs=None,
                 location=None,
                 policy_id=None,
                 protection_job_id=None,
                 protection_job_name=None,
                 retention_period=None,
                 start_time_usecs=None,
                 storage_domain=None,
                 total_snapshots=None):
        """Initialize a ProtectionInfo with the given attribute values."""
        self.end_time_usecs = end_time_usecs
        self.location = location
        self.policy_id = policy_id
        self.protection_job_id = protection_job_id
        self.protection_job_name = protection_job_name
        self.retention_period = retention_period
        self.start_time_usecs = start_time_usecs
        self.storage_domain = storage_domain
        self.total_snapshots = total_snapshots

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build a ProtectionInfo from a deserialized API response dict.

        Args:
            dictionary (dict): keys MUST match the API property names.

        Returns:
            ProtectionInfo, or None when ``dictionary`` is None.
        """
        if dictionary is None:
            return None
        # Missing API properties simply default each attribute to None.
        kwargs = {attr: dictionary.get(api_key)
                  for attr, api_key in cls._names.items()}
        return cls(**kwargs)
| cohesity_management_sdk/models/protection_info.py | 3,747 | Implementation of the 'ProtectionInfo' model.
dataLocation defines data location related information.
Attributes:
end_time_usecs (long|int): Specifies the end time for object
retention.
location (string): Specifies the location of the object.
policy_id (string): Specifies the id of the policy.
protection_job_id (long|int): Specifies the id of the protection job.
protection_job_name (string): Specifies the protection job name which
protects this object.
retention_period (long|int): Specifies the retention period.
start_time_usecs (long|int): Specifies the start time for object
retention.
storage_domain (string): Specifies the storage domain name.
total_snapshots (long|int): Specifies the total number of snapshots.
Constructor for the ProtectionInfo class
Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
-*- coding: utf-8 -*- Copyright 2021 Cohesity Inc. Create a mapping from Model property names to API property names Initialize members of the class Extract variables from the dictionary Return an object of this model | 1,363 | en | 0.739122 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of Archdiffer and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Created on Sun Mar 4 10:23:41 2018
@author: Pavla Kratochvilova <pavla.kratochvilova@gmail.com>
"""
import operator
import datetime
from flask import request
from .exceptions import BadRequest
def make_datetime(time_string, formats=None):
    """Make datetime from string based on one of the formats.

    :param string time_string: time in string
    :param list formats: accepted formats; defaults to
        '%Y-%m-%d %H:%M:%S' and '%Y-%m-%d'
    :return datetime.datetime: datetime, or None if no format matched
    """
    accepted_formats = formats if formats is not None else [
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d',
    ]
    for fmt in accepted_formats:
        try:
            return datetime.datetime.strptime(time_string, fmt)
        except ValueError:
            # Try the next format.
            continue
    return None
# Transformation functions
def _dict_transform(string):
return dict([item.split(':', 1) for item in string.split(';')])
def _list_transform(string):
return string.split(',')
# Transformations of common arguments
# Transformations applied to recognized query-string modifiers.  Keys are
# modifier names; values convert the raw request string into the object
# expected downstream.  NOTE: update_modifiers compares entries against
# _dict_transform/_list_transform by equality, so these exact function
# references must be kept.
_TRANSFORMATIONS = {
    'filter_by' : _dict_transform,
    'filter' : _list_transform,
    'order_by' : _list_transform,
    'limit' : lambda x: int(x),
    'offset' : lambda x: int(x),
}
# Filters creators
def before(column, name='before'):
    """Build a filter template for column values <= a datetime.

    :param column: database model column
    :param string name: key used in the filter template
    :return dict: template mapping ``name`` to (column, operator, transform)
    """
    template = (column, operator.le, make_datetime)
    return {name: template}
def after(column, name='after'):
    """Build a filter template for column values >= a datetime.

    :param column: database model column
    :param string name: key used in the filter template
    :return dict: template mapping ``name`` to (column, operator, transform)
    """
    template = (column, operator.ge, make_datetime)
    return {name: template}
def time(column, name='time'):
    """Build a filter template for column values equal to a datetime.

    :param column: database model column
    :param string name: key used in the filter template
    :return dict: template mapping ``name`` to (column, operator, transform)
    """
    template = (column, operator.eq, make_datetime)
    return {name: template}
def equals(column, name='id', function=(lambda x: x)):
    """Build a filter template matching column values exactly.

    The request value is first passed through ``function``.

    :param column: database model column
    :param string name: key used in the filter template
    :param callable function: transformation applied to the raw value
    :return dict: template mapping ``name`` to (column, operator, transform)
    """
    template = (column, operator.eq, function)
    return {name: template}
# Request parser
def parse_request(filters=None, defaults=None):
    """Parse request arguments using _TRANSFORMATIONS and given filters.

    Requests containing other keys are considered invalid.

    :param dict filters: filter templates; each key maps to
        (column, operator, function transforming the request value)
    :param dict defaults: default values of modifiers
    :return dict: parsed arguments (always containing a 'filter' list)
    :raises werkzeug.exceptions.BadRequest: if a request argument is not
        recognized or its value cannot be transformed
    """
    if filters is None:
        filters = {}
    args_dict = dict(defaults) if defaults is not None else {}
    extra_filters = []
    for key, raw_value in request.args.items():
        if key in _TRANSFORMATIONS:
            try:
                args_dict[key] = _TRANSFORMATIONS[key](raw_value)
            except ValueError:
                raise BadRequest(
                    'Argument has invalid value "%s".' % raw_value)
        elif key in filters:
            column, compare, convert = filters[key]
            extra_filters.append(compare(column, convert(raw_value)))
        else:
            raise BadRequest('Argument "%s" not recognized.' % key)
    # Filter expressions built from templates are appended to any
    # 'filter' modifier already present.
    args_dict.setdefault('filter', [])
    args_dict['filter'] += extra_filters
    return args_dict
def get_request_arguments(*names, args_dict=None, invert=False):
    """Select arguments from args_dict (or the current request) by name.

    :param *names: names of arguments to keep
    :param dict args_dict: arguments; parsed from the request when None
    :param bool invert: True if names should be excluded instead
    :return dict: dict of the selected arguments
    """
    if args_dict is None:
        args_dict = parse_request()
    if invert:
        return {key: val for key, val in args_dict.items()
                if key not in names}
    return {key: val for key, val in args_dict.items() if key in names}
def update_modifiers(old_modifiers, new_modifiers):
    """Merge new modifiers into old ones.

    List-typed modifiers are concatenated and dict-typed modifiers are
    merged; anything else is overwritten by the new value.

    :param dict old_modifiers: old modifiers
    :param dict new_modifiers: new modifiers
    :return dict: resulting modifiers
    """
    merged = old_modifiers.copy()
    for key, value in new_modifiers.items():
        if key not in old_modifiers:
            merged[key] = value
            continue
        transform = _TRANSFORMATIONS.get(key)
        if transform == _list_transform:
            merged[key] += value
        elif transform == _dict_transform:
            merged[key].update(value)
        else:
            merged[key] = value
    return merged
| archdiffer/flask_frontend/request_parser.py | 5,304 | Make filter template for filtering column values greater or equal to
datetime.
:param column: database model
:param string name: name used in the filter template
:return dict: resulting template
Make filter template for filtering column values less or equal to
datetime.
:param column: database model
:param string name: name used in the filter template
:return dict: resulting template
Make filter template for filtering column values equal to value
transformed by given function.
:param column: database model
:param string name: name used in the filter template
:param callable function: function for transforming the value
:return dict: resulting template
Get arguments from args_dict or request if they match given names.
:param *names: names of arguments
:param dict args_dict: dict of arguments
:param bool invert: True if names should be exclueded instead
:return dict: dict of arguments
Makes datetime from string based on one of the formats.
:param string time_string: time in string
:param list formats: list of accepted formats
:return datetime.datetime: datetime or None if no format is matched
Parse arguments in request according to the _TRANSFORMATIONS or given
filters.
Requests containing other keys are considered invalid.
:param dict filters: dict of filter templates containing for each key
(column, operator, function transforming value from request argument)
:param dict defaults: default values of modifiers
:return dict: dict of parsed arguments
:raises werkzeug.exceptions.BadRequest: if one of the request arguments is
not recognized
Make filter template for filtering column values equal to datetime.
:param column: database model
:param string name: name used in the filter template
:return dict: resulting template
Update modifiers.
:param dict old_modifiers: old modifiers
:param dict old_modifiers: new modifiers
:return dict: resulting modifiers
Created on Sun Mar 4 10:23:41 2018
@author: Pavla Kratochvilova <pavla.kratochvilova@gmail.com>
!/usr/bin/env python3 -*- coding: utf-8 -*- This file is part of Archdiffer and is released under the MIT License: http://www.opensource.org/licenses/mit-license.php Transformation functions Transformations of common arguments Filters creators Request parser | 2,252 | en | 0.51455 |
#
# mcfly
#
# Copyright 2017 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution1D, Lambda, \
Convolution2D, Flatten, \
Reshape, LSTM, Dropout, TimeDistributed, BatchNormalization, \
GlobalAveragePooling1D, Bidirectional
from keras.layers import CuDNNLSTM # Comment on HPC
from keras.regularizers import l2
from keras.optimizers import Adam
import numpy as np
def generate_models(
        x_shape, number_of_classes, number_of_models=5, metrics=['accuracy'],
        model_type=None,
        cnn_min_layers=5, cnn_max_layers=10,
        cnn_min_filters=25, cnn_max_filters=100,
        cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000,
        deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7,
        deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100,
        deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3,
        deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500,
        low_lr=1, high_lr=4, low_reg=1, high_reg=3
):
    """Generate untrained Keras models with random hyperparameters.

    Each model's type is either fixed by ``model_type`` or, when that is
    None, drawn at random (50/50) between 'CNN' and 'DeepConvLSTM'.  A
    hyperparameter set is sampled within the given bounds and a compiled
    model is built from it.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    number_of_classes : int
        Number of classes for classification task
    number_of_models : int
        Number of models to generate
    metrics : list
        Metrics to calculate on the validation set.
        See https://keras.io/metrics/ for possible values.
    model_type : str, optional
        'CNN' or 'DeepConvLSTM'; the default None mixes both types
    cnn_min_layers, cnn_max_layers : int
        bounds on the number of Conv layers in CNN models
    cnn_min_filters, cnn_max_filters : int
        bounds on filters per Conv layer in CNN models
    cnn_min_fc_nodes, cnn_max_fc_nodes : int
        bounds on hidden nodes of the Dense layer in CNN models
    deepconvlstm_min_conv_layers, deepconvlstm_max_conv_layers : int
        bounds on the number of Conv layers in DeepConvLSTM models
    deepconvlstm_min_conv_filters, deepconvlstm_max_conv_filters : int
        bounds on filters per Conv layer in DeepConvLSTM models
    deepconvlstm_min_lstm_layers, deepconvlstm_max_lstm_layers : int
        bounds on the number of LSTM layers in DeepConvLSTM models
    deepconvlstm_min_lstm_dims, deepconvlstm_max_lstm_dims : int
        bounds on hidden nodes per LSTM layer in DeepConvLSTM models
    low_lr, high_lr : float
        learning rate is sampled between 10**(-low_lr) and 10**(-high_lr)
    low_reg, high_reg : float
        regularization rate is sampled between 10**(-low_reg) and
        10**(-high_reg)

    Returns
    -------
    models : list
        List of (compiled model, hyperparameters, model_type) tuples
    """
    generated = []
    for _ in range(number_of_models):
        if model_type is None:  # random model choice
            chosen_type = 'CNN' if np.random.random() < 0.5 \
                else 'DeepConvLSTM'
        else:  # user-defined model choice
            chosen_type = model_type
        build_fn = None
        if chosen_type == 'CNN':
            build_fn = generate_CNN_model  # build_fn is a function
            hyperparameters = generate_CNN_hyperparameter_set(
                min_layers=cnn_min_layers, max_layers=cnn_max_layers,
                min_filters=cnn_min_filters, max_filters=cnn_max_filters,
                min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        if chosen_type == 'DeepConvLSTM':
            build_fn = generate_DeepConvLSTM_model
            hyperparameters = generate_DeepConvLSTM_hyperparameter_set(
                min_conv_layers=deepconvlstm_min_conv_layers,
                max_conv_layers=deepconvlstm_max_conv_layers,
                min_conv_filters=deepconvlstm_min_conv_filters,
                max_conv_filters=deepconvlstm_max_conv_filters,
                min_lstm_layers=deepconvlstm_min_lstm_layers,
                max_lstm_layers=deepconvlstm_max_lstm_layers,
                min_lstm_dims=deepconvlstm_min_lstm_dims,
                max_lstm_dims=deepconvlstm_max_lstm_dims,
                low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
                high_reg=high_reg)
        model = build_fn(x_shape, number_of_classes, metrics=metrics,
                         **hyperparameters)
        generated.append((model, hyperparameters, chosen_type))
    return generated
def generate_DeepConvLSTM_model(
        x_shape, class_number, filters, lstm_dims, learning_rate=0.01,
        regularization_rate=0.01, metrics=['accuracy']):
    """
    Generate a model with convolution and LSTM layers.
    See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for classification task
    filters : list of ints
        number of filters for each convolutional layer
    lstm_dims : list of ints
        number of hidden nodes for each LSTM layer
    learning_rate : float
        learning rate
    regularization_rate : float
        regularization rate
    metrics : list
        Metrics to calculate on the validation set.
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    # NOTE(review): the mutable default ``metrics=['accuracy']`` is shared
    # across calls; it is only read here, but consider a None default.
    dim_length = x_shape[1]  # number of samples in a time series
    dim_channels = x_shape[2]  # number of channels
    output_dim = class_number  # number of classes
    weightinit = 'lecun_uniform'  # weight initialization
    model = Sequential()  # initialize model
    model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
    # reshape a 2 dimensional array per file/person/object into a
    # 3 dimensional array
    model.add(
        Reshape(target_shape=(dim_length, dim_channels, 1)))
    for filt in filters:
        # filt: number of filters used in a layer
        # filters: vector of filt values
        model.add(
            Convolution2D(filt, kernel_size=(3, 1), padding='same',
                          kernel_regularizer=l2(regularization_rate),
                          kernel_initializer=weightinit))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    # reshape the 3 dimensional array back into a 2 dimensional array,
    # but now with more depth as we have the filters for each channel
    model.add(Reshape(target_shape=(dim_length, filters[-1] * dim_channels)))
    for lstm_dim in lstm_dims:
        #model.add(LSTM(units=lstm_dim, return_sequences=True,
        #               activation='tanh'))
        # comment following line for HPC
        model.add(CuDNNLSTM(units=lstm_dim, return_sequences=True))
    model.add(Dropout(0.5))  # dropout before the dense layer
    # # set up final dense layer such that every timestamp is given one
    # # classification
    # model.add(
    #     TimeDistributed(
    #         Dense(units=output_dim, kernel_regularizer=l2(regularization_rate))))
    # model.add(Activation("softmax"))
    # # Final classification layer - per timestep
    # model.add(Lambda(lambda x: x[:, -1, :], output_shape=[output_dim]))
    # Pool output of all timesteps and perform classification using pooled output
    model.add(GlobalAveragePooling1D())
    model.add(Dense(units=output_dim, kernel_initializer=weightinit))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))  # Final classification layer
    # if class_number == 2:
    #     loss = 'binary_crossentropy'
    # else:
    #     loss = 'categorical_crossentropy'
    loss = 'categorical_crossentropy'
    model.compile(loss=loss,
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes,
                       learning_rate=0.01, regularization_rate=0.01,
                       metrics=['accuracy']):
    """
    Generate a convolutional neural network (CNN) model.

    The compiled Keras model is returned.

    Parameters
    ----------
    x_shape : tuple
        Shape of the input dataset: (num_samples, num_timesteps, num_channels)
    class_number : int
        Number of classes for classification task
    filters : list of ints
        number of filters for each convolutional layer
    fc_hidden_nodes : int
        number of hidden nodes for the hidden dense layer
    learning_rate : float
        learning rate
    regularization_rate : float
        regularization rate
    metrics : list
        Metrics to calculate on the validation set.
        See https://keras.io/metrics/ for possible values.

    Returns
    -------
    model : Keras model
        The compiled Keras model
    """
    # NOTE(review): the mutable default ``metrics=['accuracy']`` is shared
    # across calls; it is only read here, but consider a None default.
    dim_length = x_shape[1]  # number of samples in a time series
    dim_channels = x_shape[2]  # number of channels
    outputdim = class_number  # number of classes
    weightinit = 'lecun_uniform'  # weight initialization
    model = Sequential()
    # Normalize the raw input before any convolution.
    model.add(
        BatchNormalization(
            input_shape=(
                dim_length,
                dim_channels)))
    # One Conv->BatchNorm->ReLU stack per requested filter count.
    for filter_number in filters:
        model.add(Convolution1D(filter_number, kernel_size=3, padding='same',
                                kernel_regularizer=l2(regularization_rate),
                                kernel_initializer=weightinit))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(units=fc_hidden_nodes,
                    kernel_regularizer=l2(regularization_rate),
                    kernel_initializer=weightinit))  # Fully connected layer
    model.add(Activation('relu'))  # Relu activation
    model.add(Dense(units=outputdim, kernel_initializer=weightinit))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))  # Final classification layer
    # if class_number == 2:
    #     loss = 'binary_crossentropy'
    # else:
    #     loss = 'categorical_crossentropy'
    loss = 'categorical_crossentropy'
    model.compile(loss=loss,
                  optimizer=Adam(lr=learning_rate),
                  metrics=metrics)
    return model
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10,
                                    min_filters=10, max_filters=100,
                                    min_fc_nodes=10, max_fc_nodes=2000,
                                    low_lr=1, high_lr=4, low_reg=1,
                                    high_reg=4):
    """ Generate a hyperparameter set that defines a CNN model.

    Parameters
    ----------
    min_layers : int
        minimum of Conv layers
    max_layers : int
        maximum of Conv layers
    min_filters : int
        minimum number of filters per Conv layer
    max_filters : int
        maximum number of filters per Conv layer
    min_fc_nodes : int
        minimum number of hidden nodes per Dense layer
    max_fc_nodes : int
        maximum number of hidden nodes per Dense layer
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_reg)` and `10**(-high_reg)`
    high_lr : float
        maximum of log range for learning rate: learning rate is sampled
        between `10**(-low_reg)` and `10**(-high_reg)`
    low_reg : float
        minimum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`
    high_reg : float
        maximum of log range for regularization rate: regularization rate is
        sampled between `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    ----------
    hyperparameters : dict
        parameters for a CNN model
    """
    hyperparameters = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    number_of_layers = np.random.randint(min_layers, max_layers + 1)
    # Convert numpy types to plain Python types for consistency with
    # generate_DeepConvLSTM_hyperparameter_set (which uses .tolist())
    # and for easy serialization of the hyperparameter dict.
    hyperparameters['filters'] = np.random.randint(
        min_filters, max_filters + 1, number_of_layers).tolist()
    hyperparameters['fc_hidden_nodes'] = int(np.random.randint(
        min_fc_nodes, max_fc_nodes + 1))
    return hyperparameters
def generate_DeepConvLSTM_hyperparameter_set(
        min_conv_layers=1, max_conv_layers=10,
        min_conv_filters=10, max_conv_filters=100,
        min_lstm_layers=1, max_lstm_layers=5,
        min_lstm_dims=10, max_lstm_dims=100,
        low_lr=1, high_lr=4, low_reg=1, high_reg=4):
    """ Sample a random hyperparameter set for a DeepConvLSTM model.

    Parameters
    ----------
    min_conv_layers, max_conv_layers : int
        bounds for the number of Conv layers in the DeepConvLSTM model
    min_conv_filters, max_conv_filters : int
        bounds for the number of filters per Conv layer
    min_lstm_layers, max_lstm_layers : int
        bounds for the number of LSTM layers
    min_lstm_dims, max_lstm_dims : int
        bounds for the number of hidden nodes per LSTM layer
    low_lr, high_lr : float
        log range for the learning rate: sampled between
        `10**(-low_lr)` and `10**(-high_lr)`
    low_reg, high_reg : float
        log range for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    ----------
    hyperparameters : dict
        hyperparameters for a DeepConvLSTM model
    """
    params = generate_base_hyper_parameter_set(
        low_lr, high_lr, low_reg, high_reg)
    # One random filter count per sampled Conv layer.
    n_conv = np.random.randint(min_conv_layers, max_conv_layers + 1)
    params['filters'] = np.random.randint(
        min_conv_filters, max_conv_filters + 1, n_conv).tolist()
    # One random hidden-dimension per sampled LSTM layer.
    n_lstm = np.random.randint(min_lstm_layers, max_lstm_layers + 1)
    params['lstm_dims'] = np.random.randint(
        min_lstm_dims, max_lstm_dims + 1, n_lstm).tolist()
    return params
def generate_base_hyper_parameter_set(
        low_lr=1,
        high_lr=4,
        low_reg=1,
        high_reg=4):
    """ Build the hyperparameter dict shared by every model type (but
    sufficient for none): a learning rate from :func:`get_learning_rate`
    and a regularization rate from :func:`get_regularization`.

    Parameters
    ----------
    low_lr, high_lr : float
        log-range bounds passed to :func:`get_learning_rate`
    low_reg, high_reg : float
        log-range bounds for the regularization rate: sampled between
        `10**(-low_reg)` and `10**(-high_reg)`

    Returns
    -------
    hyperparameters : dict
        base hyperparameters ('learning_rate', 'regularization_rate')
    """
    return {
        'learning_rate': get_learning_rate(low_lr, high_lr),
        'regularization_rate': get_regularization(low_reg, high_reg),
    }
def get_learning_rate(low=1, high=4):
    """ Return the learning rate for model training.

    NOTE: the rate is currently fixed at 0.001 (a common Adam default).
    The original behavior sampled 10**-n with n drawn uniformly from
    [low, high]; the bound parameters are kept for interface
    compatibility but are currently ignored.

    Parameters
    ----------
    low : float
        low bound of the log range (currently unused)
    high : float
        high bound of the log range (currently unused)

    Returns
    -------
    learning_rate : float
        learning rate (fixed at 0.001)
    """
    return 0.001
def get_regularization(low=1, high=4):
    """ Draw a random regularization rate 10**-n, with n sampled
    uniformly from [low, high].

    Parameters
    ----------
    low : float
        low bound of the exponent range
    high : float
        high bound of the exponent range

    Returns
    -------
    regularization_rate : float
        regularization rate in [10**-high, 10**-low]
    """
    exponent = np.random.uniform(low, high)
    return 10.0 ** -exponent
| mcfly/modelgen.py | 18,305 | Generate a hyperparameter set that define a CNN model.
Parameters
----------
min_layers : int
minimum of Conv layers
max_layers : int
maximum of Conv layers
min_filters : int
minimum number of filters per Conv layer
max_filters : int
maximum number of filters per Conv layer
min_fc_nodes : int
minimum number of hidden nodes per Dense layer
max_fc_nodes : int
maximum number of hidden nodes per Dense layer
low_lr : float
minimum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
high_lr : float
maximum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
----------
hyperparameters : dict
parameters for a CNN model
Generate a convolutional neural network (CNN) model.
The compiled Keras model is returned.
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
class_number : int
Number of classes for classification task
filters : list of ints
number of filters for each convolutional layer
fc_hidden_nodes : int
number of hidden nodes for the hidden dense layer
learning_rate : float
learning rate
regularization_rate : float
regularization rate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
Returns
-------
model : Keras model
The compiled Keras model
Generate a hyperparameter set that defines a DeepConvLSTM model.
Parameters
----------
min_conv_layers : int
minimum number of Conv layers in DeepConvLSTM model
max_conv_layers : int
maximum number of Conv layers in DeepConvLSTM model
min_conv_filters : int
minimum number of filters per Conv layer in DeepConvLSTM model
max_conv_filters : int
maximum number of filters per Conv layer in DeepConvLSTM model
min_lstm_layers : int
minimum number of Conv layers in DeepConvLSTM model
max_lstm_layers : int
maximum number of Conv layers in DeepConvLSTM model
min_lstm_dims : int
minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
max_lstm_dims : int
maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
low_lr : float
minimum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
high_lr : float
maximum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
----------
hyperparameters: dict
hyperparameters for a DeepConvLSTM model
Generate a model with convolution and LSTM layers.
See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
class_number : int
Number of classes for classification task
filters : list of ints
number of filters for each convolutional layer
lstm_dims : list of ints
number of hidden nodes for each LSTM layer
learning_rate : float
learning rate
regularization_rate : float
regularization rate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
Returns
-------
model : Keras model
The compiled Keras model
Generate a base set of hyperparameters that are necessary for any
model, but sufficient for none.
Parameters
----------
low_lr : float
minimum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
high_lr : float
maximum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
-------
hyperparameters : dict
basis hyperpameters
Generate one or multiple untrained Keras models with random hyperparameters.
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
number_of_classes : int
Number of classes for classification task
number_of_models : int
Number of models to generate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
model_type : str, optional
Type of model to build: 'CNN' or 'DeepConvLSTM'.
Default option None generates both models.
cnn_min_layers : int
minimum of Conv layers in CNN model
cnn_max_layers : int
maximum of Conv layers in CNN model
cnn_min_filters : int
minimum number of filters per Conv layer in CNN model
cnn_max_filters : int
maximum number of filters per Conv layer in CNN model
cnn_min_fc_nodes : int
minimum number of hidden nodes per Dense layer in CNN model
cnn_max_fc_nodes : int
maximum number of hidden nodes per Dense layer in CNN model
deepconvlstm_min_conv_layers : int
minimum number of Conv layers in DeepConvLSTM model
deepconvlstm_max_conv_layers : int
maximum number of Conv layers in DeepConvLSTM model
deepconvlstm_min_conv_filters : int
minimum number of filters per Conv layer in DeepConvLSTM model
deepconvlstm_max_conv_filters : int
maximum number of filters per Conv layer in DeepConvLSTM model
deepconvlstm_min_lstm_layers : int
minimum number of Conv layers in DeepConvLSTM model
deepconvlstm_max_lstm_layers : int
maximum number of Conv layers in DeepConvLSTM model
deepconvlstm_min_lstm_dims : int
minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
deepconvlstm_max_lstm_dims : int
maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
low_lr : float
minimum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
high_lr : float
maximum of log range for learning rate: learning rate is sampled
between `10**(-low_reg)` and `10**(-high_reg)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
-------
models : list
List of compiled models
Return random learning rate 10^-n where n is sampled uniformly between
low and high bounds.
Parameters
----------
low : float
low bound
high : float
high bound
Returns
-------
learning_rate : float
learning rate
Return random regularization rate 10^-n where n is sampled uniformly
between low and high bounds.
Parameters
----------
low : float
low bound
high : float
high bound
Returns
-------
regularization_rate : float
regularization rate
mcfly Copyright 2017 Netherlands eScience Center Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Comment on HPC random model choice: user-defined model choice: generate_model is a function number of samples in a time series number of channels number of classes weight initialization initialize model reshape a 2 dimensional array per file/person/object into a 3 dimensional array filt: number of filters used in a layer filters: vector of filt values reshape 3 dimensional array back into a 2 dimensional array, but now with more dept as we have the the filters for each channelmodel.add(LSTM(units=lstm_dim, return_sequences=True, activation='tanh')) comment following line for HPC dropout before the dense layer set up final dense layer such that every timestamp is given one classification model.add( TimeDistributed( Dense(units=output_dim, kernel_regularizer=l2(regularization_rate)))) model.add(Activation("softmax")) Final classification layer - per timestep model.add(Lambda(lambda x: x[:, -1, :], output_shape=[output_dim])) Pool output of all timesteps and perform classification using pooled output Final classification layer if class_number == 2: loss = 'binary_crossentropy' else: loss = 'categorical_crossentropy' number of samples in a time series number of channels number of classes weight initialization Fully connected layer Relu activation Final classification layer if class_number == 2: loss = 'binary_crossentropy' else: loss = 'categorical_crossentropy' Fixed learning rate for Adam 10 ** 
(-np.random.uniform(low, high)) | 9,677 | en | 0.717775 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functions to draw various pygimli matrices with matplotlib."""
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
def drawSparseMatrix(ax, mat, **kwargs):
    """Plot the sparsity pattern of a matrix into the given axes.

    Parameters
    ----------
    ax : mpl axis instance, optional
        Axis instance where the matrix will be plotted.
    mat: pg.matrix.SparseMatrix or pg.matrix.SparseMapMatrix

    Keyword Arguments
    -----------------
    rowOffset, colOffset : int [0]
        Shift the pattern by this many rows/columns (used when placing
        submatrices of a block matrix).
    color : matplotlib color [None]

    Returns
    -------
    mpl.lines.line2d

    Examples
    --------
    >>> import numpy as np
    >>> import pygimli as pg
    >>> from pygimli.viewer.mpl import drawSparseMatrix
    >>> A = pg.randn((10,10), seed=0)
    >>> SM = pg.core.SparseMapMatrix()
    >>> for i in range(10):
    ...     SM.setVal(i, i, 5.0)
    >>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True, sharex=True)
    >>> _ = drawSparseMatrix(ax1, A, colOffset=5, rowOffset=5, color='blue')
    >>> _ = drawSparseMatrix(ax2, SM, color='green')
    """
    row_offset = kwargs.pop('rowOffset', 0)
    col_offset = kwargs.pop('colOffset', 0)
    color = kwargs.pop('color', None)
    # Convert the pygimli sparse matrix to its COO representation so the
    # row/column indices can be shifted in place.
    coo = pg.utils.sparseMatrix2coo(mat)
    coo.row += row_offset
    coo.col += col_offset
    gci = ax.spy(coo, color=color)
    ax.autoscale(enable=True, axis='both', tight=True)
    return gci
def drawBlockMatrix(ax, mat, **kwargs):
    """Draw a view of a matrix into the axes.

    Arguments
    ---------
    ax : mpl axis instance, optional
        Axis instance where the matrix will be plotted.

    mat: pg.Matrix.BlockMatrix

    Keyword Arguments
    -----------------
    spy: bool [False]
        Draw all matrix entries instead of colored blocks

    Returns
    -------
    ax:

    Examples
    --------
    >>> import numpy as np
    >>> import pygimli as pg
    >>> I = pg.matrix.IdentityMatrix(10)
    >>> SM = pg.matrix.SparseMapMatrix()
    >>> for i in range(10):
    ...     SM.setVal(i, 10 - i, 5.0)
    ...     SM.setVal(i, i, 5.0)
    >>> B = pg.matrix.BlockMatrix()
    >>> B.add(I, 0, 0)
    0
    >>> B.add(SM, 10, 10)
    1
    >>> print(B)
    pg.matrix.BlockMatrix of size 20 x 21 consisting of 2 submatrices.
    >>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True)
    >>> _ = pg.show(B, ax=ax1)
    >>> _ = pg.show(B, spy=True, ax=ax2)
    """
    if kwargs.pop('spy', False):
        # Spy mode: draw the individual entries of every submatrix,
        # one color per matrix ID.
        gci = []
        ids = pg.unique([e.matrixID for e in mat.entries()])
        cMap = pg.plt.cm.get_cmap("Set3", len(ids))
        for e in mat.entries():
            mid = e.matrixID
            mati = mat.mat(mid)
            if isinstance(mati, pg.core.IdentityMatrix):
                # IdentityMatrix has no sparse representation; expand to dense.
                mati = np.eye(mati.size())
            # NOTE(review): mid is used directly as the colormap index, which
            # presumes matrix IDs are contiguous starting at 0 -- TODO confirm.
            gci.append(drawSparseMatrix(ax, mati,
                                        rowOffset=e.rowStart,
                                        colOffset=e.colStart,
                                        color=cMap(mid)))
        return gci, None
    else:
        # Block mode: draw one colored rectangle per submatrix and color it
        # by matrix ID via the PLC marker.
        plcs = []
        for e in mat.entries():
            mid = e.matrixID
            widthy = mat.mat(mid).rows() - 0.1 # to make sure non-matrix regions are not connected in the plot
            widthx = mat.mat(mid).cols() - 0.1
            plc = pg.meshtools.createRectangle([e.colStart, e.rowStart],
                                               [e.colStart + widthx, e.rowStart + widthy],
                                               marker=mid)
            plcs.append(plc)
        bm = pg.meshtools.mergePLC(plcs)
        gci, cBar = pg.viewer.mpl.drawPLC(ax, bm, fitView=False)
        # Use matrix convention: row 0 at the top, column axis along the top.
        ax.invert_yaxis()
        ax.xaxis.tick_top()
        cBar.set_label("Matrix ID")
        if len(mat.entries()) > 10:
            # The discrete Set3 palette runs out of distinct colors;
            # fall back to a continuous colormap.
            gci.set_cmap("viridis")
        return gci, cBar
| pygimli/viewer/mpl/matrixview.py | 3,685 | Draw a view of a matrix into the axes.
Arguments
---------
ax : mpl axis instance, optional
Axis instance where the matrix will be plotted.
mat: pg.Matrix.BlockMatrix
Keyword Arguments
-----------------
spy: bool [False]
Draw all matrix entries instead of colored blocks
Returns
-------
ax:
Examples
--------
>>> import numpy as np
>>> import pygimli as pg
>>> I = pg.matrix.IdentityMatrix(10)
>>> SM = pg.matrix.SparseMapMatrix()
>>> for i in range(10):
... SM.setVal(i, 10 - i, 5.0)
... SM.setVal(i, i, 5.0)
>>> B = pg.matrix.BlockMatrix()
>>> B.add(I, 0, 0)
0
>>> B.add(SM, 10, 10)
1
>>> print(B)
pg.matrix.BlockMatrix of size 20 x 21 consisting of 2 submatrices.
>>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True)
>>> _ = pg.show(B, ax=ax1)
>>> _ = pg.show(B, spy=True, ax=ax2)
Draw a view of a matrix into the axes.
Parameters
----------
ax : mpl axis instance, optional
Axis instance where the matrix will be plotted.
mat: pg.matrix.SparseMatrix or pg.matrix.SparseMapMatrix
Returns
-------
mpl.lines.line2d
Examples
--------
>>> import numpy as np
>>> import pygimli as pg
>>> from pygimli.viewer.mpl import drawSparseMatrix
>>> A = pg.randn((10,10), seed=0)
>>> SM = pg.core.SparseMapMatrix()
>>> for i in range(10):
... SM.setVal(i, i, 5.0)
>>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True, sharex=True)
>>> _ = drawSparseMatrix(ax1, A, colOffset=5, rowOffset=5, color='blue')
>>> _ = drawSparseMatrix(ax2, SM, color='green')
Functions to draw various pygimli matrices with matplotlib.
!/usr/bin/env python -*- coding: utf-8 -*- to make sure non-matrix regions are not connected in the plot | 1,648 | en | 0.515833 |
"""This is the core module for accessing using and accessing the bot"""
from .core import Bot
| bot/__init__.py | 95 | This is the core module for accessing using and accessing the bot | 65 | en | 0.64874 |
from abaqusConstants import *
from .BoundaryConditionState import BoundaryConditionState
class DisplacementBaseMotionBCState(BoundaryConditionState):
    """The DisplacementBaseMotionBCState object stores the propagating data for a displacement
    base motion boundary condition in a step. One instance of this object is created internally
    by the DisplacementBaseMotionBC object for each step. The instance is also deleted
    internally by the DisplacementBaseMotionBC object.
    The DisplacementBaseMotionBCState object has no constructor or methods.
    The DisplacementBaseMotionBCState object is derived from the BoundaryConditionState
    object.

    Attributes
    ----------
    amplitudeState: SymbolicConstant
        A SymbolicConstant specifying the propagation state of the amplitude reference. Possible
        values are UNSET, SET, UNCHANGED, FREED, and MODIFIED.
    status: SymbolicConstant
        A SymbolicConstant specifying the propagation state of the :py:class:`~abaqus.BoundaryCondition.BoundaryConditionState.BoundaryConditionState` object. Possible values are:
        NOT_YET_ACTIVE
        CREATED
        PROPAGATED
        MODIFIED
        DEACTIVATED
        NO_LONGER_ACTIVE
        TYPE_NOT_APPLICABLE
        INSTANCE_NOT_APPLICABLE
        PROPAGATED_FROM_BASE_STATE
        MODIFIED_FROM_BASE_STATE
        DEACTIVATED_FROM_BASE_STATE
        BUILT_INTO_MODES
    amplitude: str
        A String specifying the name of the amplitude reference. The String is empty if the
        boundary condition has no amplitude reference.

    Notes
    -----
    This object can be accessed by:

    .. code-block:: python

        import load
        mdb.models[name].steps[name].boundaryConditionStates[name]

    The corresponding analysis keywords are:
    - BASE MOTION
    """

    # A SymbolicConstant specifying the propagation state of the amplitude reference. Possible
    # values are UNSET, SET, UNCHANGED, FREED, and MODIFIED.
    amplitudeState: SymbolicConstant = None

    # A SymbolicConstant specifying the propagation state of the BoundaryConditionState object. Possible values are:
    # NOT_YET_ACTIVE
    # CREATED
    # PROPAGATED
    # MODIFIED
    # DEACTIVATED
    # NO_LONGER_ACTIVE
    # TYPE_NOT_APPLICABLE
    # INSTANCE_NOT_APPLICABLE
    # PROPAGATED_FROM_BASE_STATE
    # MODIFIED_FROM_BASE_STATE
    # DEACTIVATED_FROM_BASE_STATE
    # BUILT_INTO_MODES
    status: SymbolicConstant = None

    # A String specifying the name of the amplitude reference. The String is empty if the
    # boundary condition has no amplitude reference.
    amplitude: str = ''
| src/abaqus/BoundaryCondition/DisplacementBaseMotionBCState.py | 2,646 | The DisplacementBaseMotionBCState object stores the propagating data for a velocity base
motion boundary condition in a step. One instance of this object is created internally
by the DisplacementBaseMotionBC object for each step. The instance is also deleted
internally by the DisplacementBaseMotionBC object.
The DisplacementBaseMotionBCState object has no constructor or methods.
The DisplacementBaseMotionBCState object is derived from the BoundaryConditionState
object.
Attributes
----------
amplitudeState: SymbolicConstant
A SymbolicConstant specifying the propagation state of the amplitude reference. Possible
values are UNSET, SET, UNCHANGED, FREED, and MODIFIED.
status: SymbolicConstant
A SymbolicConstant specifying the propagation state of the :py:class:`~abaqus.BoundaryCondition.BoundaryConditionState.BoundaryConditionState` object. Possible values are:
NOT_YET_ACTIVE
CREATED
PROPAGATED
MODIFIED
DEACTIVATED
NO_LONGER_ACTIVE
TYPE_NOT_APPLICABLE
INSTANCE_NOT_APPLICABLE
PROPAGATED_FROM_BASE_STATE
MODIFIED_FROM_BASE_STATE
DEACTIVATED_FROM_BASE_STATE
BUILT_INTO_MODES
amplitude: str
A String specifying the name of the amplitude reference. The String is empty if the
boundary condition has no amplitude reference.
Notes
-----
This object can be accessed by:
.. code-block:: python
import load
mdb.models[name].steps[name].boundaryConditionStates[name]
The corresponding analysis keywords are:
- BASE MOTION
A SymbolicConstant specifying the propagation state of the amplitude reference. Possible values are UNSET, SET, UNCHANGED, FREED, and MODIFIED. A SymbolicConstant specifying the propagation state of the BoundaryConditionState object. Possible values are: NOT_YET_ACTIVE CREATED PROPAGATED MODIFIED DEACTIVATED NO_LONGER_ACTIVE TYPE_NOT_APPLICABLE INSTANCE_NOT_APPLICABLE PROPAGATED_FROM_BASE_STATE MODIFIED_FROM_BASE_STATE DEACTIVATED_FROM_BASE_STATE BUILT_INTO_MODES A String specifying the name of the amplitude reference. The String is empty if the boundary condition has no amplitude reference. | 2,117 | en | 0.690103 |
from abc import abstractmethod, ABCMeta
from collections import deque
from functools import partial
from plenum.common.constants import VIEW_CHANGE_START, PreVCStrategies, VIEW_CHANGE_CONTINUE
from plenum.common.messages.node_messages import ViewChangeStartMessage, ViewChangeContinueMessage, PrePrepare, Prepare, \
Commit, Ordered
from stp_zmq.zstack import Quota
from stp_core.common.log import getlogger
logger = getlogger()
class PreViewChangeStrategy(metaclass=ABCMeta):
    """Base class for routines executed before the viewChange procedure starts."""

    def __init__(self, view_changer, node):
        self.view_changer = view_changer
        self.node = node

    @abstractmethod
    def prepare_view_change(self, proposed_view_no: int):
        """Prepare the node for a view change to `proposed_view_no`."""
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def on_view_change_started(obj, msg, frm):
        """React to a view-change-start message from `frm`."""
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def on_view_change_continued(obj, msg):
        """React to a view-change-continue message."""
        raise NotImplementedError

    @abstractmethod
    def on_strategy_complete(self):
        """Finalize the strategy once the view change can proceed."""
        raise NotImplementedError
class VCStartMsgStrategy(PreViewChangeStrategy):
    """Strategy logic:
    - when startViewChange method was called, then put 'local' ViewChangeStart message and set corresponded handlers
    - on processing startViewChange message on the nodeInBoxRouter's side the next steps will be performed:
      - call nodestack.service method with extended quota parameters for getting as much as possible 3PC
        messages from ZMQ's side
      - process all messages from nodeInBox queue and stash all not 3PC
      - append to replica's inBox queue ViewChangeContinueMessage
    - then replica's inBox queue will be processed and after ViewChangeContinueMessage view_change procedure
      will be continued in the normal way
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.stashedNodeInBox = deque()  # not-3PC messages held back during preparation
        self.replica = self.node.master_replica
        self.is_preparing = False

    def prepare_view_change(self, proposed_view_no: int):
        """Register the handlers and queue a local ViewChangeStartMessage.

        Idempotent: does nothing if preparation is already in progress.
        """
        if not self.is_preparing:
            logger.info("VCStartMsgStrategy: Starting prepare_view_change process")
            self._set_req_handlers()
            vcs_msg = ViewChangeStartMessage(proposed_view_no)
            nodeInBox = self.node.nodeInBox
            nodeInBox.append((vcs_msg, self.node.name))
            self.is_preparing = True

    def on_strategy_complete(self):
        """Return the stashed messages to the node's inbox; view change may start."""
        logger.info("VCStartMsgStrategy: on_strategy_complete - View Change can be started")
        self.unstash_messages()
        self.is_preparing = False

    @staticmethod
    async def _process_node_inbox_3PC(node):
        """Process every 3PC message for the current view on the master instance;
        return the remaining (stashed) messages as a deque."""
        current_view_no = node.viewNo
        stashed_not_3PC = deque()
        types_3PC = (PrePrepare, Prepare, Commit, Ordered)
        while node.nodeInBox:
            m = node.nodeInBox.popleft()
            if len(m) == 2 and isinstance(m[0], types_3PC) and \
                    m[0].viewNo == current_view_no and \
                    m[0].instId == node.instances.masterId:
                await node.process_one_node_message(m)
            else:
                stashed_not_3PC.append(m)
        return stashed_not_3PC

    @staticmethod
    async def on_view_change_started(node, msg: ViewChangeStartMessage, frm):
        """Handler for processing ViewChangeStart message on node's nodeInBoxRouter."""
        strategy = node.view_changer.pre_vc_strategy
        proposed_view_no = msg.proposed_view_no
        logger.info("VCStartMsgStrategy: got ViewChangeStartMessage with proposed_view_no: {}".format(proposed_view_no))
        if proposed_view_no > node.view_changer.view_no:
            vcc_msg = ViewChangeContinueMessage(proposed_view_no)
            # Drain as many pending 3PC messages from ZMQ as possible
            # before the view change starts.
            quota = Quota(
                count=node.config.EXTENDED_QUOTA_MULTIPLIER_BEFORE_VC * node.quota_control.node_quota.count,
                size=node.config.EXTENDED_QUOTA_MULTIPLIER_BEFORE_VC * node.quota_control.node_quota.size)
            msgs_count = await node.nodestack.service(limit=None,
                                                      quota=quota)
            logger.info("VCStartMsgStrategy: Got {} messages from nodestack".format(msgs_count))
            strategy.stashedNodeInBox = await VCStartMsgStrategy._process_node_inbox_3PC(node)
            logger.info("VCStartMsgStrategy: {} not 3PC msgs was stashed".format(len(strategy.stashedNodeInBox)))
            node.master_replica.inBox.append(vcc_msg)

    @staticmethod
    def on_view_change_continued(replica, msg: ViewChangeContinueMessage):
        """Handler for processing ViewChangeContinue message on replica's inBoxRouter."""
        strategy = replica.node.view_changer.pre_vc_strategy
        proposed_view_no = msg.proposed_view_no
        replica.logger.info("VCStartMsgStrategy: got ViewChangeContinueMessage with proposed_view_no: {}".format(proposed_view_no))
        if proposed_view_no > replica.node.viewNo:
            # Return stashed not-3PC msgs to nodeInBox queue and start ViewChange.
            # Critical assumption: all 3PC msgs passed from node already processed.
            strategy.unstash_messages()
            replica.logger.info("VCStartMsgStrategy: continue view_change procedure in a normal way")
            replica.node.view_changer.startViewChange(proposed_view_no, continue_vc=True)
            strategy.is_preparing = False

    def unstash_messages(self):
        """Put the stashed messages back at the front of nodeInBox, preserving order."""
        logger.info("VCStartMsgStrategy: unstash all not 3PC msgs to nodeInBox queue")
        while self.stashedNodeInBox:
            self.node.nodeInBox.appendleft(self.stashedNodeInBox.pop())

    def _set_req_handlers(self):
        """Register the ViewChangeStart/Continue handlers on the node and replica
        routers, if not already registered."""
        node_msg_router = self.node.nodeMsgRouter
        replica_msg_router = self.replica.inBoxRouter
        if ViewChangeStartMessage not in node_msg_router.routes:
            processor = partial(VCStartMsgStrategy.on_view_change_started,
                                self.node)
            node_msg_router.add((ViewChangeStartMessage, processor))
        if ViewChangeContinueMessage not in replica_msg_router.routes:
            processor = partial(VCStartMsgStrategy.on_view_change_continued,
                                self.replica)
            replica_msg_router.add((ViewChangeContinueMessage, processor))
# Registry mapping strategy identifiers (PreVCStrategies constants from
# config) to the strategy classes implementing them.
preVCStrategies = {
    PreVCStrategies.VC_START_MSG_STRATEGY: VCStartMsgStrategy
}
| plenum/server/view_change/pre_view_change_strategies.py | 6,439 | Abstract class for routines before starting viewChange procedure
Strategy logic:
- when startViewChange method was called, then put 'local' ViewChangeStart message and set corresponded handlers
- on processing startViewChange message on the nodeInBoxRouter's side the next steps will be performed:
- call nodestack.service method with extended quota parameters for getting as much as possible 3PC
messages from ZMQ's side
- process all messages from nodeInBox queue and stash all not 3PC
- append to replica's inBox queue ViewChangeContinueMessage
- then replica's inBox queue will be processed and after ViewChangeContinueMessage view_change procedure
will be continued in the normal way | 708 | en | 0.807794 |
#!/usr/bin/env python3
import os
from aws_cdk import core as cdk
# For consistency with TypeScript code, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import core
from aws_securityhub_falco_ecs_eks_integration.aws_securityhub_falco_ecs_eks_integration_stack import AwsSecurityhubFalcoEcsEksIntegrationStack
# Instantiate the CDK app and register the single integration stack on it.
app = core.App()
AwsSecurityhubFalcoEcsEksIntegrationStack(app, "AwsSecurityhubFalcoEcsEksIntegrationStack",
    # If you don't specify 'env', this stack will be environment-agnostic.
    # Account/Region-dependent features and context lookups will not work,
    # but a single synthesized template can be deployed anywhere.

    # Uncomment the next line to specialize this stack for the AWS Account
    # and Region that are implied by the current CLI configuration.

    #env=core.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')),

    # Uncomment the next line if you know exactly what Account and Region you
    # want to deploy the stack to. */

    #env=core.Environment(account='123456789012', region='us-east-1'),

    # For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html
    )

# Synthesize the CloudFormation template(s) into cdk.out.
app.synth()
| app.py | 1,436 | !/usr/bin/env python3 For consistency with TypeScript code, `cdk` is the preferred import name for the CDK's core module. The following line also imports it as `core` for use with examples from the CDK Developer's Guide, which are in the process of being updated to use `cdk`. You may delete this import if you don't need it. If you don't specify 'env', this stack will be environment-agnostic. Account/Region-dependent features and context lookups will not work, but a single synthesized template can be deployed anywhere. Uncomment the next line to specialize this stack for the AWS Account and Region that are implied by the current CLI configuration.env=core.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), Uncomment the next line if you know exactly what Account and Region you want to deploy the stack to. */env=core.Environment(account='123456789012', region='us-east-1'), For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html | 1,017 | en | 0.825708 |
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import logging
from tests import conftest
from core.test_run import TestRun
from api.cas import git
from api.cas import cas_module
from test_utils import os_utils
from test_utils.output import CmdException
def rsync_opencas_sources():
    """Mirror the local Open CAS checkout onto the DUT working directory."""
    TestRun.LOGGER.info("Copying Open CAS repository to DUT")
    source = f"{TestRun.usr.repo_dir}/"
    target = f"{TestRun.usr.working_dir}/"
    # Skip local test results; delete stale remote files so both trees match.
    TestRun.executor.rsync_to(
        source,
        target,
        exclude_list=["test/functional/results/"],
        delete=True)
def _clean_opencas_repo():
    """Run `make distclean` in the remote Open CAS working directory."""
    TestRun.LOGGER.info("Cleaning Open CAS repo")
    clean_cmd = f"cd {TestRun.usr.working_dir} && make distclean"
    result = TestRun.executor.run(clean_cmd)
    if result.exit_code != 0:
        raise CmdException("make distclean command executed with nonzero status", result)
def build_opencas():
    """Configure and compile Open CAS on the DUT."""
    TestRun.LOGGER.info("Building Open CAS")
    build_cmd = f"cd {TestRun.usr.working_dir} && ./configure && make -j"
    result = TestRun.executor.run(build_cmd)
    if result.exit_code != 0:
        raise CmdException("Make command executed with nonzero status", result)
def install_opencas():
    """Install Open CAS on the DUT and verify that `casadm` works."""
    TestRun.LOGGER.info("Installing Open CAS")
    install = TestRun.executor.run(f"cd {TestRun.usr.working_dir} && make install")
    if install.exit_code != 0:
        raise CmdException("Error while installing Open CAS", install)
    # A successful install must leave a runnable casadm binary behind.
    TestRun.LOGGER.info("Check if casadm is properly installed.")
    casadm = TestRun.executor.run("casadm -V")
    if casadm.exit_code != 0:
        raise CmdException("'casadm -V' command returned an error", casadm)
    TestRun.LOGGER.info(casadm.stdout)
def set_up_opencas(version=None):
    """Build and install Open CAS from a pristine repository.

    :param version: optional CAS version to check out before building
    """
    _clean_opencas_repo()
    if version:
        git.checkout_cas_version(version)
    build_opencas()
    install_opencas()
def uninstall_opencas():
    """Uninstall Open CAS from the DUT.

    Raises:
        CmdException: if CAS is not installed, or `make uninstall` fails.
    """
    TestRun.LOGGER.info("Uninstalling Open CAS")
    output = TestRun.executor.run("casadm -V")
    if output.exit_code != 0:
        raise CmdException("Open CAS is not properly installed", output)
    # Re-capture the command result here: the original discarded the
    # `make uninstall` output and re-checked the stale `casadm -V` result,
    # so uninstall failures were silently ignored.
    output = TestRun.executor.run(
        f"cd {TestRun.usr.working_dir} && "
        f"make uninstall")
    if output.exit_code != 0:
        raise CmdException("There was an error during uninstall process", output)
def reinstall_opencas(version=None):
    """Remove any existing installation, then build and install `version`."""
    if check_if_installed():
        uninstall_opencas()
    set_up_opencas(version)
def check_if_installed():
    """Return True when casadm is on PATH and the cache kernel module is loaded."""
    TestRun.LOGGER.info("Check if Open-CAS-Linux is installed")
    which_result = TestRun.executor.run("which casadm")
    module_loaded = os_utils.is_kernel_module_loaded(cas_module.CasModule.cache.value)
    if which_result.exit_code == 0 and module_loaded:
        TestRun.LOGGER.info("CAS is installed")
        return True
    TestRun.LOGGER.info("CAS not installed")
    return False
| test/functional/api/cas/installer.py | 2,887 | Copyright(c) 2019-2021 Intel Corporation SPDX-License-Identifier: BSD-3-Clause | 78 | en | 0.288062 |
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import os
import sys
from tqdm import tqdm
from pyserini.dsearch import SimpleDenseSearcher
from pyserini.query_iterator import get_query_iterator, TopicsFormat
from pyserini.output_writer import get_output_writer, OutputFormat
from pyserini.search import ImpactSearcher, SimpleSearcher
from pyserini.hsearch import HybridSearcher
from pyserini.dsearch.__main__ import define_dsearch_args, init_query_encoder
from pyserini.search.__main__ import define_search_args, set_bm25_parameters
# Fixes this error: "OMP: Error #15: Initializing libomp.a, but found libomp.dylib already initialized."
# https://stackoverflow.com/questions/53014306/error-15-initializing-libiomp5-dylib-but-found-libiomp5-dylib-already-initial
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
def define_fusion_args(parser):
    """Register the fusion-stage options on an argparse (sub)parser."""
    add = parser.add_argument
    add('--alpha', type=float, metavar='num', required=False, default=0.1,
        help="alpha for hybrid search")
    add('--hits', type=int, required=False, default=10,
        help='number of hits from dense and sparse')
    add('--normalization', action='store_true', required=False,
        help='hybrid score with normalization')
    add('--weight-on-dense', action='store_true', required=False,
        help='weight on dense part')
def parse_args(parser, commands):
    """Split sys.argv into per-subcommand chunks and parse each separately.

    Returns a namespace with one attribute per subcommand name: None for
    subcommands absent from the command line, otherwise a sub-namespace
    holding that subcommand's parsed options.
    """
    # Group argv tokens under the subcommand name that precedes them; the
    # first chunk holds any tokens appearing before the first subcommand.
    chunks = [[]]
    for token in sys.argv[1:]:
        if token in commands.choices:
            chunks.append([token])
        else:
            chunks[-1].append(token)
    # Start from a namespace where every known subcommand is unset.
    args = argparse.Namespace(**{name: None for name in commands.choices})
    parser.parse_args(chunks[0], namespace=args)  # global (pre-command) args
    for chunk in chunks[1:]:
        sub_namespace = argparse.Namespace()
        setattr(args, chunk[0], sub_namespace)
        parser.parse_args(chunk, namespace=sub_namespace)
    return args
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Conduct a hybrid search on dense+sparse indexes.')
    commands = parser.add_subparsers(title='sub-commands')
    # One sub-command per configuration group: 'dense' and 'sparse' configure
    # the two retrievers, 'fusion' the score interpolation, 'run' the topics
    # and output handling.
    dense_parser = commands.add_parser('dense')
    define_dsearch_args(dense_parser)
    sparse_parser = commands.add_parser('sparse')
    define_search_args(sparse_parser)
    fusion_parser = commands.add_parser('fusion')
    define_fusion_args(fusion_parser)
    run_parser = commands.add_parser('run')
    run_parser.add_argument('--topics', type=str, metavar='topic_name', required=False,
                            help="Name of topics. Available: msmarco-passage-dev-subset.")
    run_parser.add_argument('--hits', type=int, metavar='num', required=False, default=1000, help="Number of hits.")
    run_parser.add_argument('--topics-format', type=str, metavar='format', default=TopicsFormat.DEFAULT.value,
                            help=f"Format of topics. Available: {[x.value for x in list(TopicsFormat)]}")
    run_parser.add_argument('--output-format', type=str, metavar='format', default=OutputFormat.TREC.value,
                            help=f"Format of output. Available: {[x.value for x in list(OutputFormat)]}")
    run_parser.add_argument('--output', type=str, metavar='path', required=False, help="Path to output file.")
    run_parser.add_argument('--max-passage', action='store_true',
                            default=False, help="Select only max passage from document.")
    run_parser.add_argument('--max-passage-hits', type=int, metavar='num', required=False, default=100,
                            help="Final number of hits when selecting only max passage.")
    run_parser.add_argument('--max-passage-delimiter', type=str, metavar='str', required=False, default='#',
                            help="Delimiter between docid and passage id.")
    run_parser.add_argument('--batch-size', type=int, metavar='num', required=False,
                            default=1, help="Specify batch size to search the collection concurrently.")
    run_parser.add_argument('--threads', type=int, metavar='num', required=False,
                            default=1, help="Maximum number of threads to use.")
    args = parse_args(parser, commands)
    query_iterator = get_query_iterator(args.run.topics, TopicsFormat(args.run.topics_format))
    topics = query_iterator.topics
    query_encoder = init_query_encoder(args.dense.encoder,
                                       args.dense.tokenizer,
                                       args.run.topics,
                                       args.dense.encoded_queries,
                                       args.dense.device,
                                       args.dense.query_prefix)
    if os.path.exists(args.dense.index):
        # create searcher from index directory
        dsearcher = SimpleDenseSearcher(args.dense.index, query_encoder)
    else:
        # create searcher from prebuilt index name
        dsearcher = SimpleDenseSearcher.from_prebuilt_index(args.dense.index, query_encoder)
    if not dsearcher:
        exit()
    if os.path.exists(args.sparse.index):
        # create searcher from index directory
        if args.sparse.impact:
            ssearcher = ImpactSearcher(args.sparse.index, args.sparse.encoder, args.sparse.min_idf)
        else:
            ssearcher = SimpleSearcher(args.sparse.index)
    else:
        # create searcher from prebuilt index name
        if args.sparse.impact:
            ssearcher = ImpactSearcher.from_prebuilt_index(args.sparse.index, args.sparse.encoder, args.sparse.min_idf)
        else:
            ssearcher = SimpleSearcher.from_prebuilt_index(args.sparse.index)
    if not ssearcher:
        exit()
    set_bm25_parameters(ssearcher, args.sparse.index, args.sparse.k1, args.sparse.b)
    if args.sparse.language != 'en':
        ssearcher.set_language(args.sparse.language)
    # The hybrid searcher interpolates scores from the two retrievers.
    hsearcher = HybridSearcher(dsearcher, ssearcher)
    if not hsearcher:
        exit()
    # build output path
    output_path = args.run.output
    print(f'Running {args.run.topics} topics, saving to {output_path}...')
    tag = 'hybrid'
    output_writer = get_output_writer(output_path, OutputFormat(args.run.output_format), 'w',
                                      max_hits=args.run.hits, tag=tag, topics=topics,
                                      use_max_passage=args.run.max_passage,
                                      max_passage_delimiter=args.run.max_passage_delimiter,
                                      max_passage_hits=args.run.max_passage_hits)
    with output_writer:
        batch_topics = list()
        batch_topic_ids = list()
        for index, (topic_id, text) in enumerate(tqdm(query_iterator, total=len(topics.keys()))):
            if args.run.batch_size <= 1 and args.run.threads <= 1:
                # Sequential path: search and write one query at a time.
                hits = hsearcher.search(text, args.fusion.hits, args.run.hits, args.fusion.alpha, args.fusion.normalization, args.fusion.weight_on_dense)
                results = [(topic_id, hits)]
            else:
                # Batched path: accumulate queries and flush every
                # batch_size queries (and once more at the final topic).
                batch_topic_ids.append(str(topic_id))
                batch_topics.append(text)
                if (index + 1) % args.run.batch_size == 0 or \
                        index == len(topics.keys()) - 1:
                    results = hsearcher.batch_search(
                        batch_topics, batch_topic_ids, args.fusion.hits, args.run.hits, args.run.threads,
                        args.fusion.alpha, args.fusion.normalization, args.fusion.weight_on_dense)
                    results = [(id_, results[id_]) for id_ in batch_topic_ids]
                    batch_topic_ids.clear()
                    batch_topics.clear()
                else:
                    continue
            for topic, hits in results:
                output_writer.write(topic, hits)
            results.clear()
| pyserini/hsearch/__main__.py | 8,453 | Pyserini: Reproducible IR research with sparse and dense representations Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Fixes this error: "OMP: Error 15: Initializing libomp.a, but found libomp.dylib already initialized." https://stackoverflow.com/questions/53014306/error-15-initializing-libiomp5-dylib-but-found-libiomp5-dylib-already-initial Divide argv by commands Initialize namespace Parse each command Without command Commands create searcher from index directory create searcher from prebuilt index name create searcher from index directory create searcher from prebuilt index name build output path | 1,078 | en | 0.765018 |
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from sentry.models import GroupTagValue, TagKey, TagValue
from sentry.testutils import TestCase
class GroupTagExportTest(TestCase):
    """Functional test for the per-issue tag CSV export endpoint."""
    def test_simple(self):
        # Create one tag (key/value) attached to one group; the export
        # should emit a header row plus exactly one data row for it.
        key, value = 'foo', 'bar'
        # Drop microsecond value for MySQL
        now = timezone.now().replace(microsecond=0)
        project = self.create_project()
        group = self.create_group(project=project)
        TagKey.objects.create(project=project, key=key)
        TagValue.objects.create(
            project=project,
            key=key,
            value=value,
        )
        group_tag_value = GroupTagValue.objects.create(
            project=project,
            group=group,
            key=key,
            value=value,
            times_seen=1,
            first_seen=now - timedelta(hours=1),
            last_seen=now,
        )
        self.login_as(user=self.user)
        url = '/{}/{}/issues/{}/tags/{}/export/'.format(
            project.organization.slug, project.slug, group.id, key
        )
        response = self.client.get(url)
        # The endpoint streams CSV rather than buffering the whole file.
        assert response.status_code == 200
        assert response.streaming
        assert response['Content-Type'] == 'text/csv'
        rows = list(response.streaming_content)
        for idx, row in enumerate(rows):
            row = row.decode('utf-8')
            # CSV rows are CRLF-terminated per RFC 4180.
            assert row.endswith(u'\r\n')
            bits = row[:-2].split(',')
            if idx == 0:
                # Header row.
                assert bits == ['value', 'times_seen', 'last_seen', 'first_seen']
            else:
                # Single data row with ISO-8601 timestamps.
                assert bits[0] == value
                assert bits[1] == '1'
                assert bits[2] == group_tag_value.last_seen.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
                assert bits[3] == group_tag_value.first_seen.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
| tests/sentry/web/frontend/test_group_tag_export.py | 1,877 | Drop microsecond value for MySQL | 32 | en | 0.571744 |
ifconfig -a | grep PROMISC
cat /var/log/messages |grep promisc
1 #!/usr/bin/python 2 3 import sys 4 from scapy.all import promiscping 5 6 if len(sys.argv) < 2: 7 print sys.argv[0] + " <net>" 8 sys.exit() 9 10 promiscping(sys.argv[1]) | 05_tcp_ip_tricks/Sniffer Detection.py | 237 | !/usr/bin/python 2 3 import sys 4 from scapy.all import promiscping 5 6 if len(sys.argv) < 2: 7 print sys.argv[0] + " <net>" 8 sys.exit() 9 10 promiscping(sys.argv[1]) | 167 | en | 0.170636 |
# -*- coding: utf-8 -*-
from functools import cache
INPUT = 33100000
def sigma_pentagonal_numbers(limit):
    """Yield generalized pentagonal numbers up to and including `limit`.

    Indices run 1, -1, 2, -2, 3, ... as used by Euler's recurrence for the
    sigma (divisor-sum) function.

    >>> list(sigma_pentagonal_numbers(16))
    [1, 2, 5, 7, 12, 15]
    """
    k = 1
    term = 1
    while term <= limit:
        yield term
        # Alternate 1, -1, 2, -2, ...
        k = -k if k > 0 else 1 - k
        term = k * (3 * k - 1) // 2
def sigma_sign_generator():
    """Yield the repeating sign pattern +1, +1, -1, -1 of Euler's recurrence."""
    signs = (1, 1, -1, -1)
    i = 0
    while True:
        yield signs[i & 3]
        i += 1
@cache
def presents_for_house(house):
    """Return how many presents house `house` receives (10x sum of divisors).

    Evaluates sigma(house) via Euler's pentagonal-number recurrence:
    https://math.stackexchange.com/a/22744

    >>> presents_for_house(1)
    10
    >>> presents_for_house(2)
    30
    >>> presents_for_house(3)
    40
    >>> presents_for_house(8)
    150
    >>> presents_for_house(9)
    130
    """
    if house == 1:
        return 10
    total = 0
    signs = sigma_sign_generator()
    for pentagonal in sigma_pentagonal_numbers(house):
        remainder = house - pentagonal
        if remainder:
            # Recurse on the smaller argument of the recurrence.
            total += presents_for_house(remainder) * next(signs)
        else:
            # Boundary term of the recurrence contributes sigma(0) := house.
            total += house * next(signs) * 10
    return total
def part1(data):
    """Return the first house that receives at least `data` presents,
    where elf `e` delivers 10*e presents to every multiple of e.

    # Takes too long so commented out
    # >>> part1(INPUT)
    # 776160
    """
    house = 0
    presents = 0
    # Renamed from `max`, which shadowed the builtin of the same name.
    peak = 0
    while presents < data:
        house += 1
        presents = presents_for_house(house)
        if presents > peak:
            peak = presents
        print(peak)  # progress trace kept from the original implementation
    return house
def part2(data):
    """Return the first house receiving at least `data` presents when each
    elf delivers 11x its number to its first 50 multiples only.

    >>> part2(INPUT)
    786240
    """
    # Upper bound on houses to simulate; it shrinks as soon as any house
    # reaches the target, since no later house can be the answer.  The
    # original seeded this with the module constant INPUT instead of
    # `data`, which made the function wrong for any other target.
    upper_limit = data
    house = [0] * (upper_limit + 1)
    elf = 1
    while elf <= upper_limit:
        # Each elf visits at most its first 50 multiples.
        elf_end = min(elf * 50, upper_limit)
        for number in range(elf, elf_end + 1, elf):
            index = number - 1
            house[index] += 11 * elf
            if house[index] >= data:
                upper_limit = min(number, upper_limit)
        elf += 1
    for i, value in enumerate(house):
        if value >= data:
            return i + 1
    raise ValueError()
def main():
    """Solve both puzzle parts for the fixed input and print the answers."""
    for solver in (part1, part2):
        print(solver(INPUT))


if __name__ == "__main__":
    main()
| advent/year2015/day20.py | 2,091 | # Takes too long so commented out
# >>> part1(INPUT)
# 776160
>>> part2(INPUT)
786240
https://math.stackexchange.com/a/22744
>>> presents_for_house(1)
10
>>> presents_for_house(2)
30
>>> presents_for_house(3)
40
>>> presents_for_house(8)
150
>>> presents_for_house(9)
130
>>> list(sigma_pentagonal_numbers(16))
[1, 2, 5, 7, 12, 15]
-*- coding: utf-8 -*- | 356 | en | 0.689199 |
import datetime
import functools
import os
import subprocess
def get_version(version=None):
    """Return a PEP 440-compliant version string derived from VERSION.

    The result is 'X.Y[.Z]' plus either '.devN' for pre-alpha snapshots
    (N being the latest git commit timestamp) or an 'aN'/'bN'/'rcN'
    pre-release suffix.
    """
    version = get_complete_version(version)
    main = get_main_version(version)
    suffix = ''
    if version[3] == 'alpha' and version[4] == 0:
        # Pre-alpha snapshot: tag with the latest git changeset, if known.
        git_changeset = get_git_changeset()
        if git_changeset:
            suffix = '.dev%s' % git_changeset
    elif version[3] != 'final':
        suffix = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}[version[3]] + str(version[4])
    return main + suffix
def get_main_version(version=None):
    """Return the main version (X.Y[.Z]) from VERSION, dropping a zero micro."""
    version = get_complete_version(version)
    count = 3 if version[2] else 2
    return '.'.join(str(part) for part in version[:count])
def get_complete_version(version=None):
    """Return a five-element django version tuple.

    When `version` is given, validate its shape and release-level field and
    return it unchanged; otherwise fall back to django.VERSION.
    """
    if version is not None:
        assert len(version) == 5
        assert version[3] in ('alpha', 'beta', 'rc', 'final')
        return version
    # Imported lazily so merely importing this module does not pull django in.
    from django import VERSION
    return VERSION
def get_docs_version(version=None):
    """Return the docs version label: 'dev' for pre-releases, else 'X.Y'."""
    version = get_complete_version(version)
    return '%d.%d' % version[:2] if version[3] == 'final' else 'dev'
@functools.lru_cache()
def get_git_changeset():
    """Return a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS
    format, or None when it cannot be determined. The value isn't
    guaranteed unique, but collisions are unlikely enough for development
    version numbers.
    """
    # The repository root is two directory levels above this file.
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    git_log = subprocess.Popen(
        'git log --pretty=format:%ct --quiet -1 HEAD',
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        shell=True, cwd=repo_dir, universal_newlines=True,
    )
    raw_timestamp = git_log.communicate()[0]
    try:
        changeset = datetime.datetime.utcfromtimestamp(int(raw_timestamp))
    except ValueError:
        # Not a git checkout, or git is unavailable.
        return None
    return changeset.strftime('%Y%m%d%H%M%S')
| django-src/utils/version.py | 2,393 | Return a tuple of the django version. If version argument is non-empty,
check for correctness of the tuple provided.
Return a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
Return main version (X.Y[.Z]) from VERSION.
Return a PEP 440-compliant version number from VERSION.
Now build the two parts of the version number: main = X.Y[.Z] sub = .devN - for pre-alpha releases | {a|b|rc}N - for alpha, beta, and rc releases | 644 | en | 0.78733 |
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
###########################################################################
#
# Copyright (c) 2018 www.codingchen.com, Inc. All Rights Reserved
#
##########################################################################
'''
@brief leetcode algorithm
@author chenhui(hui.chen6789@gmail.com)
@date 2018/11/07 21:30:33
'''
class Solution:
    def findMedianSortedArrays(self, nums1, nums2):
        """Return the median of two sorted lists.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float

        Walks the two lists with indices instead of popping from them, so
        (unlike the previous implementation) the caller's lists are left
        unmodified.
        """
        total = len(nums1) + len(nums2)
        # Number of merged elements to discard before the median slot(s).
        skip = total // 2 if total % 2 else total // 2 - 1
        i = j = 0

        def next_merged():
            # Return the next element of the merged sequence; ties go to
            # nums1, matching the original pop-based merge order.
            nonlocal i, j
            if i < len(nums1) and (j >= len(nums2) or nums1[i] <= nums2[j]):
                value = nums1[i]
                i += 1
            else:
                value = nums2[j]
                j += 1
            return value

        for _ in range(skip):
            next_merged()
        if total % 2:
            return float(next_merged())
        return (next_merged() + next_merged()) / 2

    def pop_num(self, nums1, nums2):
        """Destructively pop and return the smaller head of the two lists.

        Kept for backward compatibility; no longer used by
        findMedianSortedArrays because it mutates its arguments.
        """
        if len(nums1) == 0:
            return nums2.pop(0)
        elif len(nums2) == 0:
            return nums1.pop(0)
        elif nums1[0] > nums2[0]:
            return nums2.pop(0)
        elif nums1[0] <= nums2[0]:
            return nums1.pop(0)
if __name__ == '__main__':
s = Solution()
nums1 = [1, 2]
nums2 = [3, 4]
print(s.findMedianSortedArrays(nums1, nums2)) | 4+Median+of+Two+Sorted+Arrays/alg.py | 1,429 | :type nums1: List[int]
:type nums2: List[int]
:rtype: float
@brief leetcode algorithm
@author chenhui(hui.chen6789@gmail.com)
@date 2018/11/07 21:30:33
!/usr/bin/env python-*- coding: UTF-8 -*- Copyright (c) 2018 www.codingchen.com, Inc. All Rights Reserved | 258 | en | 0.351762 |
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VMRC console drivers."""
import base64
from oslo.config import cfg
from nova import exception
from nova.openstack.common import jsonutils
from nova.virt.vmwareapi import vim_util
vmrc_opts = [
cfg.IntOpt('console_vmrc_port',
default=443,
help="Port for VMware VMRC connections"),
cfg.IntOpt('console_vmrc_error_retries',
default=10,
help="Number of retries for retrieving VMRC information"),
]
CONF = cfg.CONF
CONF.register_opts(vmrc_opts)
class VMRCConsole(object):
    """VMRC console driver with ESX credentials.

    Encodes the console pool's stored ESX username/password together with
    the VM's datastore path into a base64 blob consumed by VMRC clients.
    """
    def __init__(self):
        super(VMRCConsole, self).__init__()
    @property
    def console_type(self):
        # Identifier used to match console requests to this driver.
        return 'vmrc+credentials'
    def get_port(self, context):
        """Get available port for consoles."""
        return CONF.console_vmrc_port
    def setup_console(self, context, console):
        """Sets up console."""
        pass
    def teardown_console(self, context, console):
        """Tears down console."""
        pass
    def init_host(self):
        """Perform console initialization."""
        pass
    def fix_pool_password(self, password):
        """Encode password."""
        # TODO(sateesh): Encrypt pool password
        return password
    def generate_password(self, vim_session, pool, instance_name):
        """Returns VMRC Connection credentials.
        Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.
        """
        username, password = pool['username'], pool['password']
        # Fetch the name and datastore path of every VM so the instance can
        # be located by its display name.
        vms = vim_session._call_method(vim_util, 'get_objects',
                    'VirtualMachine', ['name', 'config.files.vmPathName'])
        vm_ds_path_name = None
        vm_ref = None
        for vm in vms:
            vm_name = None
            ds_path_name = None
            # Each result carries a propSet with the two requested
            # properties; collect both before comparing the name.
            for prop in vm.propSet:
                if prop.name == 'name':
                    vm_name = prop.val
                elif prop.name == 'config.files.vmPathName':
                    ds_path_name = prop.val
            if vm_name == instance_name:
                vm_ref = vm.obj
                vm_ds_path_name = ds_path_name
                break
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance_name)
        # Bundle the VM path and pool credentials for the VMRC client.
        json_data = jsonutils.dumps({'vm_id': vm_ds_path_name,
                                     'username': username,
                                     'password': password})
        return base64.b64encode(json_data)
    def is_otp(self):
        """Is one time password or not."""
        return False
class VMRCSessionConsole(VMRCConsole):
    """VMRC console driver with VMRC One Time Sessions.

    Unlike the credentials-based driver, this acquires a single-use clone
    ticket from vCenter, so no ESX password is handed to the client.
    """
    def __init__(self):
        super(VMRCSessionConsole, self).__init__()
    @property
    def console_type(self):
        return 'vmrc+session'
    def generate_password(self, vim_session, pool, instance_name):
        """Returns a VMRC Session.
        Return string is of the form '<VM MOID>:<VMRC Ticket>'.
        """
        # Locate the managed object reference for the named instance.
        vms = vim_session._call_method(vim_util, 'get_objects',
                    'VirtualMachine', ['name'])
        vm_ref = None
        for vm in vms:
            if vm.propSet[0].val == instance_name:
                vm_ref = vm.obj
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance_name)
        # Acquire a one-time clone ticket that stands in for credentials.
        virtual_machine_ticket = vim_session._call_method(
            vim_session._get_vim(),
            'AcquireCloneTicket',
            vim_session._get_vim().get_service_content().sessionManager)
        json_data = jsonutils.dumps({'vm_id': str(vm_ref.value),
                                     'username': virtual_machine_ticket,
                                     'password': virtual_machine_ticket})
        return base64.b64encode(json_data)
    def is_otp(self):
        """Is one time password or not."""
        return True
| nova/console/vmrc.py | 4,588 | VMRC console driver with ESX credentials.
VMRC console driver with VMRC One Time Sessions.
Encode password.
Returns VMRC Connection credentials.
Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.
Returns a VMRC Session.
Return string is of the form '<VM MOID>:<VMRC Ticket>'.
Get available port for consoles.
Perform console initialization.
Is one time password or not.
Is one time password or not.
Sets up console.
Tears down console.
VMRC console drivers.
Copyright (c) 2011 Citrix Systems, Inc. Copyright 2011 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. TODO(sateesh): Encrypt pool password | 1,145 | en | 0.81166 |
# Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# Point Django at this service's settings module before any management
# command imports settings-dependent code.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "res.settings")
if __name__ == "__main__":
    from django.core.management import execute_from_command_line
    # Dispatch the management command (runserver, migrate, ...) from argv.
    execute_from_command_line(sys.argv)
| res/manage.py | 804 | Copyright 2017 ZTE Corporation. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 557 | en | 0.86807 |
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import numba
import numpy as np
import argparse
import time
@numba.njit()
def linear_regression(Y, X, w, iterations, alphaN):
    """Batch gradient descent for multi-output linear least squares.

    Updates the weight matrix `w` in place for `iterations` steps with a
    pre-scaled learning rate `alphaN`, and returns it.
    """
    for _ in range(iterations):
        gradient = np.dot(X.T, np.dot(X, w) - Y)
        w -= alphaN * gradient
    return w
def main():
    """Parse problem sizes from the CLI, run the solver, report timing."""
    parser = argparse.ArgumentParser(description='Linear Regression.')
    parser.add_argument('--samples', dest='samples', type=int, default=200000)
    parser.add_argument('--features', dest='features', type=int, default=10)
    parser.add_argument('--functions', dest='functions', type=int, default=4)
    parser.add_argument('--iterations', dest='iterations', type=int, default=20)
    args = parser.parse_args()
    N, D, p = args.samples, args.features, args.functions
    iterations = args.iterations
    # Learning rate scaled by the sample count.
    alphaN = 0.01 / N
    w = np.zeros((D, p))
    # Fixed seed keeps the benchmark deterministic run to run.
    np.random.seed(0)
    points = np.random.random((N, D))
    labels = np.random.random((N, p))
    start = time.time()
    w = linear_regression(labels, points, w, iterations, alphaN)
    selftimed = time.time() - start
    print("SELFTIMED ", selftimed)
    print("checksum: ", np.sum(w))


if __name__ == '__main__':
    main()
| examples/linear_regression/linear_regression_numba.py | 1,200 | Copyright (c) 2017 Intel Corporation SPDX-License-Identifier: BSD-2-Clause | 74 | en | 0.441239 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import re
import sphinx_rtd_theme
import subprocess as sp
# -- Project information -----------------------------------------------------
project = 'CubismNova'
copyright = 'ETH Zurich'
author = 'Fabian Wermelinger'
sp.run('(cd .. && doxygen)', shell=True) # compile the xml source
# `check_output` returns bytes; the previous `str(...)` wrapper produced the
# repr "b'v1.2.3\n'", corrupting both `version` and `release`.  Decode and
# strip the trailing newline instead.
v = sp.check_output('git describe --abbrev=0', shell=True).decode('utf-8').strip() # get version
# The short X.Y version
version = '.'.join(v.split('.')[:2])
# The full version, including alpha/beta/rc tags
release = v
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.mathjax',
    'sphinx_rtd_theme',
    'sphinxcontrib.bibtex',
    'breathe',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# breathe extension: bridges the Doxygen XML output (built above) into Sphinx.
breathe_default_project = "CubismNova"
breathe_projects = {
    "CubismNova": "../doxygen/xml"
}
breathe_domain_by_extension = { "h" : "cpp", "cu" : "cpp" }
# CUDA qualifiers the Sphinx C++ domain parser should tolerate in signatures.
cpp_id_attributes = ['__device__', '__global__', '__host__']
cpp_paren_attributes = ['__align__']
# Tell sphinx what the primary language being documented is
primary_domain = 'cpp'
# Tell sphinx what the pygments highlight language should be
highlight_language = 'cpp'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_title = "CubismNova Documentation"
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CubismNovadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'CubismNova.tex', 'CubismNova Documentation',
     'Fabian Wermelinger', 'manual'),
]
# BibTeX files consumed by sphinxcontrib.bibtex.
bibtex_bibfiles = ['bibtex/references.bib']
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'cubismnova', 'CubismNova Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'CubismNova', 'CubismNova Documentation',
     author, 'CubismNova', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| docs/source/conf.py | 6,878 | -*- coding: utf-8 -*- Configuration file for the Sphinx documentation builder. This file does only contain a selection of the most common options. For a full list see the documentation: http://www.sphinx-doc.org/en/master/config -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys sys.path.insert(0, os.path.abspath('.')) -- Project information ----------------------------------------------------- compile the xml source get version The short X.Y version The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] The master toctree document. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. The name of the Pygments (syntax highlighting) style to use. 
breathe extension Tell sphinx what the primary language being documented is Tell sphinx what the pygments highlight language should be -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". If false, no module index is generated. If false, no index is generated. If true, the index is split into individual pages for each letter. If true, links to the reST sources are added to the pages. If true, "Created using Sphinx" is shown in the HTML footer. Default is True. If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Custom sidebar templates, must be a dictionary that maps document names to template names. The default sidebars (for documents that don't match any pattern) are defined by theme itself. Builtin themes are using these templates by default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']``. html_sidebars = {} -- Options for HTMLHelp output --------------------------------------------- Output file base name for HTML help builder. -- Options for LaTeX output ------------------------------------------------ The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. 
List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). BibTeX files -- Options for manual page output ------------------------------------------ One entry per manual page. List of tuples (source start file, name, description, authors, manual section). -- Options for Texinfo output ---------------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) -- Options for Epub output ------------------------------------------------- Bibliographic Dublin Core info. The unique identifier of the text. This can be a ISBN number or the project homepage. epub_identifier = '' A unique identification for the text. epub_uid = '' A list of files that should not be packed into the epub file. -- Extension configuration ------------------------------------------------- -- Options for todo extension ---------------------------------------------- If true, `todo` and `todoList` produce output, else they produce nothing. | 4,791 | en | 0.615538 |
# coding: utf-8
"""
ProcessMaker API
This ProcessMaker I/O API provides access to a BPMN 2.0 compliant workflow engine api that is designed to be used as a microservice to support enterprise cloud applications. The current Alpha 1.0 version supports most of the descriptive class of the BPMN 2.0 specification.
OpenAPI spec version: 1.0.0
Contact: support@processmaker.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import ProcessMaker_PMIO
from ProcessMaker_PMIO.rest import ApiException
from ProcessMaker_PMIO.models.input_output import InputOutput
class TestInputOutput(unittest.TestCase):
    """Unit test stubs for the generated InputOutput model."""

    def setUp(self):
        # These stubs need no fixtures.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testInputOutput(self):
        """Smoke-test that an InputOutput model instance can be constructed."""
        model = InputOutput()
if __name__ == '__main__':
    # Execute the stubs in this module with the default unittest runner.
    unittest.main()
| test/test_input_output.py | 1,599 | InputOutput unit test stubs
Test InputOutput
ProcessMaker API
This ProcessMaker I/O API provides access to a BPMN 2.0 compliant workflow engine api that is designed to be used as a microservice to support enterprise cloud applications. The current Alpha 1.0 version supports most of the descriptive class of the BPMN 2.0 specification.
OpenAPI spec version: 1.0.0
Contact: support@processmaker.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
coding: utf-8 | 1,006 | en | 0.844335 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.contrib.distribute.python import values
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import keras as keras_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
# Deterministic seed and dataset geometry shared by every test in this file.
_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2

# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit tests suite.
def simple_sequential_model():
  """Builds a small Dense/Dropout/Dense classifier over `_INPUT_SIZE` inputs."""
  layers = [
      keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE),
      keras.layers.Dropout(0.1),
      keras.layers.Dense(_NUM_CLASS, activation='softmax'),
  ]
  return keras.models.Sequential(layers)
def simple_functional_model():
  """Builds the functional-API twin of `simple_sequential_model`."""
  inputs = keras.layers.Input(shape=_INPUT_SIZE)
  hidden = keras.layers.Dense(16, activation='relu')(inputs)
  hidden = keras.layers.Dropout(0.1)(hidden)
  outputs = keras.layers.Dense(_NUM_CLASS, activation='softmax')(hidden)
  return keras.models.Model(inputs=[inputs], outputs=[outputs])
def multi_inputs_multi_outputs_model():
  """Builds and compiles a model with two dense inputs, one string input,
  and two softmax heads (`dense_2`, `dense_3`)."""
  in_a = keras.layers.Input(shape=(16,), name='input_a')
  in_b = keras.layers.Input(shape=(16,), name='input_b')
  in_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
  shared_dense = keras.layers.Dense(8, name='dense_1')

  branch_a = shared_dense(in_a)
  # Parse the string input into floats, then scale branch_a elementwise.
  parsed_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(in_m)
  scaled = keras.layers.Lambda(lambda k: k[0] * k[1])([parsed_m, branch_a])
  branch_b = shared_dense(in_b)

  merged = keras.layers.concatenate([scaled, branch_b], name='merge')
  head_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
  head_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)

  model = keras.models.Model(
      inputs=[in_a, in_b, in_m], outputs=[head_c, head_d])
  model.compile(
      loss='categorical_crossentropy',
      optimizer=gradient_descent.GradientDescentOptimizer(0.001),
      metrics={
          'dense_2': 'categorical_accuracy',
          'dense_3': 'categorical_accuracy'
      })
  return model
def get_ds_train_input_fn():
  """Returns the deterministic training split as a dataset batched by 32."""
  np.random.seed(_RANDOM_SEED)
  (x_train, y_train), _ = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=_INPUT_SIZE,
      num_classes=_NUM_CLASS)
  y_train = keras.utils.to_categorical(y_train)
  return dataset_ops.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
def get_ds_test_input_fn():
  """Returns the deterministic 50-sample test split as a dataset batched by 32."""
  np.random.seed(_RANDOM_SEED)
  _, (x_test, y_test) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=_INPUT_SIZE,
      num_classes=_NUM_CLASS)
  y_test = keras.utils.to_categorical(y_test)
  return dataset_ops.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
def get_multi_inputs_multi_outputs_data():
  """Generates deterministic train/test dicts for the multi-input/output model.

  Returns:
    A `(train_data, test_data)` pair of dicts keyed by the model's input and
    output layer names.
  """
  (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(16,),
      num_classes=3,
      random_seed=_RANDOM_SEED)
  (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(16,),
      num_classes=2,
      random_seed=_RANDOM_SEED)
  (m_train, _), (m_test, _) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(8,),
      num_classes=2,
      random_seed=_RANDOM_SEED)

  # One-hot encode both classification targets.
  c_train, c_test, d_train, d_test = [
      keras.utils.to_categorical(t)
      for t in (c_train, c_test, d_train, d_test)]

  train_data = dict(
      input_a=a_train, input_b=b_train, input_m=m_train,
      output_c=c_train, output_d=d_train)
  test_data = dict(
      input_a=a_test, input_b=b_test, input_m=m_test,
      output_c=c_test, output_d=d_test)
  return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution):
  """Batches `dataset`, dropping the partial final batch on TPU.

  TPUs currently require fully defined input shapes; `drop_remainder=True`
  guarantees every batch has exactly `batch_size` elements.
  """
  return dataset.batch(
      batch_size,
      drop_remainder=isinstance(distribution, tpu_strategy.TPUStrategy))
def get_model():
  """Builds a tiny single-Dense functional model mapping (None, 3) -> (None, 4)."""
  inp = keras.layers.Input(shape=(3,), name='input')
  out = keras.layers.Dense(4, name='dense')(inp)
  return keras.Model(inp, out)
def get_dataset(distribution):
  """All-zeros (inputs, targets) dataset, repeated 100x and batched by 10."""
  features = np.zeros((10, 3), dtype=np.float32)
  labels = np.zeros((10, 4), dtype=np.float32)
  ds = dataset_ops.Dataset.from_tensor_slices((features, labels)).repeat(100)
  return batch_wrapper(ds, 10, distribution)
def get_predict_dataset(distribution):
  """Feature-only zeros dataset for predict, repeated 100x and batched by 10."""
  features = np.zeros((10, 3), dtype=np.float32)
  ds = dataset_ops.Dataset.from_tensor_slices(features).repeat(100)
  return batch_wrapper(ds, 10, distribution)
def multi_input_output_model():
  """Two-input model whose outputs are a dense branch and a dropout branch."""
  in_a = keras.layers.Input(shape=(3,), name='input_a')
  in_b = keras.layers.Input(shape=(5,), name='input_b')
  # TODO(anjalisridhar): Change the output dimension of the second Dense layer
  # once the iterator output validation issue has been fixed.
  dense_1 = keras.layers.Dense(7, name='dense_1')
  dense_2 = keras.layers.Dense(7, name='dense_2')
  branch_a = dense_1(in_a)
  branch_b = dense_2(in_b)
  dropped = keras.layers.Dropout(0.5, name='dropout')(branch_a)
  return keras.models.Model([in_a, in_b], [branch_b, dropped])
def get_correctness_test_inputs(use_numpy, with_distribution,
                                x_train, y_train, x_predict):
  """Generates the inputs for correctness check when enable Keras with DS.

  Builds matching kwargs dicts for `fit`, `evaluate` and `predict` so that a
  distributed run and a non-distributed run consume identical data.

  Args:
    use_numpy: if True, feed raw numpy arrays; otherwise wrap them in datasets.
    with_distribution: the DistributionStrategy under test, or None.
    x_train: training features.
    y_train: training targets.
    x_predict: inputs for `predict`.

  Returns:
    A `(training_inputs, eval_inputs, predict_inputs)` tuple of kwargs dicts.
  """
  global_batch_size = 64
  batch_size = global_batch_size
  # TODO(b/118776054): Use global batch size for Keras/DS support.
  # Non-TPU strategies here take a per-core batch size, so divide the global
  # batch size by the replica count.
  use_per_core_batch_size = (
      with_distribution and
      with_distribution.__class__.__name__ != 'TPUStrategy')
  if use_per_core_batch_size:
    batch_size //= with_distribution.num_replicas_in_sync

  if use_numpy:
    training_inputs = {
        'batch_size': batch_size,
        'x': x_train,
        'y': y_train,
        'epochs': 1,
        'shuffle': False,
    }
    eval_inputs = {
        'batch_size': batch_size,
        'x': x_train,
        'y': y_train,
    }
    # TODO(b/119318587): We should not require batch_size when distribution
    # is enabled.
    if with_distribution:
      if use_per_core_batch_size:
        predict_batch_size = (
            len(x_predict) // with_distribution.num_replicas_in_sync)
      else:
        predict_batch_size = len(x_predict)
    else:
      predict_batch_size = None

    predict_inputs = {
        'batch_size': predict_batch_size,
        'x': np.array(x_predict, dtype=np.float32),
    }
  else:
    # For dataset inputs, we do not pass batch_size to
    # keras.fit/evaluate/predict. The batch size is part of the dataset.
    train_dataset = dataset_ops.Dataset.from_tensor_slices(
        (x_train, y_train))
    x = batch_wrapper(train_dataset, batch_size, with_distribution)
    training_inputs = {
        'batch_size': None,
        'x': x,
        'y': None,
        'epochs': 1,
        'shuffle': False,
        'steps_per_epoch': len(x_train) // global_batch_size,
    }
    eval_inputs = {
        'batch_size': None,
        'x': x,
        'y': None,
        'steps': 20,
    }
    predict_batch_size = len(x_predict)
    if use_per_core_batch_size:
      predict_batch_size //= with_distribution.num_replicas_in_sync
    predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
    predict_dataset = batch_wrapper(predict_dataset,
                                    predict_batch_size, with_distribution)
    predict_inputs = {
        'batch_size': None,
        'steps': 1,
        'x': predict_dataset,
    }

  return training_inputs, eval_inputs, predict_inputs
# Every strategy exercised by the parameterized tests below, including two
# TPU variants.
strategies = [combinations.default_strategy,
              combinations.one_device_strategy,
              combinations.mirrored_strategy_with_gpu_and_cpu,
              combinations.mirrored_strategy_with_two_gpus,
              combinations.tpu_strategy,  # steps_per_run=2
              combinations.tpu_strategy_one_step]
def strategy_minus_tpu_combinations():
  """Graph-mode combinations over every non-TPU distribution strategy."""
  non_tpu = [
      combinations.default_strategy,
      combinations.one_device_strategy,
      combinations.mirrored_strategy_with_gpu_and_cpu,
      combinations.mirrored_strategy_with_two_gpus,
  ]
  return combinations.combine(distribution=non_tpu, mode=['graph'])
def strategy_combinations():
  """Graph-mode combinations over all strategies, TPU variants included."""
  return combinations.combine(distribution=strategies, mode=['graph'])
def strategy_and_optimizer_combinations():
  """Cross product of every strategy with each v1 optimizer, in graph mode."""
  optimizers = [
      combinations.adagrad_optimizer_v1_fn,
      combinations.adam_optimizer_v1_fn,
      combinations.gradient_descent_optimizer_v1_fn,
      combinations.rmsprop_optimizer_v1_fn,
  ]
  return combinations.combine(
      distribution=strategies, optimizer=optimizers, mode=['graph'])
def strategy_and_inputs():
  """Strategies crossed with numpy-vs-dataset input modes, in graph mode."""
  return combinations.combine(
      distribution=strategies, use_numpy=[True, False], mode=['graph'])
class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase):
  """Keras-to-Estimator conversion tests under MirroredStrategy.

  Each test builds a small Keras model, converts it with
  `keras_lib.model_to_estimator`, trains briefly, and checks that the
  evaluation loss decreases (or that misuse raises).
  """

  def setUp(self):
    # Per-test model dir so Estimator checkpoints never leak between tests;
    # the seeded RunConfig keeps runs deterministic.
    self._base_dir = os.path.join(self.get_temp_dir(),
                                  'keras_mirrored_strategy_test')
    gfile.MakeDirs(self._base_dir)
    self._config = run_config_lib.RunConfig(
        tf_random_seed=_RANDOM_SEED, model_dir=self._base_dir)
    self._dist = mirrored_strategy.MirroredStrategy(
        devices=['/device:GPU:0', '/device:GPU:1'])

  def tearDown(self):
    writer_cache.FileWriterCache.clear()
    if os.path.isdir(self._base_dir):
      gfile.DeleteRecursively(self._base_dir)

  def test_train_functional_with_distribution_strategy(self):
    """Training a functional Keras model via Estimator reduces the loss."""
    dist = mirrored_strategy.MirroredStrategy(
        devices=['/device:GPU:0', '/device:GPU:1'])
    keras_model = simple_functional_model()
    keras_model.compile(
        loss='categorical_crossentropy',
        metrics=[keras.metrics.CategoricalAccuracy()],
        optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
    config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
                                      model_dir=self._base_dir,
                                      train_distribute=dist,
                                      eval_distribute=dist)
    with self.cached_session():
      est_keras = keras_lib.model_to_estimator(
          keras_model=keras_model, config=config)
      before_eval_results = est_keras.evaluate(
          input_fn=get_ds_test_input_fn, steps=1)
      est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
      after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,
                                              steps=1)
      # Loss should improve after training.
      self.assertLess(after_eval_results['loss'], before_eval_results['loss'])

    writer_cache.FileWriterCache.clear()
    gfile.DeleteRecursively(self._config.model_dir)

  def test_train_sequential_with_distribution_strategy(self):
    """Same as the functional test, but for a Sequential model (train only)."""
    dist = mirrored_strategy.MirroredStrategy(
        devices=['/device:GPU:0', '/device:GPU:1'])
    keras_model = simple_sequential_model()
    keras_model.compile(
        loss='categorical_crossentropy',
        metrics=[keras.metrics.CategoricalAccuracy()],
        optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
    config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
                                      model_dir=self._base_dir,
                                      train_distribute=dist)
    with self.cached_session():
      est_keras = keras_lib.model_to_estimator(
          keras_model=keras_model, config=config)
      before_eval_results = est_keras.evaluate(
          input_fn=get_ds_test_input_fn, steps=1)
      est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
      after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,
                                              steps=1)
      self.assertLess(after_eval_results['loss'], before_eval_results['loss'])

    writer_cache.FileWriterCache.clear()
    gfile.DeleteRecursively(self._config.model_dir)

  def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self):
    """Dict-structured input_fns feed the multi-input/multi-output model."""
    train_data, test_data = get_multi_inputs_multi_outputs_data()

    def train_input_fn():
      input_dict = {
          'input_a': train_data['input_a'],
          'input_b': train_data['input_b'],
          # `np.str` was deprecated in NumPy 1.20 and removed in 1.24; it was
          # an alias of the builtin `str`, which we use directly.
          'input_m': train_data['input_m'].astype(str)
      }
      output_dict = {
          'dense_2': train_data['output_c'],
          'dense_3': train_data['output_d']
      }
      return dataset_ops.Dataset.from_tensor_slices((input_dict,
                                                     output_dict)).batch(16)

    def eval_input_fn():
      input_dict = {
          'input_a': test_data['input_a'],
          'input_b': test_data['input_b'],
          # Builtin `str` replaces the removed `np.str` alias.
          'input_m': test_data['input_m'].astype(str)
      }
      output_dict = {
          'dense_2': test_data['output_c'],
          'dense_3': test_data['output_d']
      }
      return dataset_ops.Dataset.from_tensor_slices((input_dict,
                                                     output_dict)).batch(16)

    self.do_test_multi_inputs_multi_outputs_with_input_fn(
        train_input_fn, eval_input_fn)

  def do_test_multi_inputs_multi_outputs_with_input_fn(self, train_input_fn,
                                                       eval_input_fn):
    """Shared driver: train with `train_input_fn`; assert eval loss improves."""
    config = run_config_lib.RunConfig(
        tf_random_seed=_RANDOM_SEED,
        model_dir=self._base_dir,
        train_distribute=self._dist)
    with self.cached_session():
      model = multi_inputs_multi_outputs_model()
      est_keras = keras_lib.model_to_estimator(keras_model=model, config=config)
      baseline_eval_results = est_keras.evaluate(
          input_fn=eval_input_fn, steps=1)
      est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)
      eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
      self.assertLess(eval_results['loss'], baseline_eval_results['loss'])

  def test_keras_optimizer_with_distribution_strategy(self):
    """Native Keras optimizers are rejected when a strategy is configured."""
    dist = mirrored_strategy.MirroredStrategy(
        devices=['/device:GPU:0', '/device:GPU:1'])
    keras_model = simple_sequential_model()
    keras_model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.rmsprop(lr=0.01))

    config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
                                      model_dir=self._base_dir,
                                      train_distribute=dist)
    with self.cached_session():
      est_keras = keras_lib.model_to_estimator(keras_model=keras_model,
                                               config=config)
      with self.assertRaisesRegexp(ValueError,
                                   'Only TensorFlow native optimizers are '
                                   'supported with DistributionStrategy.'):
        est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)

    writer_cache.FileWriterCache.clear()
    gfile.DeleteRecursively(self._config.model_dir)
class TestDistributionStrategyWithNumpyArrays(test.TestCase,
                                              parameterized.TestCase):
  """Tests for feeding numpy arrays to Keras models compiled with a strategy."""

  @combinations.generate(strategy_combinations())
  def test_creating_var_with_numpy_arrays(self, distribution):
    """A numpy array copied into a strategy-managed variable keeps its values."""
    with self.cached_session():
      x = np.asarray(np.random.random((64, 3)), dtype=np.float32)
      var_x = distributed_training_utils.get_var_for_numpy(distribution, x)
      val = self.evaluate(var_x.value())
      # Verify that the numpy value is copied to the variable.
      self.assertAllEqual(x, val)

  def test_calculating_batch_params(self):
    """Step-count inference validates batch size against samples and replicas."""
    # This verifies that we calculate the number of steps when the batch size
    # is specified.
    with self.cached_session():
      # 64 is the number of input samples.
      inputs = np.zeros((64, 3), dtype=np.float32)
      # The number of replicas is equal to 3.
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
                                                     '/device:CPU:0',
                                                     '/device:GPU:1'])
      with self.assertRaisesRegexp(ValueError, 'Please specify a batch_size '
                                   'that is smaller than'):
        # The batch size(128) is larger than the number of input
        # samples(64).
        distributed_training_utils.get_input_batch_params(inputs,
                                                          128,
                                                          strategy)
      with self.assertRaisesRegexp(ValueError, 'is smaller than the number '
                                   'of replicas'):
        # The batch size(32) * num_replicas_in_sync(3) is 96 which is greater
        # than the number of input samples(64).
        distributed_training_utils.get_input_batch_params(inputs,
                                                          32,
                                                          strategy)
      # The number of replicas now is equal to 2.
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
                                                     '/device:CPU:0'])
      # 32 is the batch size per replica.
      steps = distributed_training_utils.get_input_batch_params(inputs,
                                                                32,
                                                                strategy)
      # The number of batches is the ratio of input samples(64) to
      # batch size(32) which is 2. The number of steps(1) is the ratio of
      # number of batches(2) to the number of replicas(2).
      self.assertEqual(steps, 1)
      # 16 is the batch size per replica.
      steps = distributed_training_utils.get_input_batch_params(inputs,
                                                                16,
                                                                strategy)
      # The number of batches is the ratio of input samples(64) to
      # batch size(16) which is 4. The number of steps(2) is the ratio of
      # number of batches(4) to the number of replicas(2).
      self.assertEqual(steps, 2)

  def test_calculating_batch_size(self):
    """Per-device batch size is derived from sample count, steps, replicas."""
    with self.cached_session():
      # 64 is the number of input samples.
      inputs = np.zeros((64, 3), dtype=np.float32)
      targets = np.zeros((64, 4), dtype=np.float32)
      model = get_model()
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
                                                     '/device:CPU:0'])
      # Force static shapes so the batch dimension is inferable below.
      strategy._require_static_shapes = True
      model.compile(optimizer, loss, distribute=strategy)
      iterator = model._distribution_standardize_user_data(inputs,
                                                           targets,
                                                           batch_size=None,
                                                           check_steps=True,
                                                           steps_name='steps',
                                                           steps=3)
      # The global batch size(21) across all replicas is the ratio of the input
      # samples(64) to the steps(3).
      # The batch size(10) per device is the ratio of the global batch size(21)
      # to the number of replicas(2).
      # The global batch size and batch size are rounded integer values.
      self.assertEqual(10, distributed_training_utils.get_batch_dimension(
          iterator._iterator))

  @combinations.generate(strategy_combinations())
  def test_calling_model_with_numpy_arrays(self, distribution):
    """fit/evaluate/predict accept plain numpy arrays under every strategy."""
    with self.cached_session():
      model = get_model()
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae']
      model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
      inputs = np.zeros((64, 3), dtype=np.float32)
      targets = np.zeros((64, 4), dtype=np.float32)
      # Call fit with validation data
      model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0,
                validation_data=(inputs, targets))
      # TODO(anjalisridhar): We need tests for when the batch size and steps are
      # smaller and results in a 0 batch_size and steps value.
      model.evaluate(inputs, targets)
      # with steps
      model.evaluate(inputs, targets, steps=2)
      # with batch_size
      model.evaluate(inputs, targets, batch_size=8)
      model.predict(inputs)
      # with steps
      model.predict(inputs, steps=2)
      # with batch_size
      model.predict(inputs, batch_size=8)

  @combinations.generate(strategy_combinations())
  def test_calling_model_with_nested_numpy_arrays(self, distribution):
    """Multi-input/output models accept nested lists of numpy arrays."""
    with self.cached_session():
      model = multi_input_output_model()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
      loss = 'mse'
      model.compile(optimizer, loss, distribute=distribution)
      input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
      input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
      inputs = [input_a_np, input_b_np]
      output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
      output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
      targets = [output_d_np, output_e_np]
      # Call fit with validation data
      model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)
      # TODO(anjalisridhar): We need tests for when the batch size and steps are
      # smaller and results in a 0 batch_size and steps value.
      model.evaluate(inputs, targets)
      # with steps
      model.evaluate(inputs, targets, steps=2)
      # with batch_size
      model.evaluate(inputs, targets, batch_size=8)
      model.predict(inputs)
      # with steps
      model.predict(inputs, steps=2)
      # with batch_size
      model.predict(inputs, batch_size=8)

  @combinations.generate(strategy_minus_tpu_combinations())
  def test_numpy_with_sample_weights(self, distribution):
    """`sample_weight` is accepted for numpy inputs on non-TPU strategies."""
    model = get_model()
    optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    model.compile(optimizer, loss, distribute=distribution)
    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    sample_weights = np.ones((10), np.float32)
    model.fit(inputs, targets, sample_weight=sample_weights, epochs=1,
              steps_per_epoch=2, verbose=1)

  @combinations.generate(strategy_combinations())
  def test_flatten_predict_outputs(self, distribution):
    """`predict` returns one flat array per model output."""
    with self.cached_session():
      model = multi_input_output_model()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
      loss = 'mse'
      model.compile(optimizer, loss, distribute=distribution)
      # We take 6 input samples with each input having a dimension of 3 or 5.
      input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
      input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
      inputs = [input_a_np, input_b_np]
      outs = model.predict(inputs, steps=1)
      # `predict` a list that is equal in length to the number of model outputs.
      # In this test our model has two outputs and each element of `outs`
      # corresponds to all the samples of one of the model outputs.
      self.assertEqual(2, len(outs))
      # Each of the output samples have a dimension of 7. We should process all
      # the available input samples(6).
      self.assertAllEqual([6, 7], outs[0].shape)
      self.assertAllEqual([6, 7], outs[1].shape)
class TestDistributionStrategyWithDatasets(test.TestCase,
parameterized.TestCase):
  @combinations.generate(strategy_combinations())
  def test_calling_model_on_same_dataset(self, distribution):
    """Repeated fit calls on one dataset succeed, including validation passes."""
    with self.cached_session():
      model = get_model()
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae', keras.metrics.CategoricalAccuracy()]
      model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
      dataset = get_dataset(distribution)
      # Call fit with validation data
      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
                validation_data=dataset, validation_steps=2)
      # A second fit on the very same dataset must also work.
      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
                validation_data=dataset, validation_steps=2)
      model.predict(get_predict_dataset(distribution), steps=2)
  @combinations.generate(strategy_combinations())
  def test_model_interleaved_eval_same_as_direct_eval(self, distribution):
    """Fit-time (interleaved) validation metrics match manual evaluate()."""
    with self.cached_session():
      user_controlled_model = get_model()
      user_controlled_model.compile(
          gradient_descent.GradientDescentOptimizer(0.001),
          loss='mse',
          metrics=['mae', keras.metrics.CategoricalAccuracy()],
          distribute=distribution)
      # Second model starts from identical weights so both runs are comparable.
      interleaved_model = get_model()
      interleaved_model.set_weights(user_controlled_model.get_weights())
      interleaved_model.compile(
          gradient_descent.GradientDescentOptimizer(0.001),
          loss='mse',
          metrics=['mae', keras.metrics.CategoricalAccuracy()],
          distribute=distribution)
      dataset = get_dataset(distribution)
      # Call fit with validation interleaved
      interleaved_output = interleaved_model.fit(
          dataset, epochs=2, steps_per_epoch=2, verbose=1,
          validation_data=dataset, validation_steps=2, shuffle=False)
      # Manually control the validation running after each epoch.
      user_controlled_output = []
      for _ in range(2):
        user_controlled_model.fit(
            dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
        user_controlled_output.append(
            user_controlled_model.evaluate(dataset, steps=2))
      # Per-epoch validation history must equal the manual evaluations.
      self.assertEqual(interleaved_output.history['val_loss'],
                       [x[0] for x in user_controlled_output])
      self.assertEqual(interleaved_output.history['val_mean_absolute_error'],
                       [x[1] for x in user_controlled_output])
      self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
                       [x[2] for x in user_controlled_output])
# TODO(priyag): Enable this test for TPU. Currently tuples/dict don't work
# as clone_model's input_tensors argument only seems to accept list and not
# tuples or dict.
def test_fit_with_tuple_and_dict_dataset_inputs(self):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 5))
output_d_np = np.random.random((10, 7))
output_e_np = np.random.random((10, 7))
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
# Test with dict
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
{'input_a': input_a_np, 'input_b': input_b_np},
(output_d_np, output_e_np)))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
@combinations.generate(strategy_combinations())
def test_fit_eval_and_predict_methods_on_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_and_optimizer_combinations())
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
with self.cached_session():
model = get_model()
loss = 'mse'
model.compile(optimizer(), loss, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_minus_tpu_combinations())
def test_dataset_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat()
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
  def test_dataset_input_shape_validation(self):
    """Unbatched or wrongly-shaped dataset elements raise a clear ValueError."""
    with self.cached_session():
      model = get_model()
      optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
                                                     '/device:GPU:0'])
      model.compile(optimizer, loss, distribute=strategy)
      # User forgets to batch the dataset
      inputs = np.zeros((10, 3), dtype=np.float32)
      targets = np.zeros((10, 4), dtype=np.float32)
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
      # Wrong input shape
      inputs = np.zeros((10, 5), dtype=np.float32)
      targets = np.zeros((10, 4), dtype=np.float32)
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)
      with self.assertRaisesRegexp(ValueError,
                                   'expected input to have shape'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
  @combinations.generate(combinations.combine(
      distribution=[combinations.tpu_strategy_one_step],
      mode=['graph']))
  def test_dataset_input_shape_fully_defined(self, distribution):
    """TPU strategies require fully-defined shapes; partial shapes must fail."""
    with self.cached_session():
      model = get_model()
      optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      model.compile(optimizer, loss, distribute=distribution)
      dataset = get_dataset(distribution)
      # Input shapes are not fully known. Batch dimension is unknown as we are
      # not using the drop_remainder argument.
      dataset = dataset.repeat(100).batch(10)
      with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
  def test_learning_phase_value(self):
    """Checks the Keras learning phase reaches replicas in fit vs. predict.

    A Dropout layer with rate ~1.0 zeroes nearly every activation during
    training, so training accuracy should be ~0; in predict the learning
    phase is off, so dropout is disabled and the all-ones model output
    passes through unchanged.
    """
    # TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
    # meaningful values. Currently we don't pass the learning phase if the
    # Lambda layer uses the learning phase.
    with self.cached_session():
      x = keras.layers.Input(shape=(1,), name='input')
      y = keras.layers.Dense(1, kernel_initializer='ones')(x)
      z = keras.layers.Dropout(0.9999)(y)
      model = keras.Model(x, z)
      initial_weights = model.get_weights()
      optimizer = gradient_descent.GradientDescentOptimizer(0.005)
      loss = 'mse'
      metrics = ['acc']
      strategy = mirrored_strategy.MirroredStrategy(
          ['/device:GPU:0', '/device:GPU:1'])
      model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
      inputs = np.ones((10, 1), dtype=np.float32)
      targets = np.ones((10, 1), dtype=np.float32)
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat().batch(8)
      hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
      # Dropout zeroes (almost) everything while training, so accuracy ~ 0.
      self.assertAlmostEqual(hist.history['acc'][0], 0, 0)
      model.set_weights(initial_weights)
      # TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
      # evaluate_output = model.evaluate(dataset, steps=20)
      # self.assertAlmostEqual(evaluate_output[1], 1, 0)
      inputs = np.ones((10, 1), dtype=np.float32)
      predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
      predict_dataset = predict_dataset.repeat().batch(5)
      output = model.predict(predict_dataset, steps=10)
      # `predict` runs for 10 steps and in each step you process 10 samples.
      ref_output = np.ones((100, 1), dtype=np.float32)
      self.assertArrayNear(output, ref_output, 1e-1)
class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
  """Verifies clear error messages for unsupported or invalid DS usage."""

  def test_validating_dataset_input_tensors_with_shape_mismatch(self):
    """Per-device input tensors with differing shapes must be rejected."""
    with self.cached_session():
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
                                                     '/device:CPU:0'])
      a = constant_op.constant([1, 2], shape=(1, 2))
      b = constant_op.constant([[1, 2], [1, 2]], shape=(2, 2))
      x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})
      y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})
      with strategy.scope():
        # Removed device and input tensor shape details from the error message
        # since the order of the device and the corresponding input tensor shape
        # is not deterministic over different runs.
        with self.assertRaisesRegexp(ValueError,
                                     'Input tensor shapes do not match for '
                                     'distributed tensor inputs '
                                     'DistributedValues:.+'):
          distributed_training_utils.validate_distributed_dataset_inputs(
              strategy, x, y)

  def test_validating_dataset_input_tensors_with_dtype_mismatch(self):
    """Per-device input tensors with differing dtypes must be rejected."""
    with self.cached_session():
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
                                                     '/device:CPU:0'])
      a = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.int32)
      b = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.float64)
      x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})
      y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})
      with strategy.scope():
        # Removed device and input tensor dtype details from the error message
        # since the order of the device and the corresponding input tensor dtype
        # is not deterministic over different runs.
        with self.assertRaisesRegexp(ValueError,
                                     'Input tensor dtypes do not match for '
                                     'distributed tensor inputs '
                                     'DistributedValues:.+'):
          distributed_training_utils.validate_distributed_dataset_inputs(
              strategy, x, y)

  def test_unsupported_features(self):
    """validation_split, sample_weight and missing steps args must raise."""
    with self.cached_session():
      model = get_model()
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae']
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
                                                     '/device:GPU:0'])
      model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
      dataset = get_dataset(strategy)
      # Test with validation split
      with self.assertRaisesRegexp(
          ValueError, '`validation_split` argument is not '
          'supported when input `x` is a dataset or a '
          'dataset iterator.+'):
        model.fit(dataset,
                  epochs=1, steps_per_epoch=2, verbose=0,
                  validation_split=0.5, validation_steps=2)
      # Test with sample weight.
      sample_weight = np.random.random((10,))
      with self.assertRaisesRegexp(
          ValueError, '`sample_weight` argument is not supported when input '
          '`x` is a dataset or a dataset iterator.'):
        model.fit(
            dataset,
            epochs=1,
            steps_per_epoch=2,
            verbose=0,
            sample_weight=sample_weight)
      # Test with not specifying the `steps` argument.
      with self.assertRaisesRegexp(
          ValueError, 'you should specify the `steps_per_epoch` argument'):
        model.fit(dataset, epochs=1, verbose=0)
      with self.assertRaisesRegexp(ValueError,
                                   'you should specify the `steps` argument'):
        model.evaluate(dataset, verbose=0)
      with self.assertRaisesRegexp(ValueError,
                                   'you should specify the `steps` argument'):
        model.predict(dataset, verbose=0)

  def test_calling_with_unsupported_predefined_callbacks(self):
    """Callbacks incompatible with DistributionStrategy must raise."""
    with self.cached_session():
      model = get_model()
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae']
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
                                                     '/device:GPU:0'])
      model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
      dataset = get_dataset(strategy)

      def schedule(_):
        # Constant learning rate; the callback itself is the point of the test.
        return 0.001
      with self.assertRaisesRegexp(ValueError,
                                   'LearningRateScheduler callback is not '
                                   'supported with DistributionStrategy.'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
                  callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
      with self.assertRaisesRegexp(ValueError,
                                   'ReduceLROnPlateau callback is not '
                                   'supported with DistributionStrategy.'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
                  callbacks=[keras.callbacks.ReduceLROnPlateau()])
      with self.assertRaisesRegexp(ValueError,
                                   'histogram_freq in the TensorBoard callback '
                                   'is not supported when using '
                                   'DistributionStrategy.'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
                  callbacks=[keras.callbacks.TensorBoard(histogram_freq=10)])
class TestDistributionStrategyWithLossMasking(test.TestCase):
  """Checks that loss masking works under DistributionStrategy."""

  # TODO(priyag): Enable all strategies for this test. Currently it does not
  # work for TPU due to some invalid datatype.
  def test_masking(self):
    """Masked (all-zero) timesteps must not contribute to the loss."""
    with self.cached_session():
      np.random.seed(1337)
      x = np.array([[[1], [1]], [[0], [0]]])
      model = keras.models.Sequential()
      model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(1, kernel_initializer='one')))
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
                                                     '/device:GPU:0'])
      model.compile(loss='mse',
                    optimizer=gradient_descent.GradientDescentOptimizer(0.01),
                    distribute=strategy)
      y = np.array([[[1], [1]], [[1], [1]]])
      dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)
      hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2)
      # With ones-initialized weights the unmasked sample is matched exactly
      # and the masked sample is excluded, so the loss is exactly 0.
      self.assertEqual(hist.history['loss'][0], 0)
class TestDistributionStrategyWithNormalizationLayer(
    test.TestCase, parameterized.TestCase):
  """Checks BatchNormalization statistics under DistributionStrategy."""

  @combinations.generate(strategy_combinations())
  def test_batchnorm_correctness(self, distribution):
    """After training, BN output should have ~zero mean and unit std."""
    with self.cached_session():
      model = keras.models.Sequential()
      norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
      model.add(norm)
      model.compile(loss='mse',
                    optimizer=gradient_descent.GradientDescentOptimizer(0.01),
                    distribute=distribution)

      # centered on 5.0, variance 10.0
      x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
      x = x.astype('float32')
      dataset = dataset_ops.Dataset.from_tensor_slices((x, x))
      dataset = dataset.repeat(100)
      dataset = batch_wrapper(dataset, 32, distribution)

      predict_dataset = dataset_ops.Dataset.from_tensor_slices(x)
      predict_dataset = predict_dataset.repeat(100)
      predict_dataset = batch_wrapper(predict_dataset, 32, distribution)

      model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10)
      out = model.predict(predict_dataset, steps=2)
      # Undo beta/gamma so 'out' is the normalized activation itself.
      out -= keras.backend.eval(norm.beta)
      out /= keras.backend.eval(norm.gamma)
      np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
      np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class TestDistributionStrategyCorrectness(test.TestCase,
                                          parameterized.TestCase):
  """Compares training with a DistributionStrategy against a plain run."""

  @combinations.generate(strategy_combinations())
  def test_metric_correctness(self, distribution):
    """An identity model on identity data must reach binary accuracy 1.0."""
    with self.cached_session():
      keras.backend.set_image_data_format('channels_last')
      num_samples = 10000
      x_train = np.random.randint(0, 2, num_samples)
      x_train = np.reshape(x_train, (num_samples, 1))
      y_train = x_train
      x_train = x_train.astype('float32')
      y_train = y_train.astype('float32')

      # Create identity model.
      model = keras.Sequential()
      model.add(
          keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
      model.compile(
          loss=keras.losses.mean_squared_error,
          optimizer=gradient_descent.GradientDescentOptimizer(0.5),
          metrics=[keras.metrics.BinaryAccuracy()],
          distribute=distribution)

      # Split the global batch evenly across replicas.
      batch_size = 64
      batch_size //= distribution.num_replicas_in_sync
      train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
      train_dataset = batch_wrapper(train_dataset, batch_size, distribution)
      history = model.fit(x=train_dataset, epochs=1, steps_per_epoch=10)
      self.assertEqual(history.history['binary_accuracy'], [1.0])

  @combinations.generate(strategy_and_inputs())
  def test_correctness(self, distribution, use_numpy):
    """Weights, eval results and predictions must match a non-DS run."""
    with self.cached_session():
      tolerance = 1e-5
      if isinstance(distribution, mirrored_strategy.MirroredStrategy):
        # TODO(b/119257215): use the default one once the flakyness is fixed.
        tolerance = 1e-4
      keras.backend.set_image_data_format('channels_last')
      np.random.seed(_RANDOM_SEED)
      random_seed.set_random_seed(_RANDOM_SEED)

      # Train, eval, and predict datasets are created with the same input numpy
      # arrays.
      # TODO(xiejw): Change this back to 10000, once we support final partial
      # batch.
      num_samples = 9984
      x_train = np.random.rand(num_samples, 1)
      y_train = 3 * x_train
      x_train = x_train.astype('float32')
      y_train = y_train.astype('float32')
      x_predict = [[1.], [2.], [3.], [4.]]

      # The model is built once and the initial weights are saved.
      # This is used to initialize the model for both the distribution and
      # non-distribution run. In addition, we add few non-linear layers to make
      # it non-trivial.
      model = keras.Sequential()
      model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))
      model.add(keras.layers.Dense(10, activation='relu'))
      model.add(keras.layers.Dense(10, activation='relu'))
      model.add(keras.layers.Dense(1))
      initial_weights = model.get_weights()

      def fit_and_predict(with_distribution=None):
        # We have initialized the model to the same weight for the distribution
        # and non-distribution run.
        model.set_weights(initial_weights)
        model.compile(
            loss=keras.losses.mean_squared_error,
            optimizer=gradient_descent.GradientDescentOptimizer(0.5),
            distribute=with_distribution)
        training_inputs, eval_inputs, predict_inputs = (
            get_correctness_test_inputs(use_numpy, with_distribution,
                                        x_train, y_train, x_predict))
        model.fit(**training_inputs)
        eval_result = model.evaluate(**eval_inputs)
        weights = model.get_weights()
        predict_result = model.predict(**predict_inputs)
        return weights, eval_result, predict_result

      wts_with_ds, eval_with_ds, predict_with_ds = fit_and_predict(
          with_distribution=distribution)
      wts_without_ds, eval_without_ds, predict_without_ds = fit_and_predict(
          with_distribution=None)
      # Verify that the weights, eval results, predict outputs are the same
      # within some limits of tolerance.
      self.assertAllClose(
          wts_with_ds, wts_without_ds, atol=tolerance, rtol=tolerance)
      self.assertAllClose(
          eval_with_ds, eval_without_ds, atol=tolerance, rtol=tolerance)
      self.assertAllClose(
          predict_with_ds, predict_without_ds, atol=tolerance, rtol=tolerance)
# TODO(priyag): Add a test for TPUStrategy with steps_per_run > 1.

if __name__ == '__main__':
  # Standard TensorFlow test entry point.
  test.main()
| tensorflow/contrib/distribute/python/keras_test.py | 48,363 | Generates the inputs for correctness check when enable Keras with DS.
Tests for tf.keras models using DistributionStrategy.
Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== TODO(anjalisridhar): Add a decorator that will allow us to run these tests as part of the tf.keras unit tests suite. Read m TPUs currently require fully defined input shapes, drop_remainder ensures the input will have fully defined shapes. TODO(anjalisridhar): Change the output dimension of the second Dense layer once the iterator output validation issue has been fixed. TODO(b/118776054): Use global batch size for Keras/DS support. TODO(b/119318587): We should not require batch_size when distribution is enabled. For dataset inputs, we do not pass batch_size to keras.fit/evaluate/predict. The batch size is part of the dataset. steps_per_run=2 Verify that the numpy value is copied to the variable. This verifies that we calculate the number of steps when the batch size is specified. 64 is the number of input samples. The number of replicas is equal to 3. The batch size(128) is larger than the number of input samples(64). The batch size(32) * num_replicas_in_sync(3) is 96 which is greater than the number of input samples(64). The number of replicas now is equal to 2. 32 is the batch size per replica. The number of batches is the ratio of input samples(64) to batch size(32) which is 2. The number of steps(1) is the ratio of number of batches(2) to the number of replicas(2). 16 is the batch size per replica. 
The number of batches is the ratio of input samples(64) to batch size(16) which is 4. The number of steps(2) is the ratio of number of batches(4) to the number of replicas(2). 64 is the number of input samples. The global batch size(21) across all replicas is the ratio of the input samples(64) to the steps(3). The batch size(10) per device is the ratio of the global batch size(21) to the number of replicas(2). The global batch size and batch size are rounded integer values. Call fit with validation data TODO(anjalisridhar): We need tests for when the batch size and steps are smaller and results in a 0 batch_size and steps value. with steps with batch_size with steps with batch_size Call fit with validation data TODO(anjalisridhar): We need tests for when the batch size and steps are smaller and results in a 0 batch_size and steps value. with steps with batch_size with steps with batch_size We take 6 input samples with each input having a dimension of 3 or 5. `predict` a list that is equal in length to the number of model outputs. In this test our model has two outputs and each element of `outs` corresponds to all the samples of one of the model outputs. Each of the output samples have a dimension of 7. We should process all the available input samples(6). Call fit with validation data Call fit with validation interleaved Manually control the validation running after each epoch. TODO(priyag): Enable this test for TPU. Currently tuples/dict don't work as clone_model's input_tensors argument only seems to accept list and not tuples or dict. Test with tuples Test with dict User forgets to batch the dataset Wrong input shape Input shapes are not fully known. Batch dimension is unknown as we are not using the drop_remainder argument. TODO(anjalisridhar): Modify this test to use Lambdas since we can compare meaningful values. Currently we don't pass the learning phase if the Lambda layer uses the learning phase. 
TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185. evaluate_output = model.evaluate(dataset, steps=20) self.assertAlmostEqual(evaluate_output[1], 1, 0) `predict` runs for 10 steps and in each step you process 10 samples. Removed device and input tensor shape details from the error message since the order of the device and the corresponding input tensor shape is not deterministic over different runs. Removed device and input tensor dtype details from the error message since the order of the device and the corresponding input tensor dtype is not deterministic over different runs. Test with validation split Test with sample weight. Test with not specifying the `steps` argument. TODO(priyag): Enable all strategies for this test. Currently it does not work for TPU due to some invalid datatype. centered on 5.0, variance 10.0 Create identity model. TODO(b/119257215): use the default one once the flakyness is fixed. Train, eval, and predict datasets are created with the same input numpy arrays. TODO(xiejw): Change this back to 10000, once we support final partial batch. The model is built once and the initial weights are saved. This is used to initialize the model for both the distribution and non-distribution run. In addition, we add few non-linear layers to make it non-trivial. We have initialized the model to the same weight for the distribution and non-distribution run. Verify that the weights, eval results, predict outputs are the same within some limits of tolerance. TODO(priyag): Add a test for TPUStrategy with steps_per_run > 1. | 5,624 | en | 0.84422 |
#!/bin/python
# -*- coding: utf-8 -*-
# import numpy as np
# define additional functions used in the *.yaml.
# Of course, as this is a trivial function you could have defined it in the *.yaml directly
def calc_nu(nub):
    """Map the parameter ``nub`` to ``nu`` via ``nu = nub / (1 - nub)``."""
    return nub / (1 - nub)
| pydsge/examples/dfi_funcs.py | 262 | !/bin/python -*- coding: utf-8 -*- import numpy as np define additional functions used in the *.yaml. Of course, as this is a trivial function you could have defined it in the *.yaml directly | 191 | en | 0.927263 |
#!/usr/bin/env python3
# Copyright 2018 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0.
# see get_args() below for usage message.
import argparse
import os
import sys
import math
import re
# The use of latin-1 encoding does not preclude reading utf-8. latin-1
# encoding means "treat words as sequences of bytes", and it is compatible
# with utf-8 encoding as well as other encodings such as gbk, as long as the
# spaces are also spaces in ascii (which we check). It is basically how we
# emulate the behavior of python before python3.
# Rebind stdout/stderr over the raw file descriptors as latin-1 so arbitrary
# byte sequences in words/phones round-trip; closefd=False keeps fds 1/2 open.
sys.stdout = open(1, 'w', encoding='latin-1', closefd=False)
sys.stderr = open(2, 'w', encoding='latin-1', closefd=False)
def get_args():
    """Builds the argparse parser and returns the parsed command-line args.

    See the individual --help strings below for the meaning of each option;
    --nonterminals and --left-context-phones must be supplied together.
    """
    parser = argparse.ArgumentParser(description="""This script creates the
       text form of a lexicon FST, to be compiled by fstcompile using the
       appropriate symbol tables (phones.txt and words.txt) . It will mostly
       be invoked indirectly via utils/prepare_lang.sh. The output goes to
       the stdout.""")

    parser.add_argument('--sil-phone', dest='sil_phone', type=str,
                        help="""Text form of optional-silence phone, e.g. 'SIL'. See also
                        the --silprob option.""")
    parser.add_argument('--sil-prob', dest='sil_prob', type=float, default=0.0,
                        help="""Probability of silence between words (including at the
                        beginning and end of word sequences). Must be in the range [0.0, 1.0].
                        This refers to the optional silence inserted by the lexicon; see
                        the --silphone option.""")
    parser.add_argument('--sil-disambig', dest='sil_disambig', type=str,
                        help="""Disambiguation symbol to disambiguate silence, e.g. #5.
                        Will only be supplied if you are creating the version of L.fst
                        with disambiguation symbols, intended for use with cyclic G.fst.
                        This symbol was introduced to fix a rather obscure source of
                        nondeterminism of CLG.fst, that has to do with reordering of
                        disambiguation symbols and phone symbols.""")
    parser.add_argument('--left-context-phones', dest='left_context_phones', type=str,
                        help="""Only relevant if --nonterminals is also supplied; this relates
                        to grammar decoding (see http://kaldi-asr.org/doc/grammar.html or
                        src/doc/grammar.dox). Format is a list of left-context phones,
                        in text form, one per line. E.g. data/lang/phones/left_context_phones.txt""")
    parser.add_argument('--nonterminals', type=str,
                        help="""If supplied, --left-context-phones must also be supplied.
                        List of user-defined nonterminal symbols such as #nonterm:contact_list,
                        one per line. E.g. data/local/dict/nonterminals.txt.""")
    parser.add_argument('lexiconp', type=str,
                        help="""Filename of lexicon with pronunciation probabilities
                        (normally lexiconp.txt), with lines of the form 'word prob p1 p2...',
                        e.g. 'a   1.0    ay'""")

    args = parser.parse_args()
    return args
def read_lexiconp(filename):
    """Reads the lexiconp.txt file in 'filename', with lines like 'word pron p1 p2 ...'.

    Returns a list of tuples (word, pron_prob, pron), where 'word' is a string,
    'pron_prob', a float, is the pronunciation probability (which must be >0.0
    and would normally be <=1.0), and 'pron' is a list of strings representing phones.
    An element in the returned list might be ('hello', 1.0, ['h', 'eh', 'l', 'ow']).

    Exits with a nonzero status (after printing to stderr) on malformed input.
    """
    ans = []
    found_empty_prons = False
    found_large_pronprobs = False
    # See the comment near the top of this file, RE why we use latin-1.
    with open(filename, 'r', encoding='latin-1') as f:
        whitespace = re.compile("[ \t]+")
        for line in f:
            a = whitespace.split(line.strip(" \t\r\n"))
            if len(a) < 2:
                print("{0}: error: found bad line '{1}' in lexicon file {2} ".format(
                    sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
                sys.exit(1)
            word = a[0]
            if word == "<eps>":
                # This would clash with the epsilon symbol normally used in OpenFst.
                # Bug fix: the original format args were (line, filename), which
                # printed the offending line in place of the program name and
                # dropped the filename from its intended slot.
                print("{0}: error: found <eps> as a word in lexicon file "
                      "{1}".format(sys.argv[0], filename), file=sys.stderr)
                sys.exit(1)
            try:
                pron_prob = float(a[1])
            except ValueError:
                # Narrowed from a bare 'except', which would also have caught
                # SystemExit and KeyboardInterrupt.
                print("{0}: error: found bad line '{1}' in lexicon file {2}, 2nd field "
                      "should be pron-prob".format(sys.argv[0], line.strip(" \t\r\n"), filename),
                      file=sys.stderr)
                sys.exit(1)
            prons = a[2:]
            if pron_prob <= 0.0:
                # Bug fix: the filename placeholder was previously a second {1},
                # so the line was printed twice and the filename never shown.
                print("{0}: error: invalid pron-prob in line '{1}' of lexicon file {2} ".format(
                    sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
                sys.exit(1)
            if len(prons) == 0:
                found_empty_prons = True
            ans.append((word, pron_prob, prons))
            if pron_prob > 1.0:
                found_large_pronprobs = True
    if found_empty_prons:
        print("{0}: warning: found at least one word with an empty pronunciation "
              "in lexicon file {1}.".format(sys.argv[0], filename),
              file=sys.stderr)
    if found_large_pronprobs:
        print("{0}: warning: found at least one word with pron-prob >1.0 "
              "in {1}".format(sys.argv[0], filename), file=sys.stderr)

    if len(ans) == 0:
        print("{0}: error: found no pronunciations in lexicon file {1}".format(
            sys.argv[0], filename), file=sys.stderr)
        sys.exit(1)
    return ans
def write_nonterminal_arcs(start_state, loop_state, next_state,
                           nonterminals, left_context_phones):
    """Print (to stdout) the extra lexicon-FST arcs used in grammar decoding.

    See "Special symbols in L.fst",
    kaldi-asr.org/doc/grammar.html#grammar_special_l. Called from
    write_fst_no_silence and write_fst_silence.

    Args:
      start_state: the start-state of L.fst.
      loop_state: the state of high out-degree in L.fst where words leave
        and enter.
      next_state: the first number this function may allocate for its own
        states; the updated value is returned.
      nonterminals: user-defined nonterminal symbols as a list of strings,
        e.g. ['#nonterm:contact_list', ...].
      left_context_phones: phones that may appear as left-context,
        e.g. ['a', 'ah', ... '#nonterm_bos'].

    Returns:
      The updated next un-allocated state number.
    """
    def emit(src, dest, phone, word, cost):
        # One FST arc in OpenFst text format: src dest ilabel olabel weight.
        print("{0}\t{1}\t{2}\t{3}\t{4}".format(src, dest, phone, word, cost))

    shared_state = next_state
    final_state = shared_state + 1
    updated_next_state = final_state + 1

    emit(start_state, shared_state, '#nonterm_begin', '#nonterm_begin', 0.0)
    for nonterminal in nonterminals:
        emit(loop_state, shared_state, nonterminal, nonterminal, 0.0)

    # context_cost equals log(len(left_context_phones)); applying it keeps the
    # FST stochastic (sum-to-one, like an HMM) so weight pushing behaves. The
    # grammar-FST code cancels this cost when splicing (see CombineArcs()).
    context_cost = -math.log(1.0 / len(left_context_phones))
    for context_phone in left_context_phones:
        emit(shared_state, loop_state, context_phone, '<eps>', context_cost)

    # Arc from loop-state to a final-state with #nonterm_end as ilabel/olabel.
    emit(loop_state, final_state, '#nonterm_end', '#nonterm_end', 0.0)
    print("{0}\t{1}".format(final_state, 0.0))
    return updated_next_state
def write_fst_no_silence(lexicon, nonterminals=None, left_context_phones=None):
    """Writes the text format of L.fst to the standard output. This version is for
    when --sil-prob=0.0, meaning there is no optional silence allowed.

    'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by
    read_lexiconp().
    'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
    is either None, or the user-defined nonterminal symbols as a list of
    strings, e.g. ['#nonterm:contact_list', ... ].
    'left_context_phones', which also relates to grammar decoding, and must be
    supplied if 'nonterminals' is supplied, is either None or a list of
    phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
    """
    # With no optional silence, the start state doubles as the word-loop state.
    start_state = 0
    loop_state = start_state
    next_state = 1  # the next un-allocated state, will be incremented as we go.

    for (word, pronprob, pron) in lexicon:
        cost = -math.log(pronprob)
        cur_state = loop_state
        # Every phone but the last gets its own arc; the word label and its
        # pron-prob cost ride on the first arc.
        for i in range(len(pron) - 1):
            print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
                src=cur_state,
                dest=next_state,
                phone=pron[i],
                word=(word if i == 0 else '<eps>'),
                cost=(cost if i == 0 else 0.0)))
            cur_state = next_state
            next_state += 1

        # Last arc returns to the loop state.
        i = len(pron) - 1  # note: i == -1 if pron is empty.
        print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
            src=cur_state,
            dest=loop_state,
            phone=(pron[i] if i >= 0 else '<eps>'),
            word=(word if i <= 0 else '<eps>'),
            cost=(cost if i <= 0 else 0.0)))

    if nonterminals is not None:
        # Bug fix: 'start_state' was previously referenced here without ever
        # being defined in this function, raising NameError whenever
        # nonterminals were supplied; it is now defined above as state 0.
        next_state = write_nonterminal_arcs(
            start_state, loop_state, next_state,
            nonterminals, left_context_phones)

    print("{state}\t{final_cost}".format(
        state=loop_state,
        final_cost=0.0))
def write_fst_with_silence(lexicon, sil_prob, sil_phone, sil_disambig,
                           nonterminals=None, left_context_phones=None):
    """Writes the text format of L.fst to the standard output.  This version is
    for when --sil-prob != 0.0, meaning optional silence is allowed after
    each word.

    'lexicon' is a list of 3-tuples (word, pron-prob, pron)
       as returned by read_lexiconp().
    'sil_prob', which is expected to be strictly between 0.0 and 1.0, is the
       probability of silence.
    'sil_phone' is the silence phone, e.g. "SIL".
    'sil_disambig' is either None, or the silence disambiguation symbol, e.g. "#5".
    'nonterminals', which relates to grammar decoding (see
       kaldi-asr.org/doc/grammar.html), is either None, or the user-defined
       nonterminal symbols as a list of strings, e.g.
       ['#nonterm:contact_list', ... ].
    'left_context_phones', which also relates to grammar decoding, and must be
       supplied if 'nonterminals' is supplied, is either None or a list of
       phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
    """
    assert 0.0 < sil_prob < 1.0
    sil_cost = -math.log(sil_prob)
    no_sil_cost = -math.log(1.0 - sil_prob)

    def emit_arc(src, dest, phone, word, cost):
        # One arc in OpenFst text format: src dest ilabel olabel cost.
        print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
            src=src, dest=dest, phone=phone, word=word, cost=cost))

    start_state = 0
    loop_state = 1   # words enter and leave from here
    sil_state = 2    # words terminate here when followed by silence; this
                     # state has a silence transition to loop_state.
    next_state = 3   # the next un-allocated state, incremented as we go.

    # From the start state we either skip silence (straight to the loop
    # state) or take it (via the silence state).
    emit_arc(start_state, loop_state, '<eps>', '<eps>', no_sil_cost)
    emit_arc(start_state, sil_state, '<eps>', '<eps>', sil_cost)

    if sil_disambig is None:
        emit_arc(sil_state, loop_state, sil_phone, '<eps>', 0.0)
    else:
        # Route the silence phone through an extra state that emits the
        # disambiguation symbol before rejoining the loop.
        sil_disambig_state = next_state
        next_state += 1
        emit_arc(sil_state, sil_disambig_state, sil_phone, '<eps>', 0.0)
        emit_arc(sil_disambig_state, loop_state, sil_disambig, '<eps>', 0.0)

    for (word, pronprob, pron) in lexicon:
        pron_cost = -math.log(pronprob)
        cur_state = loop_state
        # Arcs for all phones but the last; word label and pronunciation
        # cost go on the first arc.
        for pos in range(len(pron) - 1):
            emit_arc(cur_state, next_state, pron[pos],
                     word if pos == 0 else '<eps>',
                     pron_cost if pos == 0 else 0.0)
            cur_state = next_state
            next_state += 1

        # The last phone (pos == -1 if the pronunciation is empty) has two
        # destinations: back to the loop without silence, or into silence.
        pos = len(pron) - 1
        last_phone = pron[pos] if pos >= 0 else '<eps>'
        last_word = word if pos <= 0 else '<eps>'
        last_cost = pron_cost if pos <= 0 else 0.0
        emit_arc(cur_state, loop_state, last_phone, last_word,
                 no_sil_cost + last_cost)
        emit_arc(cur_state, sil_state, last_phone, last_word,
                 sil_cost + last_cost)

    if nonterminals is not None:
        next_state = write_nonterminal_arcs(
            start_state, loop_state, next_state,
            nonterminals, left_context_phones)

    print('{0}\t{1}'.format(loop_state, 0.0))
def write_words_txt(orig_lines, highest_numbered_symbol, nonterminals, filename):
    """Writes an updated words.txt to 'filename'.

    'orig_lines' is the original contents of words.txt as a list of strings
    (without the newlines); 'highest_numbered_symbol' is the highest numbered
    symbol in the original words.txt; 'nonterminals' is a list of strings like
    '#nonterm:foo'.  The special symbols #nonterm_begin and #nonterm_end plus
    the user-defined nonterminals are appended with consecutive new ids.
    """
    new_symbols = ['#nonterm_begin', '#nonterm_end'] + nonterminals
    with open(filename, 'w', encoding='latin-1') as f:
        for line in orig_lines:
            print(line, file=f)
        for offset, symbol in enumerate(new_symbols):
            print("{0} {1}".format(symbol, highest_numbered_symbol + 1 + offset),
                  file=f)
def read_nonterminals(filename):
    """Reads the user-defined nonterminal symbols in 'filename', checks that
    it has the expected format and has no duplicates, and returns the nonterminal
    symbols as a list of strings, e.g.
    ['#nonterm:contact_list', '#nonterm:phone_number', ... ]. """
    # Use a 'with' block so the file handle is closed deterministically
    # (the original left it to the garbage collector).  latin-1: treat words
    # as byte sequences, compatible with utf-8 input.
    with open(filename, 'r', encoding='latin-1') as f:
        ans = [line.strip(" \t\r\n") for line in f]
    if len(ans) == 0:
        raise RuntimeError("The file {0} contains no nonterminals symbols.".format(filename))
    for nonterm in ans:
        # startswith is equivalent to the original slice comparison
        # (nonterm[:9] != '#nonterm:') but clearer.
        if not nonterm.startswith('#nonterm:'):
            raise RuntimeError("In file '{0}', expected nonterminal symbols to start with '#nonterm:', found '{1}'"
                               .format(filename, nonterm))
    if len(set(ans)) != len(ans):
        raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
    return ans
def read_left_context_phones(filename):
    """Reads, checks, and returns a list of left-context phones, in text form, one
    per line.  Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]"""
    # Use a 'with' block so the file handle is closed deterministically
    # (the original left it to the garbage collector).
    with open(filename, 'r', encoding='latin-1') as f:
        ans = [line.strip(" \t\r\n") for line in f]
    if len(ans) == 0:
        raise RuntimeError("The file {0} contains no left-context phones.".format(filename))
    whitespace = re.compile("[ \t]+")
    for s in ans:
        # Each line must contain exactly one whitespace-free token.
        if len(whitespace.split(s)) != 1:
            raise RuntimeError("The file {0} contains an invalid line '{1}'".format(filename, s))
    if len(set(ans)) != len(ans):
        # BUGFIX: the message used to say "nonterminal symbols" (copy-paste
        # from read_nonterminals); this file contains left-context phones.
        raise RuntimeError("Duplicate left-context phones are present in file {0}".format(filename))
    return ans
def is_token(s):
    """Returns true if s is a string and is space-free."""
    if not isinstance(s, str):
        return False
    # Splitting on whitespace yields exactly [s] iff s contains none.
    parts = re.compile("[ \t\r\n]+").split(s)
    return parts == [s]
def main():
    """Command-line entry point: reads the lexicon and writes L.fst in text
    form to the standard output, with or without optional silence depending
    on --sil-prob.  Exits with status 1 on invalid option combinations."""
    args = get_args()

    lexicon = read_lexiconp(args.lexiconp)

    if args.nonterminals is None:
        nonterminals, left_context_phones = None, None
    else:
        if args.left_context_phones is None:
            # BUGFIX: this diagnostic used to go to stdout, corrupting the
            # FST text this script writes there; send it to stderr like the
            # other error messages below.
            print("{0}: if --nonterminals is specified, --left-context-phones must also "
                  "be specified".format(sys.argv[0]), file=sys.stderr)
            sys.exit(1)
        nonterminals = read_nonterminals(args.nonterminals)
        left_context_phones = read_left_context_phones(args.left_context_phones)

    if args.sil_prob == 0.0:
        write_fst_no_silence(lexicon,
                             nonterminals=nonterminals,
                             left_context_phones=left_context_phones)
    else:
        # Do some checking that the options make sense.
        if args.sil_prob < 0.0 or args.sil_prob >= 1.0:
            print("{0}: invalid value specified --sil-prob={1}".format(
                sys.argv[0], args.sil_prob), file=sys.stderr)
            sys.exit(1)
        if not is_token(args.sil_phone):
            print("{0}: you specified --sil-prob={1} but --sil-phone is set "
                  "to '{2}'".format(sys.argv[0], args.sil_prob, args.sil_phone),
                  file=sys.stderr)
            sys.exit(1)
        if args.sil_disambig is not None and not is_token(args.sil_disambig):
            print("{0}: invalid value --sil-disambig='{1}' was specified."
                  "".format(sys.argv[0], args.sil_disambig), file=sys.stderr)
            sys.exit(1)
        write_fst_with_silence(lexicon, args.sil_prob, args.sil_phone,
                               args.sil_disambig,
                               nonterminals=nonterminals,
                               left_context_phones=left_context_phones)
# Standard script entry-point guard: run main() only when executed directly.
if __name__ == '__main__':
    main()
| egs/wsj/s5/utils/lang/make_lexicon_fst.py | 19,129 | Returns true if s is a string and is space-free.
Reads, checks, and returns a list of left-context phones, in text form, one
per line. Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]
Reads the lexiconp.txt file in 'filename', with lines like 'word pron p1 p2 ...'.
Returns a list of tuples (word, pron_prob, pron), where 'word' is a string,
'pron_prob', a float, is the pronunciation probability (which must be >0.0
and would normally be <=1.0), and 'pron' is a list of strings representing phones.
An element in the returned list might be ('hello', 1.0, ['h', 'eh', 'l', 'ow']).
Reads the user-defined nonterminal symbols in 'filename', checks that
it has the expected format and has no duplicates, and returns the nonterminal
symbols as a list of strings, e.g.
['#nonterm:contact_list', '#nonterm:phone_number', ... ].
Writes the text format of L.fst to the standard output. This version is for
when --sil-prob=0.0, meaning there is no optional silence allowed.
'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by
read_lexiconp().
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
'left_context_phones', which also relates to grammar decoding, and must be
supplied if 'nonterminals' is supplied is either None or a list of
phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
Writes the text format of L.fst to the standard output. This version is for
when --sil-prob != 0.0, meaning there is optional silence
'lexicon' is a list of 3-tuples (word, pron-prob, prons)
as returned by read_lexiconp().
'sil_prob', which is expected to be strictly between 0.0 and 1.0, is the
probability of silence
'sil_phone' is the silence phone, e.g. "SIL".
'sil_disambig' is either None, or the silence disambiguation symbol, e.g. "#5".
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
'left_context_phones', which also relates to grammar decoding, and must be
supplied if 'nonterminals' is supplied is either None or a list of
phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
This function relates to the grammar-decoding setup, see
kaldi-asr.org/doc/grammar.html. It is called from write_fst_no_silence
and write_fst_with_silence, and writes to stdout some extra arcs
in the lexicon FST that relate to nonterminal symbols.
See the section "Special symbols in L.fst,
kaldi-asr.org/doc/grammar.html#grammar_special_l.
start_state: the start-state of L.fst.
loop_state: the state of high out-degree in L.fst where words leave
and enter.
next_state: the number from which this function can start allocating its
own states. the updated value of next_state will be returned.
nonterminals: the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
left_context_phones: a list of phones that may appear as left-context,
e.g. ['a', 'ah', ... '#nonterm_bos'].
Writes updated words.txt to 'filename'. 'orig_lines' is the original lines
in the words.txt file as a list of strings (without the newlines);
highest_numbered_symbol is the highest numbered symbol in the original
words.txt; nonterminals is a list of strings like '#nonterm:foo'.
!/usr/bin/env python3 Copyright 2018 Johns Hopkins University (author: Daniel Povey) Apache 2.0. see get_args() below for usage message. The use of latin-1 encoding does not preclude reading utf-8. latin-1 encoding means "treat words as sequences of bytes", and it is compatible with utf-8 encoding as well as other encodings such as gbk, as long as the spaces are also spaces in ascii (which we check). It is basically how we emulate the behavior of python before python3. See the comment near the top of this file, RE why we use latin-1. This would clash with the epsilon symbol normally used in OpenFst. this_cost equals log(len(left_context_phones)) but the expression below better captures the meaning. Applying this cost to arcs keeps the FST stochatic (sum-to-one, like an HMM), so that if we do weight pushing things won't get weird. In the grammar-FST code when we splice things together we will cancel out this cost, see the function CombineArcs(). arc from loop-state to a final-state with nonterm_end as ilabel and olabel the next un-allocated state, will be incremented as we go. note: i == -1 if pron is empty. words enter and leave from here words terminate here when followed by silence; this state has a silence transition to loop_state. the next un-allocated state, will be incremented as we go. note: i == -1 if pron is empty. Do some checking that the options make sense. (lines, highest_symbol) = read_words_txt(args.input_words_txt) nonterminals = read_nonterminals(args.nonterminal_symbols_list) write_words_txt(lines, highest_symbol, nonterminals, args.output_words_txt) | 5,183 | en | 0.823022 |
# Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
    """Inspects an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        AppInfo: The information regarding the application. Call
            :meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
            representation.
    """
    return AppInfo(
        inspect_routes(app),
        inspect_middlewares(app),
        inspect_static_routes(app),
        inspect_sinks(app),
        inspect_error_handlers(app),
        app._ASGI,
    )
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.
    """
    router = app._router

    # Dispatch on the concrete router class; third parties may add entries
    # via register_router().
    handler = _supported_routers.get(type(router))
    if handler is not None:
        return handler(router)

    raise TypeError(
        'Unsupported router class {}. Use "register_router" '
        'to register a function that can inspect the router '
        'used by the provided application'.format(type(router))
    )
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.

    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """

    def decorator(inspector):
        # Refuse to silently overwrite a previously registered inspector.
        if router_class in _supported_routers:
            raise ValueError(
                'Another function is already registered'
                ' for the router {}'.format(router_class)
            )
        _supported_routers[router_class] = inspector
        return inspector

    return decorator
# router inspection registry
# Maps a router class to the callable that knows how to inspect it;
# populated by the register_router() decorator, consulted by inspect_routes().
_supported_routers = {}  # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspects the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
            been added to the application.
    """
    # Each registered entry is a tuple whose first element is the
    # static-route object itself.
    return [
        StaticRouteInfo(route._prefix, route._directory, route._fallback_filename)
        for route, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspects the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """

    def describe(prefix, sink):
        # prefix is a compiled regex; expose its source pattern.
        source_info, name = _get_source_info_and_name(sink)
        return SinkInfo(prefix.pattern, name, source_info)

    return [describe(prefix, sink) for prefix, sink, _ in app._sinks]
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspects the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
            application.
    """
    handlers = []
    for exception_type, handler in app._error_handlers.items():
        source_info, handler_name = _get_source_info_and_name(handler)
        handlers.append(
            ErrorHandlerInfo(
                exception_type.__name__,
                handler_name,
                source_info,
                _is_internal(handler),
            )
        )
    return handlers
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspects the middleware components of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    # The prepared stacks describe the execution tree: one stack per phase
    # (request / resource / response).
    stacks = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)

    tree_levels = []
    for stack in stacks:
        entries = []
        for method in stack:
            _, method_name = _get_source_info_and_name(method)
            _, owner_name = _get_source_info_and_name(type(method.__self__))
            entries.append(MiddlewareTreeItemInfo(method_name, owner_name))
        tree_levels.append(entries)
    tree = MiddlewareTreeInfo(*tree_levels)

    labels = 'Process request', 'Process resource', 'Process response'
    classes = []
    for component in app._unprepared_middleware:
        # Prepare each component in isolation to discover which of the three
        # phase methods it implements.
        prepared = app_helpers.prepare_middleware([component], True, app._ASGI)
        class_source_info, class_name = _get_source_info_and_name(type(component))
        method_infos = []
        for phase_methods, _label in zip(prepared, labels):
            if phase_methods:
                fn = phase_methods[0]
                method_infos.append(
                    MiddlewareMethodInfo(fn.__name__, _get_source_info(fn))
                )
        classes.append(
            MiddlewareClassInfo(class_name, class_source_info, method_infos)
        )

    return MiddlewareInfo(tree, classes, app._independent_middleware)
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.

    Default route inspector for CompiledRouter.

    Args:
        router (CompiledRouter): The router to inspect.

    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """
    collected = []  # type: List[RouteInfo]

    def _walk(nodes, prefix):
        # Depth-first traversal of the router tree, accumulating the URI
        # path segment by segment.
        for node in nodes:
            segment_path = prefix + '/' + node.raw_segment
            if node.resource is not None:
                method_infos = []
                if node.method_map:
                    for verb, responder in node.method_map.items():
                        # Suffixed responders are bound via functools.partial;
                        # unwrap to reach the actual function.
                        if isinstance(responder, partial):
                            target = responder.func
                        else:
                            target = responder
                        method_infos.append(
                            RouteMethodInfo(
                                verb,
                                _get_source_info(target),
                                target.__name__,
                                _is_internal(target),
                            )
                        )
                source_info, class_name = _get_source_info_and_name(node.resource)
                collected.append(
                    RouteInfo(segment_path, class_name, source_info, method_infos)
                )
            if node.children:
                _walk(node.children, segment_path)

    _walk(router._roots, '')
    return collected
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
__visit_name__ = 'N/A'
def to_string(self, verbose=False, internal=False) -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
"""
return StringVisitor(verbose, internal).process(self)
def __repr__(self):
return self.to_string()
class RouteMethodInfo(_Traversable):
    """Describes a responder method.

    Args:
        method (str): The HTTP method of this responder.
        source_info (str): The source path of this function.
        function_name (str): Name of the function.
        internal (bool): Whether or not this was a default responder added
            by the framework.

    Attributes:
        suffix (str): The suffix of this route function. This is set to an empty
            string when the function has no suffix.
    """

    __visit_name__ = 'route_method'

    def __init__(
        self, method: str, source_info: str, function_name: str, internal: bool
    ):
        self.method = method
        self.source_info = source_info
        self.function_name = function_name
        self.internal = internal
        # NOTE(CaselIT): internal falcon names do not start with on and do
        # not have a suffix.
        if function_name.startswith('on'):
            # e.g. 'on_get_collection' -> suffix 'collection';
            # 'on_get' -> '' (no suffix).
            name_parts = function_name.split('_')
            self.suffix = '_'.join(name_parts[2:])
        else:
            self.suffix = ''
class RouteInfo(_Traversable):
    """Describes a single route of the application.

    Args:
        path (str): The path of this route.
        class_name (str): The class name of the responder of this route.
        source_info (str): The source path where this responder was defined.
        methods (List[RouteMethodInfo]): List of methods defined in the route.
    """

    __visit_name__ = 'route'

    def __init__(
        self, path: str, class_name: str, source_info: str,
        methods: List[RouteMethodInfo],
    ):
        self.path = path
        self.class_name = class_name
        self.source_info = source_info
        self.methods = methods
class StaticRouteInfo(_Traversable):
    """Describes a static route.

    Args:
        prefix (str): The prefix of the static route.
        directory (str): The directory for the static route.
        fallback_filename (str or None): Fallback filename to serve.
    """

    __visit_name__ = 'static_route'

    def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
        self.prefix = prefix
        self.directory = directory
        self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
    """Describes a sink of the application.

    Args:
        prefix (str): The prefix of the sink.
        name (str): The name of the sink function or class.
        source_info (str): The source path where this sink was defined.
    """

    __visit_name__ = 'sink'

    def __init__(self, prefix: str, name: str, source_info: str):
        self.prefix = prefix
        self.name = name
        self.source_info = source_info
class ErrorHandlerInfo(_Traversable):
    """Describes an error handler.

    Args:
        error (str): The name of the error type.
        name (str): The name of the handler.
        source_info (str): The source path where this error handler was defined.
        internal (bool): Whether or not this is a default error handler added by
            the framework.
    """

    __visit_name__ = 'error_handler'

    def __init__(self, error: str, name: str, source_info: str, internal: bool):
        self.error = error
        self.name = name
        self.source_info = source_info
        self.internal = internal
class MiddlewareMethodInfo(_Traversable):
    """Describes a single method of a middleware class.

    Args:
        function_name (str): Name of the method.
        source_info (str): The source path of the method.
    """

    __visit_name__ = 'middleware_method'

    def __init__(self, function_name: str, source_info: str):
        self.function_name = function_name
        self.source_info = source_info
        # Always False; present only so this type can be filtered with the
        # same helpers as RouteMethodInfo.
        self.internal = False  # added for compatibility with RouteMethodInfo
class MiddlewareClassInfo(_Traversable):
    """Describes a middleware class registered on the app.

    Args:
        name (str): The name of the middleware class.
        source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of methods defined by the
            middleware class.
    """

    __visit_name__ = 'middleware_class'

    def __init__(
        self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
    ):
        self.name = name
        self.source_info = source_info
        self.methods = methods
class MiddlewareTreeItemInfo(_Traversable):
    """Describes a single entry of the middleware execution tree.

    Args:
        name (str): The name of the method.
        class_name (str): The class name of the method.
    """

    __visit_name__ = 'middleware_tree_item'

    # Arrow glyph used by the string renderer for each middleware phase.
    _symbols = {
        'process_request': '→',
        'process_resource': '↣',
        'process_response': '↢',
    }

    def __init__(self, name: str, class_name: str):
        self.name = name
        self.class_name = class_name
class MiddlewareTreeInfo(_Traversable):
    """Describes the middleware methods used by the app, grouped by phase.

    Args:
        request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
        resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
        response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
    """

    __visit_name__ = 'middleware_tree'

    def __init__(
        self,
        request: List[MiddlewareTreeItemInfo],
        resource: List[MiddlewareTreeItemInfo],
        response: List[MiddlewareTreeItemInfo],
    ):
        self.request = request
        self.resource = resource
        self.response = response
class MiddlewareInfo(_Traversable):
    """Describes the middleware of the app.

    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes
            of the app.
        independent (bool): Whether or not the middleware components are
            executed independently.

    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """

    __visit_name__ = 'middleware'

    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.middleware_tree = middleware_tree
        self.middleware_classes = middleware_classes
        self.independent = independent
        # Human-readable form, used by the string renderer.
        if independent:
            self.independent_text = 'Middleware are independent'
        else:
            self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
    """Describes an application as a whole.

    Args:
        routes (List[RouteInfo]): The routes of the application.
        middleware (MiddlewareInfo): The middleware information in the application.
        static_routes (List[StaticRouteInfo]): The static routes of this application.
        sinks (List[SinkInfo]): The sinks of this application.
        error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
        asgi (bool): Whether or not this is an ASGI application.
    """

    __visit_name__ = 'app'

    def __init__(
        self,
        routes: List[RouteInfo],
        middleware: MiddlewareInfo,
        static_routes: List[StaticRouteInfo],
        sinks: List[SinkInfo],
        error_handlers: List[ErrorHandlerInfo],
        asgi: bool,
    ):
        self.routes = routes
        self.middleware = middleware
        self.static_routes = static_routes
        self.sinks = sinks
        self.error_handlers = error_handlers
        self.asgi = asgi

    def to_string(self, verbose=False, internal=False, name='') -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal falcon route methods
                and error handlers. Defaults to ``False``.
            name (str, optional): The name of the application, to be output at the
                beginning of the text. Defaults to ``'Falcon App'``.

        Returns:
            str: A string representation of the application.
        """
        # Overrides the base implementation to also forward the app name.
        visitor = StringVisitor(verbose, internal, name)
        return visitor.process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.

    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """

    def process(self, instance: '_Traversable'):
        """Process the instance, by calling the appropriate visit method.

        Uses the `__visit_name__` attribute of the `instance` to obtain the
        method to use.

        Args:
            instance (_Traversable): The instance to process.

        Raises:
            RuntimeError: If this visitor has no ``visit_<name>`` method for
                the instance's ``__visit_name__``.
        """
        # BUGFIX: only the attribute lookup is guarded now.  The original
        # wrapped the visit call itself in the try block, so an
        # AttributeError raised *inside* a visit method was misreported as
        # "this visitor does not support <type>".
        try:
            visit = getattr(self, 'visit_{}'.format(instance.__visit_name__))
        except AttributeError as e:
            raise RuntimeError(
                'This visitor does not support {}'.format(type(instance))
            ) from e
        return visit(instance)
class StringVisitor(InspectVisitor):
"""Visitor that returns a string representation of the info class.
This is used automatically by calling ``to_string()`` on the info class.
It can also be used directly by calling ``StringVisitor.process(info_instance)``.
Args:
verbose (bool, optional): Adds more information. Defaults to ``False``.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
"""
def __init__(self, verbose=False, internal=False, name=''):
self.verbose = verbose
self.internal = internal
self.name = name
self.indent = 0
@property
def tab(self):
"""Get the current tabulation."""
return ' ' * self.indent
def visit_route_method(self, route_method: RouteMethodInfo) -> str:
"""Visit a RouteMethodInfo instance. Usually called by `process`."""
text = '{0.method} - {0.function_name}'.format(route_method)
if self.verbose:
text += ' ({0.source_info})'.format(route_method)
return text
def _methods_to_string(self, methods: List):
"""Return a string from the list of methods."""
tab = self.tab + ' ' * 3
methods = _filter_internal(methods, self.internal)
if not methods:
return ''
text_list = [self.process(m) for m in methods]
method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
return '\n'.join(method_text)
def visit_route(self, route: RouteInfo) -> str:
"""Visit a RouteInfo instance. Usually called by `process`."""
text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
if self.verbose:
text += ' ({0.source_info})'.format(route)
method_text = self._methods_to_string(route.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_static_route(self, static_route: StaticRouteInfo) -> str:
"""Visit a StaticRouteInfo instance. Usually called by `process`."""
text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
if static_route.fallback_filename:
text += ' [{0.fallback_filename}]'.format(static_route)
return text
def visit_sink(self, sink: SinkInfo) -> str:
"""Visit a SinkInfo instance. Usually called by `process`."""
text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
if self.verbose:
text += ' ({0.source_info})'.format(sink)
return text
def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
if self.verbose:
text += ' ({0.source_info})'.format(error_handler)
return text
def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
"""Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
text = '{0.function_name}'.format(middleware_method)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_method)
return text
def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_class)
method_text = self._methods_to_string(middleware_class.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
"""Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
symbol = mti._symbols.get(mti.name, '→')
return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
"""Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
before = len(m_tree.request) + len(m_tree.resource)
after = len(m_tree.response)
if before + after == 0:
return ''
each = 2
initial = self.indent
if after > before:
self.indent += each * (after - before)
text = []
for r in m_tree.request:
text.append(self.process(r))
self.indent += each
if text:
text.append('')
for r in m_tree.resource:
text.append(self.process(r))
self.indent += each
if m_tree.resource or not text:
text.append('')
self.indent += each
text.append('{}├── Process route responder'.format(self.tab))
self.indent -= each
if m_tree.response:
text.append('')
for r in m_tree.response:
self.indent -= each
text.append(self.process(r))
self.indent = initial
return '\n'.join(text)
def visit_middleware(self, middleware: MiddlewareInfo) -> str:
"""Visit a MiddlewareInfo instance. Usually called by `process`."""
text = self.process(middleware.middleware_tree)
if self.verbose:
self.indent += 4
m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
self.indent -= 4
if m_text:
text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
return text
def visit_app(self, app: AppInfo) -> str:
    """Visit a AppInfo instance. Usually called by `process`."""
    # Label the application as ASGI or WSGI next to its name.
    type_ = 'ASGI' if app.asgi else 'WSGI'
    # visit_app is the traversal entry point, so reset the indentation.
    self.indent = 4
    text = '{} ({})'.format(self.name or 'Falcon App', type_)
    if app.routes:
        routes = '\n'.join(self.process(r) for r in app.routes)
        text += '\n• Routes:\n{}'.format(routes)
    middleware_text = self.process(app.middleware)
    if middleware_text:
        text += '\n• Middleware ({}):\n{}'.format(
            app.middleware.independent_text, middleware_text
        )
    if app.static_routes:
        static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
        text += '\n• Static routes:\n{}'.format(static_routes)
    if app.sinks:
        sinks = '\n'.join(self.process(s) for s in app.sinks)
        text += '\n• Sinks:\n{}'.format(sinks)
    # Framework-added error handlers are hidden unless self.internal is set.
    errors = _filter_internal(app.error_handlers, self.internal)
    if errors:
        errs = '\n'.join(self.process(e) for e in errors)
        text += '\n• Error handlers:\n{}'.format(errs)
    return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
    """Return ``"<file>:<line>"`` for *obj*, or *default* when unavailable."""
    try:
        path = inspect.getsourcefile(obj)
        _, line_number = inspect.findsource(obj)
    except Exception:
        # NOTE(vytas): If Falcon is cythonized, all default
        # responders coming from cythonized modules will
        # appear as built-in functions, and raise a
        # TypeError when trying to locate the source file.
        return default
    return '{}:{}'.format(path, line_number)
def _get_source_info_and_name(obj):
    """Attempt to get the definition file and line of obj and its name."""
    info = _get_source_info(obj, None)
    if info is None:
        # NOTE(caselit): a class instance yields None here; fall back to
        # its type instead.
        info = _get_source_info(type(obj))
    candidate = getattr(obj, '__name__', None)
    if candidate is not None:
        return info, candidate
    # Objects without a __name__ of their own (instances) report the name
    # of their class; '[unknown]' is the last resort.
    return info, getattr(type(obj), '__name__', '[unknown]')
def _is_internal(obj):
    """Check if the module of the object is a falcon module."""
    module = inspect.getmodule(obj)
    # Objects with no resolvable module are never treated as internal.
    return bool(module) and module.__name__.startswith('falcon.')
def _filter_internal(iterable, return_internal):
    """Return *iterable* unchanged, or only its non-internal elements."""
    if not return_internal:
        return [element for element in iterable if not element.internal]
    return iterable
| falcon/inspect.py | 26,938 | Describes an application.
Args:
routes (List[RouteInfo]): The routes of the application.
middleware (MiddlewareInfo): The middleware information in the application.
static_routes (List[StaticRouteInfo]): The static routes of this application.
sinks (List[SinkInfo]): The sinks of this application.
error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
asgi (bool): Whether or not this is an ASGI application.
Describes an error handler.
Args:
error (name): The name of the error type.
name (str): The name of the handler.
source_info (str): The source path where this error handler was defined.
internal (bool): Whether or not this is a default error handler added by
the framework.
Base visitor class that implements the `process` method.
Subclasses must implement ``visit_<name>`` methods for each supported class.
Describes a middleware class.
Args:
name (str): The name of the middleware class.
source_info (str): The source path where the middleware was defined.
methods (List[MiddlewareMethodInfo]): List of methods defined by the middleware class.
Describes the middleware of the app.
Args:
middlewareTree (MiddlewareTreeInfo): The middleware tree of the app.
middlewareClasses (List[MiddlewareClassInfo]): The middleware classes of the app.
independent (bool): Whether or not the middleware components are executed
independently.
Attributes:
independent_text (str): Text created from the `independent` arg.
Describes a middleware method.
Args:
function_name (str): Name of the method.
source_info (str): The source path of the method.
Describes the middleware methods used by the app.
Args:
request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
Describes a middleware tree entry.
Args:
name (str): The name of the method.
class_name (str): The class name of the method.
Describes a route.
Args:
path (str): The path of this route.
class_name (str): The class name of the responder of this route.
source_info (str): The source path where this responder was defined.
methods (List[RouteMethodInfo]): List of methods defined in the route.
Describes a responder method.
Args:
method (str): The HTTP method of this responder.
source_info (str): The source path of this function.
function_name (str): Name of the function.
internal (bool): Whether or not this was a default responder added
by the framework.
Attributes:
suffix (str): The suffix of this route function. This is set to an empty
string when the function has no suffix.
Describes a sink.
Args:
prefix (str): The prefix of the sink.
name (str): The name of the sink function or class.
source_info (str): The source path where this sink was defined.
Describes a static route.
Args:
path (str): The prefix of the static route.
directory (str): The directory for the static route.
fallback_filename (str or None): Fallback filename to serve.
Visitor that returns a string representation of the info class.
This is used automatically by calling ``to_string()`` on the info class.
It can also be used directly by calling ``StringVisitor.process(info_instance)``.
Args:
verbose (bool, optional): Adds more information. Defaults to ``False``.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
Filter the internal elements of an iterable.
Try to get the definition file and line of obj.
Return default on error.
Attempt to get the definition file and line of obj and its name.
Check if the module of the object is a falcon module.
Return a string from the list of methods.
Inspects an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
AppInfo: The information regarding the application. Call
:meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
representation.
Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.
Default route inspector for CompiledRouter.
Args:
router (CompiledRouter): The router to inspect.
Returns:
List[RouteInfo]: A list of :class:`~.RouteInfo`.
Inspects the error handlers of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[ErrorHandlerInfo]: A list of error handlers used by the
application.
Inspects the middleware components of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
MiddlewareInfo: Information about the app's middleware components.
Inspects the routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[RouteInfo]: A list of route descriptions for the application.
Inspects the sinks of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[SinkInfo]: A list of sinks used by the application.
Inspects the static routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[StaticRouteInfo]: A list of static routes that have
been added to the application.
Process the instance, by calling the appropriate visit method.
Uses the `__visit_name__` attribute of the `instance` to obtain the method to use.
Args:
instance (_Traversable): The instance to process.
Register a function to inspect a particular router.
This decorator registers a new function for a custom router
class, so that it can be inspected with the function
:func:`.inspect_routes`.
An inspection function takes the router instance used by the
application and returns a list of :class:`.RouteInfo`. Eg::
@register_router(MyRouterClass)
def inspect_my_router(router):
return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]
Args:
router_class (Type): The router class to register. If
already registered an error will be raised.
Get the current tabulation.
Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal falcon route methods
and error handlers. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
Returns:
str: A string representation of the application.
Visit a AppInfo instance. Usually called by `process`.
Visit a ErrorHandlerInfo instance. Usually called by `process`.
Visit a MiddlewareInfo instance. Usually called by `process`.
Visit a ErrorHandlerInfo instance. Usually called by `process`.
Visit a MiddlewareMethodInfo instance. Usually called by `process`.
Visit a MiddlewareTreeInfo instance. Usually called by `process`.
Visit a MiddlewareTreeItemInfo instance. Usually called by `process`.
Visit a RouteInfo instance. Usually called by `process`.
Visit a RouteMethodInfo instance. Usually called by `process`.
Visit a SinkInfo instance. Usually called by `process`.
Visit a StaticRouteInfo instance. Usually called by `process`.
Inspect utilities for falcon applications.
Copyright 2020 by Federico Caselli Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. router inspection registry type: Dict[Type, Callable] type: List[RouteInfo] ------------------------------------------------------------------------ Inspection classes ------------------------------------------------------------------------ NOTE(CaselIT): internal falcon names do not start with on and do not have suffix added for compatibility with RouteMethodInfo ------------------------------------------------------------------------ Visitor classes ------------------------------------------------------------------------ ------------------------------------------------------------------------ Helpers functions ------------------------------------------------------------------------ NOTE(vytas): If Falcon is cythonized, all default responders coming from cythonized modules will appear as built-in functions, and raise a TypeError when trying to locate the source file. NOTE(caselit): a class instances return None. Try the type | 9,742 | en | 0.735536 |
# -*- coding: utf-8 -*-
# Copyright 2019 Spotify AB. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import configparser
import datetime
import hashlib
import os
import shutil
import tempfile
import unittest
import medusa.storage.abstract_storage
from medusa.backup import generate_md5_hash
from medusa.config import MedusaConfig, StorageConfig, _namedtuple_from_dict, CassandraConfig
from medusa.index import build_indices
from medusa.storage import Storage
class RestoreNodeTest(unittest.TestCase):
    """Integration-style tests for Storage and its local storage driver.

    Every test runs against throwaway directories under /tmp that are
    wiped and recreated by setUp().
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Scratch paths shared by all tests; reset in setUp().
        self.local_storage_dir = "/tmp/medusa_local_storage"
        self.medusa_bucket_dir = "/tmp/medusa_test_bucket"

    def setUp(self):
        """Reset the scratch directories and build a local-provider Storage."""
        if os.path.isdir(self.local_storage_dir):
            shutil.rmtree(self.local_storage_dir)
        if os.path.isdir(self.medusa_bucket_dir):
            shutil.rmtree(self.medusa_bucket_dir)
        os.makedirs(self.local_storage_dir)
        config = configparser.ConfigParser(interpolation=None)
        config['storage'] = {
            'host_file_separator': ',',
            'bucket_name': 'medusa_test_bucket',
            'key_file': '',
            'storage_provider': 'local',
            'prefix': '',
            'fqdn': '127.0.0.1',
            'api_key_or_username': '',
            'api_secret_or_password': '',
            'base_path': '/tmp'
        }
        config['cassandra'] = {
            'is_ccm': 1
        }
        self.config = MedusaConfig(
            storage=_namedtuple_from_dict(StorageConfig, config['storage']),
            cassandra=_namedtuple_from_dict(CassandraConfig, config['cassandra']),
            monitoring={},
            ssh=None,
            restore=None
        )
        self.storage = Storage(config=self.config.storage)

    def test_add_object_from_string(self):
        """Uploading a string then reading it back yields the same content."""
        file_content = "content of the test file"
        self.storage.storage_driver.upload_blob_from_string("test1/file.txt", file_content)
        self.assertEqual(self.storage.storage_driver.get_blob_content_as_string("test1/file.txt"), file_content)

    def test_download_blobs(self):
        """download_blobs fetches every requested blob into the target dir."""
        files_to_download = list()
        file1_content = "content of the test file1"
        file2_content = "content of the test file2"
        self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
        files_to_download.append("test_download_blobs1/file1.txt")
        self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content)
        files_to_download.append("test_download_blobs2/file2.txt")
        self.assertEqual(len(os.listdir(self.medusa_bucket_dir)), 2)
        self.storage.storage_driver.download_blobs(files_to_download, self.local_storage_dir)
        self.assertEqual(len(os.listdir(self.local_storage_dir)), 2)

    def test_list_objects(self):
        """list_objects returns all blobs, or only those under a prefix."""
        file1_content = "content of the test file1"
        file2_content = "content of the test file2"
        self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
        self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content)
        objects = self.storage.storage_driver.list_objects()
        self.assertEqual(len(objects), 2)
        one_object = self.storage.storage_driver.list_objects("test_download_blobs2")
        self.assertEqual(len(one_object), 1)

    def test_read_blob(self):
        """read_blob_as_string returns the uploaded content for a blob object."""
        file1_content = "content of the test file1"
        self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
        objects = self.storage.storage_driver.list_objects("test_download_blobs1")
        object_content = self.storage.storage_driver.read_blob_as_string(objects[0])
        self.assertEqual(object_content, file1_content)

    def test_get_blob(self):
        """get_blob returns a blob whose name matches the requested key."""
        file1_content = "content of the test file1"
        self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
        obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
        self.assertEqual(obj.name, "test_download_blobs1/file1.txt")

    def test_read_blob_as_bytes(self):
        """get_blob_content_as_bytes returns the content as a bytes object."""
        file1_content = "content of the test file1"
        self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
        object_content = self.storage.storage_driver.get_blob_content_as_bytes("test_download_blobs1/file1.txt")
        self.assertEqual(object_content, b"content of the test file1")

    def test_verify_hash(self):
        """The upload manifest's MD5 matches the stored blob's hash."""
        file1_content = "content of the test file1"
        manifest = self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
        obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
        self.assertEqual(manifest.MD5, obj.hash)

    def test_hashes_match(self):
        """hashes_match compares base64 and hex encodings of the same digest."""
        # Should match
        hash1 = "S1EAM/BVMqhbJnAUs/nWlQ=="
        hash2 = "4b510033f05532a85b267014b3f9d695"
        self.assertTrue(
            medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
        )
        # Should match
        hash1 = "4b510033f05532a85b267014b3f9d695"
        hash2 = "4b510033f05532a85b267014b3f9d695"
        self.assertTrue(
            medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
        )
        # Should not match
        hash1 = "S1EAM/BVMqhbJnAUs/nWlQsdfsdf=="
        hash2 = "4b510033f05532a85b267014b3f9d695"
        self.assertFalse(
            medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
        )

    def test_generate_md5_hash(self):
        """generate_md5_hash agrees with a one-shot MD5 for any block size."""
        with tempfile.NamedTemporaryFile() as tf:
            # write random bytes
            two_megabytes = 2 * 1024 * 1024
            tf.write(os.urandom(two_megabytes))
            tf.flush()
            # compute checksum of the whole file at once
            tf.seek(0)
            checksum_full = hashlib.md5(tf.read()).digest()
            # NOTE: base64.encodestring() was removed in Python 3.9;
            # encodebytes() is the supported equivalent.
            digest_full = base64.encodebytes(checksum_full).decode('UTF-8').strip()
            # compute checksum using default-size chunks
            tf.seek(0)
            digest_chunk = generate_md5_hash(tf.name)
            # compare the digests
            self.assertEqual(digest_chunk, digest_full)
            # compute checksum using custom size chunks
            tf.seek(0)
            self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=128))
            tf.seek(0)
            self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=256))
            tf.seek(0)
            self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=1024))
            tf.seek(0)
            self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=100000000))  # 100M
            tf.seek(0)
            self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=-1))
            tf.seek(0)
            self.assertNotEqual(digest_full, generate_md5_hash(tf.name, block_size=0))

    def test_get_object_datetime(self):
        """get_object_datetime reflects the blob's modify_time metadata."""
        file1_content = "content of the test file1"
        self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
        obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
        self.assertEqual(
            datetime.datetime.fromtimestamp(int(obj.extra["modify_time"])),
            self.storage.storage_driver.get_object_datetime(obj)
        )

    def test_get_fqdn_from_backup_index_blob(self):
        """The node FQDN is extracted from backup index blob names."""
        blob_name = "index/backup_index/2019051307/manifest_node1.whatever.com.json"
        self.assertEqual(
            "node1.whatever.com",
            self.storage.get_fqdn_from_any_index_blob(blob_name)
        )
        blob_name = "index/backup_index/2019051307/schema_node2.whatever.com.cql"
        self.assertEqual(
            "node2.whatever.com",
            self.storage.get_fqdn_from_any_index_blob(blob_name)
        )
        blob_name = "index/backup_index/2019051307/schema_node3.whatever.com.txt"
        self.assertEqual(
            "node3.whatever.com",
            self.storage.get_fqdn_from_any_index_blob(blob_name)
        )
        blob_name = "index/backup_index/2019051307/schema_node_with_underscores.whatever.com.txt"
        self.assertEqual(
            "node_with_underscores.whatever.com",
            self.storage.get_fqdn_from_any_index_blob(blob_name)
        )

    def test_get_fqdn_from_any_index_blob(self):
        """FQDN extraction copes with dashes, dots and underscores in hostnames."""
        blob_name = "tokenmap_hostname-with-dashes-and-3-numbers.json"
        self.assertEqual(
            "hostname-with-dashes-and-3-numbers",
            self.storage.get_fqdn_from_any_index_blob(blob_name)
        )
        blob_name = "tokenmap_hostname-with-dashes.and-dots.json"
        self.assertEqual(
            "hostname-with-dashes.and-dots",
            self.storage.get_fqdn_from_any_index_blob(blob_name)
        )
        blob_name = "tokenmap_hostname_with-underscores.and-dots-and.dashes.json"
        self.assertEqual(
            "hostname_with-underscores.and-dots-and.dashes",
            self.storage.get_fqdn_from_any_index_blob(blob_name)
        )
        blob_name = "index/bi/third_backup/finished_localhost_1574343029.timestamp"
        self.assertEqual(
            "localhost",
            self.storage.get_fqdn_from_any_index_blob(blob_name)
        )

    def test_parse_backup_index(self):
        """Index blobs are grouped per backup name and per node."""
        file_content = "content of the test file"
        # SSTables for node1 and backup1
        self.storage.storage_driver.upload_blob_from_string("node1/backup1/data/ks1/sstable1.db", file_content)
        self.storage.storage_driver.upload_blob_from_string("node1/backup1/data/ks1/sstable2.db", file_content)
        # Metadata for node1 and backup1
        self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/tokenmap.json", file_content)
        self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/manifest.json", file_content)
        self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/schema.cql", file_content)
        # SSTables for node2 and backup1
        self.storage.storage_driver.upload_blob_from_string("node2/backup1/data/ks1/sstable1.db", file_content)
        self.storage.storage_driver.upload_blob_from_string("node2/backup1/data/ks1/sstable2.db", file_content)
        # Metadata for node2 and backup1
        self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/tokenmap.json", file_content)
        self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/manifest.json", file_content)
        self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/schema.cql", file_content)
        # SSTables for node1 and backup2
        self.storage.storage_driver.upload_blob_from_string("node1/backup2/data/ks1/sstable1.db", file_content)
        self.storage.storage_driver.upload_blob_from_string("node1/backup2/data/ks1/sstable2.db", file_content)
        # Metadata for node1 and backup2
        self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/tokenmap.json", file_content)
        self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/manifest.json", file_content)
        self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/schema.cql", file_content)
        build_indices(self.config, False)
        path = 'index/backup_index'
        backup_index = self.storage.storage_driver.list_objects(path)
        blobs_by_backup = self.storage.group_backup_index_by_backup_and_node(backup_index)
        self.assertTrue("backup1" in blobs_by_backup)
        self.assertTrue("backup2" in blobs_by_backup)
        self.assertTrue("node1" in blobs_by_backup["backup1"])
        self.assertTrue("node2" in blobs_by_backup["backup1"])
        self.assertTrue("node1" in blobs_by_backup["backup2"])
        # node2 never participated in backup2.
        self.assertFalse("node2" in blobs_by_backup["backup2"])

    def test_remove_extension(self):
        """Only the known index extensions are stripped from file names."""
        self.assertEqual(
            'localhost',
            self.storage.remove_extension('localhost.txt')
        )
        self.assertEqual(
            'localhost',
            self.storage.remove_extension('localhost.timestamp')
        )
        self.assertEqual(
            'localhost',
            self.storage.remove_extension('localhost.cql')
        )
        self.assertEqual(
            'localhost.foo',
            self.storage.remove_extension('localhost.foo')
        )

    def test_get_timestamp_from_blob_name(self):
        """The trailing epoch timestamp is parsed out of index blob names."""
        self.assertEqual(
            1558021519,
            self.storage.get_timestamp_from_blob_name('finished_localhost_1558021519.timestamp')
        )
        self.assertEqual(
            1558021519,
            self.storage.get_timestamp_from_blob_name('finished_some.host.net_1558021519.timestamp')
        )
        self.assertEqual(
            1558021519,
            self.storage.get_timestamp_from_blob_name('finished_some_underscores.host.net_1558021519.timestamp')
        )
        self.assertEqual(
            1574343029,
            self.storage.get_timestamp_from_blob_name('index/bi/third_backup/finished_localhost_1574343029.timestamp')
        )
# Allow running this test module directly (in addition to test-runner
# discovery).
if __name__ == '__main__':
    unittest.main()
| tests/storage_test.py | 13,916 | -*- coding: utf-8 -*- Copyright 2019 Spotify AB. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Should match Should match Should not match write random bytes compute checksum of the whole file at once compute checksum using default-size chunks compare the digests compute checksum using custom size chunks 100M SSTables for node1 and backup1 Metadata for node1 and backup1 SSTables for node2 and backup1 Metadata for node2 and backup1 SSTables for node1 and backup2 Metadata for node1 and backup2 | 988 | en | 0.822118 |
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import range
import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image
def center_crop(data, crop_size, seg=None):
    """Crop data (and optionally seg) to crop_size around the spatial center.

    Thin wrapper around :func:`crop` with crop_type='center'.
    """
    return crop(data, seg, crop_size, margins=0, crop_type='center')
def get_lbs_for_random_crop(crop_size, data_shape, margins):
    """Return per-axis lower bounds for a random crop inside the image.

    Bounds are drawn uniformly while keeping at least ``margins`` distance
    from the borders; axes too small for that fall back to a centered crop.

    :param crop_size: spatial crop extent, one entry per spatial axis
    :param data_shape: full (b, c, x, y(, z)) shape
    :param margins: minimum distance from each border, per spatial axis
    :return: list of lower-bound indices, one per spatial axis
    """
    lbs = []
    for axis in range(len(data_shape) - 2):
        spatial = data_shape[axis + 2]
        upper = spatial - crop_size[axis] - margins[axis]
        if upper > margins[axis]:
            lbs.append(np.random.randint(margins[axis], upper))
        else:
            lbs.append((spatial - crop_size[axis]) // 2)
    return lbs
def get_lbs_for_center_crop(crop_size, data_shape):
    """Return per-axis lower bounds that center a crop_size crop.

    :param crop_size: spatial crop extent, one entry per spatial axis
    :param data_shape: full (b, c, x, y(, z)) shape
    :return: list of lower-bound indices (may be negative if the crop is
        larger than the image)
    """
    num_spatial_axes = len(data_shape) - 2
    return [(data_shape[axis + 2] - crop_size[axis]) // 2 for axis in range(num_spatial_axes)]
def crop(data, seg=None, crop_size=128, margins=(0, 0, 0), crop_type="center",
         pad_mode='constant', pad_kwargs=None,
         pad_mode_seg='constant', pad_kwargs_seg=None):
    """
    crops data and seg (seg may be None) to crop_size. Whether this will be achieved via center or random crop is
    determined by crop_type. Margin will be respected only for random_crop and will prevent the crops from being closer
    than margin to the respective image border. crop_size can be larger than data_shape - margin -> data/seg will be
    padded with zeros in that case. margins can be negative -> results in padding of data/seg followed by cropping with
    margin=0 for the appropriate axes

    :param data: b, c, x, y(, z)
    :param seg: optional segmentation with the same spatial shape as data
    :param crop_size: int or one entry per spatial dimension
    :param margins: distance from each border, can be int or list/tuple of ints (one element for each dimension).
    Can be negative (data/seg will be padded if needed)
    :param crop_type: random or center
    :param pad_mode: np.pad mode used for data
    :param pad_kwargs: extra np.pad kwargs for data; defaults to zero padding
    :param pad_mode_seg: np.pad mode used for seg
    :param pad_kwargs_seg: extra np.pad kwargs for seg; defaults to zero padding
    :return: (cropped data, cropped seg or None)
    """
    # Build the default pad kwargs per call; a mutable default argument
    # would be a single dict shared between all calls.
    if pad_kwargs is None:
        pad_kwargs = {'constant_values': 0}
    if pad_kwargs_seg is None:
        pad_kwargs_seg = {'constant_values': 0}

    if not isinstance(data, (list, tuple, np.ndarray)):
        raise TypeError("data has to be either a numpy array or a list")

    data_shape = tuple([len(data)] + list(data[0].shape))
    data_dtype = data[0].dtype
    dim = len(data_shape) - 2

    if seg is not None:
        # Validate the type before indexing into seg so that a wrong type
        # raises a clear TypeError instead of an arbitrary failure.
        if not isinstance(seg, (list, tuple, np.ndarray)):
            raise TypeError("seg has to be either a numpy array or a list")
        seg_shape = tuple([len(seg)] + list(seg[0].shape))
        seg_dtype = seg[0].dtype
        assert all([i == j for i, j in zip(seg_shape[2:], data_shape[2:])]), "data and seg must have the same spatial " \
                                                                             "dimensions. Data: %s, seg: %s" % \
                                                                             (str(data_shape), str(seg_shape))

    # A scalar crop_size applies to all spatial dimensions.
    if type(crop_size) not in (tuple, list, np.ndarray):
        crop_size = [crop_size] * dim
    else:
        assert len(crop_size) == len(
            data_shape) - 2, "If you provide a list/tuple as center crop make sure it has the same dimension as your " \
                             "data (2d/3d)"

    if not isinstance(margins, (np.ndarray, tuple, list)):
        margins = [margins] * dim

    data_return = np.zeros([data_shape[0], data_shape[1]] + list(crop_size), dtype=data_dtype)
    if seg is not None:
        seg_return = np.zeros([seg_shape[0], seg_shape[1]] + list(crop_size), dtype=seg_dtype)
    else:
        seg_return = None

    for b in range(data_shape[0]):
        # Per-sample shapes: entries of a list-like data/seg may differ in
        # spatial size.
        data_shape_here = [data_shape[0]] + list(data[b].shape)
        if seg is not None:
            seg_shape_here = [seg_shape[0]] + list(seg[b].shape)

        if crop_type == "center":
            lbs = get_lbs_for_center_crop(crop_size, data_shape_here)
        elif crop_type == "random":
            lbs = get_lbs_for_random_crop(crop_size, data_shape_here, margins)
        else:
            raise NotImplementedError("crop_type must be either center or random")

        # Padding needed per spatial axis wherever the crop window extends
        # past the image border (negative lower bound or upper bound past
        # the image extent).
        need_to_pad = [[0, 0]] + [[abs(min(0, lbs[d])),
                                   abs(min(0, data_shape_here[d + 2] - (lbs[d] + crop_size[d])))]
                                  for d in range(dim)]

        # we should crop first, then pad -> reduces i/o for memmaps, reduces RAM usage and improves speed
        ubs = [min(lbs[d] + crop_size[d], data_shape_here[d + 2]) for d in range(dim)]
        lbs = [max(0, lbs[d]) for d in range(dim)]

        slicer_data = [slice(0, data_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
        data_cropped = data[b][tuple(slicer_data)]

        if seg_return is not None:
            slicer_seg = [slice(0, seg_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
            seg_cropped = seg[b][tuple(slicer_seg)]

        if any([i > 0 for j in need_to_pad for i in j]):
            data_return[b] = np.pad(data_cropped, need_to_pad, pad_mode, **pad_kwargs)
            if seg_return is not None:
                seg_return[b] = np.pad(seg_cropped, need_to_pad, pad_mode_seg, **pad_kwargs_seg)
        else:
            data_return[b] = data_cropped
            if seg_return is not None:
                seg_return[b] = seg_cropped

    return data_return, seg_return
def random_crop(data, seg=None, crop_size=128, margins=(0, 0, 0)):
    """Randomly crop data (and seg, if given) to crop_size, honoring margins.

    Thin wrapper around :func:`crop` with crop_type='random'. The margins
    default is a tuple rather than a list so no mutable default argument is
    shared between calls.
    """
    return crop(data, seg, crop_size, margins, 'random')
def pad_nd_image_and_seg(data, seg, new_shape=None, must_be_divisible_by=None, pad_mode_data='constant',
                         np_pad_kwargs_data=None, pad_mode_seg='constant', np_pad_kwargs_seg=None):
    """Pad data and seg up to new_shape, treating it as a minimum shape.

    Dimensions already larger than new_shape are left unchanged.

    :param data: array to pad
    :param seg: optional segmentation to pad alongside data
    :param new_shape: target minimum shape; if None only must_be_divisible_by applies
    :param must_be_divisible_by: per-spatial-dimension divisibility constraint
        (UNet-like architectures often need this); new_shape is increased as
        required to satisfy it, and it must have the same length as new_shape
    :param pad_mode_data: see np.pad
    :param np_pad_kwargs_data: see np.pad
    :param pad_mode_seg: see np.pad
    :param np_pad_kwargs_seg: see np.pad
    :return: (padded data, padded seg or None)
    """
    padded_data = pad_nd_image(data, new_shape, mode=pad_mode_data, kwargs=np_pad_kwargs_data,
                               return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
    padded_seg = None
    if seg is not None:
        padded_seg = pad_nd_image(seg, new_shape, mode=pad_mode_seg, kwargs=np_pad_kwargs_seg,
                                  return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
    return padded_data, padded_seg
| data/crop_and_pad_augmentations.py | 7,604 | crops data and seg (seg may be None) to crop_size. Whether this will be achieved via center or random crop is
determined by crop_type. Margin will be respected only for random_crop and will prevent the crops from being closer
than margin to the respective image border. crop_size can be larger than data_shape - margin -> data/seg will be
padded with zeros in that case. margins can be negative -> results in padding of data/seg followed by cropping with
margin=0 for the appropriate axes
:param data: b, c, x, y(, z)
:param seg:
:param crop_size:
:param margins: distance from each border, can be int or list/tuple of ints (one element for each dimension).
Can be negative (data/seg will be padded if needed)
:param crop_type: random or center
:return:
:param crop_size:
:param data_shape: (b,c,x,y(,z)) must be the whole thing!
:return:
:param crop_size:
:param data_shape: (b,c,x,y(,z)) must be the whole thing!
:param margins:
:return:
Pads data and seg to new_shape. new_shape is thereby understood as min_shape (if data/seg is already larger then
new_shape the shape stays the same for the dimensions this applies)
:param data:
:param seg:
:param new_shape: if none then only must_be_divisible_by is applied
:param must_be_divisible_by: UNet-like architectures sometimes require the input to be divisible by some number. This
will modify new_shape if new_shape is not divisible by this (by increasing it accordingly).
must_be_divisible_by should be a list of int (one for each spatial dimension) and this list must have the same
length as new_shape
:param pad_mode_data: see np.pad
:param np_pad_kwargs_data:see np.pad
:param pad_mode_seg:see np.pad
:param np_pad_kwargs_seg:see np.pad
:return:
Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. we should crop first, then pad -> reduces i/o for memmaps, reduces RAM usage and improves speed | 2,409 | en | 0.819769 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.node as node
import logging
logging.basicConfig(level=logging.DEBUG)
# run the gapic generator: produce the Node.js client for each configured
# API version and copy it into the repo (skipping files owned by this repo).
gapic = gcp.GAPICBazel()
versions = ['v1beta2']
name = 'memcache'
for version in versions:
    library = gapic.node_library(name, version)
    s.copy(library, excludes=['package.json', 'README.md'])
# Copy common templates shared across Node.js client libraries.
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(source_location='build/src')
s.copy(templates, excludes=[])
# Run the standard Node.js post-processing (formatting, codegen fixups).
node.postprocess_gapic_library()
| synth.py | 1,217 | This script is used to synthesize generated parts of this library.
Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. run the gapic generator Copy common templates | 662 | en | 0.859913 |
# Data Preprocessing Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset: every column except the last is a feature, the last
# column is the target (kept consistent with the ':-1' feature slice below,
# so the script works for any number of feature columns).
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
# Splitting the dataset into the Training set and Test set (80/20 split,
# fixed random_state for reproducibility)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)""" | Part 2 - Regression/Section 4 - Simple Linear Regression/data_preprocessing_template.py | 684 | Data Preprocessing Template Importing the libraries Importing the dataset Splitting the dataset into the Training set and Test set Feature Scaling | 146 | en | 0.703396 |
import serial
import pynmea2
# Testing with the blue USB dongle (GPS receiver on /dev/ttyUSB0, 4800 baud).
ser = serial.Serial('/dev/ttyUSB0',4800)
while 1:
    try:
        # Read one NMEA sentence; the serial port yields raw bytes.
        data = ser.readline().decode('utf-8')
        # Only handle $GPGGA sentences (essential fix data).
        if(data.startswith("$GPGGA")):
            parse = pynmea2.parse(data)
            print(repr(parse))
    # Skip lines with undecodable bytes (partial reads / line noise).
    except UnicodeDecodeError:
continue | CTD_controller/gps_test1.py | 327 | Probando con el pincho usb azul | 31 | es | 0.863485 |
####################################################################################################
"""
adres_dataset.py
This module implements several classes to perform dataset-specific downloading, saving and
data-transformation operations.
Written by Swaan Dekkers & Thomas Jongstra
"""
####################################################################################################
#############
## Imports ##
#############
from pathlib import Path
import pandas.io.sql as sqlio
import pandas as pd
import numpy as np
import requests
import psycopg2
import time
import os
import re
# Import own modules.
import datasets, clean
# Define HOME and DATA_PATH on a global level.
HOME = Path.home() # Home path for old VAO.
# USERNAME = os.path.basename(HOME)
# HOME = os.path.join('/data', USERNAME) # Set home for new VAO.
DATA_PATH = os.path.join(HOME, 'Documents/woonfraude/data/')
########################
## AdresDataset class ##
########################
class AdresDataset(datasets.MyDataset):
    """Create a dataset for the adres data."""
    # Set the class attributes.
    name = 'adres'  # dataset name, used for versioned save/load
    table_name = 'import_adres'  # source table this dataset is downloaded from
    id_column = 'adres_id'  # primary-key column of the table
def extract_leegstand(self):
"""Create a column indicating leegstand (no inhabitants on the address)."""
self.data['leegstand'] = ~self.data.inwnrs.notnull()
self.version += '_leegstand'
self.save()
    def enrich_with_woning_id(self):
        """Add woning ids to the adres dataframe.

        Downloads the bwv_adres_periodes link table and left-joins the
        wng_id column onto the adres data via adres_id == ads_id.
        """
        adres_periodes = datasets.download_dataset('bwv_adres_periodes', 'bwv_adres_periodes')
        self.data = self.data.merge(adres_periodes[['ads_id', 'wng_id']], how='left', left_on='adres_id', right_on='ads_id')
        self.version += '_woningId'
        self.save()
def prepare_bag(self, bag):
# To int
bag['huisnummer_nummeraanduiding'] = bag['huisnummer_nummeraanduiding'].astype(int)
bag['huisnummer_nummeraanduiding'] = bag['huisnummer_nummeraanduiding'].replace(0, -1)
# Fillna and replace ''
bag['huisletter_nummeraanduiding'] = bag['huisletter_nummeraanduiding'].replace('', 'None')
# bag['_openbare_ruimte_naam@bag'] = bag['_openbare_ruimte_naam@bag'].fillna('None')
bag['_openbare_ruimte_naam_nummeraanduiding'] = bag['_openbare_ruimte_naam_nummeraanduiding'].replace('', 'None')
# bag['_huisnummer_toevoeging@bag'] = bag['_huisnummer_toevoeging@bag'].fillna('None')
bag['huisnummer_toevoeging_nummeraanduiding'] = bag['huisnummer_toevoeging_nummeraanduiding'].replace('', 'None')
return bag
def prepare_adres(self, adres):
# To int
adres['hsnr'] = adres['hsnr'].astype(int)
adres['hsnr'] = adres['hsnr'].replace(0, -1)
return adres
def replace_string_nan_adres(self, adres):
adres['hsnr'] = adres['hsnr'].replace(-1, np.nan)
adres['sttnaam'] = adres['sttnaam'].replace('None', np.nan)
adres['hsltr'] = adres['hsltr'].replace('None', np.nan)
adres['toev'] = adres['toev'].replace('None', np.nan)
adres['huisnummer_nummeraanduiding'] = adres['huisnummer_nummeraanduiding'].replace(-1, np.nan)
adres['huisletter_nummeraanduiding'] = adres['huisletter_nummeraanduiding'].replace('None', np.nan)
adres['_openbare_ruimte_naam_nummeraanduiding'] = adres['_openbare_ruimte_naam_nummeraanduiding'].replace('None', np.nan)
adres['huisnummer_toevoeging_nummeraanduiding'] = adres['huisnummer_toevoeging_nummeraanduiding'].replace('None', np.nan)
return adres
    def match_bwv_bag(self, adres, bag):
        """Match BWV addresses to BAG objects on street name + house number.

        Returns the adres dataframe left-joined with at most one BAG match
        per adres_id; unmatched addresses keep NaN in the BAG columns.
        """
        # Merge dataframes on adres dataframe.
        new_df = pd.merge(adres, bag, how='left', left_on=['sttnaam','hsnr'], right_on = ['_openbare_ruimte_naam_nummeraanduiding', 'huisnummer_nummeraanduiding'])
        # Find id's that have a direct match and that have multiple matches.
        g = new_df.groupby('adres_id')
        df_direct = g.filter(lambda x: len(x) == 1)
        df_multiple = g.filter(lambda x: len(x) > 1)
        # Disambiguate multiple matches: additionally require the house letter
        # and the house-number suffix to agree.
        df_multiple = df_multiple[(df_multiple['hsltr'] == df_multiple['huisletter_nummeraanduiding']) & (df_multiple['toev'] == df_multiple['huisnummer_toevoeging_nummeraanduiding'])]
        # Concat df_direct and df_multiple.
        df_result = pd.concat([df_direct, df_multiple])
        # Because of the seperation of an object, there are two matching objects.
        # Keep one match per adres_id: sort on status_coordinaat_code and keep the first.
        df_result = df_result.sort_values(['adres_id', 'status_coordinaat_code'])
        df_result = df_result.drop_duplicates(subset='adres_id', keep='first')
        # Add adresses without match (left join back onto the full adres set;
        # duplicated columns from the merge are dropped via the '_y' suffix).
        final_df = pd.merge(adres, df_result, how='left', on='adres_id', suffixes=('', '_y'))
        final_df.drop(list(final_df.filter(regex='_y$')), axis=1, inplace=True)
        # Set the name of the final adres dataframe again.
        final_df.name = 'adres'
        return final_df
def impute_values_for_bagless_addresses(self, adres):
"""Impute values for adresses where no BAG-match could be found."""
clean.impute_missing_values(adres)
# clean.impute_missing_values_mode(adres, ['status_coordinaat_code@bag'])
adres.fillna(value={'huisnummer_nummeraanduiding': 0,
'huisletter_nummeraanduiding': 'None',
'_openbare_ruimte_naam_nummeraanduiding': 'None',
'huisnummer_toevoeging_nummeraanduiding': 'None',
'type_woonobject_omschrijving': 'None',
'eigendomsverhouding_id': 'None',
'financieringswijze_id': -1,
'gebruik_id': -1,
'reden_opvoer_id': -1,
'status_id_verblijfsobject': -1,
'toegang_id': 'None'}, inplace=True)
return adres
    def enrich_with_bag(self, bag):
        """Enrich the adres data with information from the BAG data. Uses the bag dataframe as input."""
        # Pipeline: normalize both sides, match, restore NaNs, then impute
        # defaults for addresses that found no BAG match.
        bag = self.prepare_bag(bag)
        self.data = self.prepare_adres(self.data)
        self.data = self.match_bwv_bag(self.data, bag)
        self.data = self.replace_string_nan_adres(self.data)
        self.data = self.impute_values_for_bagless_addresses(self.data)
        self.version += '_bag'
        self.save()
        print("The adres dataset is now enriched with BAG data.")
def enrich_with_personen_features(self, personen):
"""Add aggregated features relating to persons to the address dataframe. Uses the personen dataframe as input."""
# Create simple handle to the adres data.
adres = self.data
# Compute age of people in years (float)
today = pd.to_datetime('today')
# Set all dates within range allowed by Pandas (584 years?)
personen['geboortedatum'] = pd.to_datetime(personen['geboortedatum'], errors='coerce')
# Get the most frequent birthdate (mode).
geboortedatum_mode = personen['geboortedatum'].mode()[0]
# Compute the age (result is a TimeDelta).
personen['leeftijd'] = today - personen['geboortedatum']
# Convert the age to an approximation in years ("smearin out" the leap years).
personen['leeftijd'] = personen['leeftijd'].apply(lambda x: x.days / 365.25)
# Find the matching address ids between the adres df and the personen df.
adres_ids = adres.adres_id
personen_adres_ids = personen.ads_id_wa
intersect = set(adres_ids).intersection(set(personen_adres_ids))
# Iterate over all matching address ids and find all people at each address.
inhabitant_locs = {}
print("Now looping over all address ids that have a link with one or more inhabitants...")
for i, adres_id in enumerate(intersect):
if i % 1000 == 0:
print(i)
inhabitant_locs[adres_id] = personen_adres_ids[personen_adres_ids == adres_id]
# Create a new column in the dataframe showing the amount of people at each address.
# TODO: this step currently takes a few minutes to complete, should still be optimized.
adres['aantal_personen'] = 0
adres['aantal_vertrokken_personen'] = -1
adres['aantal_overleden_personen'] = -1
adres['aantal_niet_uitgeschrevenen'] = -1
adres['leegstand'] = True
adres['leeftijd_jongste_persoon'] = -1.
adres['leeftijd_oudste_persoon'] = -1.
adres['aantal_kinderen'] = 0
adres['percentage_kinderen'] = -1.
adres['aantal_mannen'] = 0
adres['percentage_mannen'] = -1.
adres['gemiddelde_leeftijd'] = -1.
adres['stdev_leeftijd'] = -1.
adres['aantal_achternamen'] = 0
adres['percentage_achternamen'] = -1.
for i in range(1,8):
adres[f'gezinsverhouding_{i}'] = 0
adres[f'percentage_gezinsverhouding_{i}'] = 0.
print("Now looping over all rows in the adres dataframe in order to add person information...")
for i in adres.index:
if i % 1000 == 0:
print(i)
row = adres.iloc[i]
adres_id = row['adres_id']
try:
# Get the inhabitants for the current address.
inhab_locs = inhabitant_locs[adres_id].keys()
inhab = personen.loc[inhab_locs]
# Check whether any registered inhabitants have left Amsterdam or have passed away.
aantal_vertrokken_personen = sum(inhab["vertrekdatum_adam"].notnull())
aantal_overleden_personen = sum(inhab["overlijdensdatum"].notnull())
aantal_niet_uitgeschrevenen = len(inhab[inhab["vertrekdatum_adam"].notnull() | inhab["overlijdensdatum"].notnull()])
adres['aantal_vertrokken_personen'] = aantal_vertrokken_personen
adres['aantal_overleden_personen'] = aantal_overleden_personen
adres['aantal_niet_uitgeschrevenen'] = aantal_niet_uitgeschrevenen
# If there are more inhabitants than people that are incorrectly still registered, then there is no 'leegstand'.
if len(inhab) > aantal_niet_uitgeschrevenen:
adres['leegstand'] = False
# Totaal aantal personen (int).
aantal_personen = len(inhab)
adres.at[i, 'aantal_personen'] = aantal_personen
# Leeftijd jongste persoon (float).
leeftijd_jongste_persoon = min(inhab['leeftijd'])
adres.at[i, 'leeftijd_jongste_persoon'] = leeftijd_jongste_persoon
# Leeftijd oudste persoon (float).
leeftijd_oudste_persoon = max(inhab['leeftijd'])
adres.at[i, 'leeftijd_oudste_persoon'] = leeftijd_oudste_persoon
# Aantal kinderen ingeschreven op adres (int/float).
aantal_kinderen = sum(inhab['leeftijd'] < 18)
adres.at[i, 'aantal_kinderen'] = aantal_kinderen
adres.at[i, 'percentage_kinderen'] = aantal_kinderen / aantal_personen
# Aantal mannen (int/float).
aantal_mannen = sum(inhab.geslacht == 'M')
adres.at[i, 'aantal_mannen'] = aantal_mannen
adres.at[i, 'percentage_mannen'] = aantal_mannen / aantal_personen
# Gemiddelde leeftijd (float).
gemiddelde_leeftijd = inhab.leeftijd.mean()
adres.at[i, 'gemiddelde_leeftijd'] = gemiddelde_leeftijd
# Standardeviatie van leeftijd (float). Set to 0 when the sample size is 1.
stdev_leeftijd = inhab.leeftijd.std()
adres.at[i, 'stdev_leeftijd'] = stdev_leeftijd if aantal_personen > 1 else 0
# Aantal verschillende achternamen (int/float).
aantal_achternamen = inhab.naam.nunique()
adres.at[i, 'aantal_achternamen'] = aantal_achternamen
adres.at[i, 'percentage_achternamen'] = aantal_achternamen / aantal_personen
# Gezinsverhouding (frequency count per klasse) (int/float).
gezinsverhouding = inhab.gezinsverhouding.value_counts()
for key in gezinsverhouding.keys():
val = gezinsverhouding[key]
adres.at[i, f'gezinsverhouding_{key}'] = val
adres.at[i, f'percentage_gezinsverhouding_{key}'] = val / aantal_personen
except (KeyError, ValueError) as e:
pass
print("...done!")
self.data = adres
self.version += '_personen'
self.save()
print("The adres dataset is now enriched with personen data.")
    def add_hotline_features(self, hotline):
        """Add the hotline features to the adres dataframe.

        Adds an 'aantal_hotline_meldingen' column: the number of hotline
        reports linked to each address via its woning id (wng_id).
        """
        # Create a temporary merged df using the adres and hotline dataframes.
        merge = self.data.merge(hotline, on='wng_id', how='left')
        # Create a group for each adres_id
        adres_groups = merge.groupby(by='adres_id')
        # Count the number of hotline meldingen per group/adres_id.
        # 'id' should be the primary key of hotline df, so it is usable for hotline entry counting.
        hotline_counts = adres_groups['id'].agg(['count'])
        # Rename column
        hotline_counts.columns = ['aantal_hotline_meldingen']
        # Enrich the 'adres' dataframe with the computed hotline counts.
        self.data = self.data.merge(hotline_counts, on='adres_id', how='left')
        self.version += '_hotline'
        self.save()
print("The adres dataset is now enriched with hotline data.") | codebase/datasets/adres_dataset.py | 13,802 | Create a dataset for the adres data.
Add the hotline features to the adres dataframe.
Enrich the adres data with information from the BAG data. Uses the bag dataframe as input.
Add aggregated features relating to persons to the address dataframe. Uses the personen dataframe as input.
Add woning ids to the adres dataframe.
Create a column indicating leegstand (no inhabitants on the address).
Impute values for adresses where no BAG-match could be found.
adres_dataset.py
This module implements several classes to perform dataset-specific downloading, saving and
data-transformation operations.
Written by Swaan Dekkers & Thomas Jongstra
Imports Import own modules. Define HOME and DATA_PATH on a global level. Home path for old VAO. USERNAME = os.path.basename(HOME) HOME = os.path.join('/data', USERNAME) Set home for new VAO. AdresDataset class Set the class attributes. To int Fillna and replace '' bag['_openbare_ruimte_naam@bag'] = bag['_openbare_ruimte_naam@bag'].fillna('None') bag['_huisnummer_toevoeging@bag'] = bag['_huisnummer_toevoeging@bag'].fillna('None') To int Merge dataframes on adres dataframe. Find id's that have a direct match and that have multiple matches. Make multiplematch more specific to construct perfect match. Concat df_direct and df_multiple. Because of the seperation of an object, there are two matching objects. Keep the oldest object with definif point. Add adresses without match. Set the name of the final adres dataframe again. clean.impute_missing_values_mode(adres, ['status_coordinaat_code@bag']) Create simple handle to the adres data. Compute age of people in years (float) Set all dates within range allowed by Pandas (584 years?) Get the most frequent birthdate (mode). Compute the age (result is a TimeDelta). Convert the age to an approximation in years ("smearin out" the leap years). Find the matching address ids between the adres df and the personen df. Iterate over all matching address ids and find all people at each address. Create a new column in the dataframe showing the amount of people at each address. TODO: this step currently takes a few minutes to complete, should still be optimized. Get the inhabitants for the current address. Check whether any registered inhabitants have left Amsterdam or have passed away. If there are more inhabitants than people that are incorrectly still registered, then there is no 'leegstand'. Totaal aantal personen (int). Leeftijd jongste persoon (float). Leeftijd oudste persoon (float). Aantal kinderen ingeschreven op adres (int/float). Aantal mannen (int/float). Gemiddelde leeftijd (float). Standardeviatie van leeftijd (float). 
Set to 0 when the sample size is 1. Aantal verschillende achternamen (int/float). Gezinsverhouding (frequency count per klasse) (int/float). Create a temporary merged df using the adres and hotline dataframes. Create a group for each adres_id Count the number of hotline meldingen per group/adres_id. 'id' should be the primary key of hotline df, so it is usable for hotline entry counting. Rename column Enrich the 'adres' dataframe with the computed hotline counts. | 3,108 | en | 0.652637 |
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.visualization.decisiontree import variants, visualizer
| ws2122-lspm/Lib/site-packages/pm4py/visualization/decisiontree/__init__.py | 784 | This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>. | 665 | en | 0.922242 |
from __future__ import division
from keras.callbacks import Callback
from .generic_plot import PlotLosses
# Human-readable plot titles for Keras metric/loss identifiers.
metric2printable = {
    "acc": "Accuracy",
    "mean_squared_error": "Mean squared error",
    "mean_absolute_error": "Mean absolute error",
    "mean_absolute_percentage_error": "Mean absolute percentage error",
    # etc
    "categorical_crossentropy": "Log-loss",
    "sparse_categorical_crossentropy": "Log-loss",
    "binary_crossentropy": "Log-loss",
    "kullback_leibler_divergence": "Log-loss"
}
def loss2name(loss):
    """Return the canonical name of a Keras loss.

    Accepts either a loss function object (returns its ``__name__``) or a
    string identifier (returned unchanged).
    """
    if callable(loss):
        # Passed as a function object.
        return loss.__name__
    # Passed as a string identifier.
    return loss
class PlotLossesKeras(Callback):
    """Keras callback that draws live loss/metric plots after every epoch.

    Keyword arguments are forwarded to :class:`PlotLosses`.
    """
    def __init__(self, **kwargs):
        super(PlotLossesKeras, self).__init__()
        self.liveplot = PlotLosses(**kwargs)
    def on_train_begin(self, logs=None):
        # Track only the training-time metrics; validation twins ('val_*')
        # are handled by PlotLosses itself.
        self.liveplot.set_metrics([
            metric for metric in self.params['metrics']
            if not metric.startswith('val_')
        ])
        # Slightly convolved due to model.compile(loss=...) accepting a single
        # loss, a list, or a per-output dict.
        # vide https://github.com/keras-team/keras/blob/master/keras/engine/training.py
        if isinstance(self.model.loss, list):
            losses = self.model.loss
        elif isinstance(self.model.loss, dict):
            losses = list(self.model.loss.values())
        else:
            # by far the most common scenario
            losses = [self.model.loss]
        # Build printable titles; 'loss' always refers to the cost function.
        metric2printable_updated = metric2printable.copy()
        loss_name = loss2name(losses[0])
        metric2printable_updated['loss'] =\
            "{} (cost function)".format(metric2printable_updated.get(loss_name, loss_name))
        if len(losses) > 1:
            # Multi-output model with one loss per output.
            for output_name, loss in zip(self.model.output_names, losses):
                loss_name = loss2name(loss)
                metric2printable_updated['{}_loss'.format(output_name)] =\
                    "{} ({})".format(metric2printable_updated.get(loss_name, loss_name), output_name)
        else:
            # Single loss shared by all outputs.
            for output_name in self.model.output_names:
                metric2printable_updated['{}_loss'.format(output_name)] =\
                    "{} ({})".format(metric2printable_updated.get(loss_name, loss_name), output_name)
        self.liveplot.metric2title = metric2printable_updated
        self.liveplot.set_max_epoch(self.params['epochs'])
    def on_epoch_end(self, epoch, logs=None):
        # Copy so the plotter cannot mutate the dict Keras passes around.
        self.liveplot.update((logs or {}).copy())
        self.liveplot.draw()
| livelossplot/keras_plot.py | 2,530 | etc if passed as a function if passed as a string slightly convolved due to model.complie(loss=...) stuff vide https://github.com/keras-team/keras/blob/master/keras/engine/training.py by far the most common scenario | 215 | en | 0.833561 |
import numpy as np
from .tensor import Function
# ************* unary ops *************
class ReLU(Function):
    """Rectified linear unit: y = max(x, 0), elementwise."""
    @staticmethod
    def forward(ctx, input):
        # Remember the input so backward can rebuild the pass-through mask.
        ctx.save_for_backward(input)
        return np.maximum(input, 0)
    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        # Gradient flows wherever the input was non-negative.
        mask = x >= 0
        return grad_output * mask
class Log(Function):
    """Natural logarithm, elementwise."""
    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return np.log(input)
    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        # d/dx log(x) = 1/x
        return grad_output / x
class Exp(Function):
    """Exponential, elementwise."""
    @staticmethod
    def forward(ctx, input):
        # Save the output rather than the input: exp is its own derivative.
        out = np.exp(input)
        ctx.save_for_backward(out)
        return out
    @staticmethod
    def backward(ctx, grad_output):
        out, = ctx.saved_tensors
        return grad_output * out
# ************* reduce ops *************
class Sum(Function):
    """Reduce-sum over the given axis (all elements when axis is None)."""
    @staticmethod
    def forward(ctx, input, axis=None):
        ctx.save_for_backward(input, axis)
        # Full reduction returns a 1-element array rather than a 0-d scalar.
        return np.array([input.sum()]) if axis is None else input.sum(axis=axis)
    @staticmethod
    def backward(ctx, grad_output):
        input, axis = ctx.saved_tensors
        axis = [axis] if type(axis) is int else axis
        # Shape with the reduced axes restored as size 1, so the gradient
        # broadcasts back over every summed element.
        shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
        return grad_output.reshape(shape) + np.zeros_like(input)
class Max(Function):
    """Reduce-max over the given axis (all elements when axis is None)."""
    @staticmethod
    def forward(ctx, inp, axis=None):
        axis = [axis] if type(axis) == int else axis
        # keepdims=True so the saved max broadcasts against the input in backward.
        ret = np.amax(inp, axis=None if axis is None else tuple(axis), keepdims=True)
        ctx.save_for_backward(inp, axis, ret)
        if axis is not None:
            # Drop the reduced axes from the returned value.
            ret = ret.reshape([inp.shape[i] for i in range(len(inp.shape)) if i not in axis])
        return ret
    @staticmethod
    def backward(ctx, grad_output):
        input, axis, ret = ctx.saved_tensors
        shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
        # Mask of positions that attained the maximum.
        ret2 = (input == ret.reshape(shape))
        # Split the gradient evenly among tied maxima.
        div = ret2.sum(axis=None if axis is None else tuple(axis), keepdims=True)
        return ret2 * grad_output.reshape(shape) / div
# ************* binary ops *************
def unbroadcast(out, in_sh):
    """Reduce a gradient back to shape ``in_sh`` by summing broadcast axes.

    The adjoint of broadcasting is summation over every axis that was
    expanded from size 1. ``(1,)`` means a scalar input: sum everything.
    """
    if in_sh == (1,):
        reduce_axes = None
    else:
        reduce_axes = tuple(i for i, s in enumerate(in_sh)
                            if s == 1 and out.shape[i] > 1)
    return out.sum(axis=reduce_axes).reshape(in_sh)
class Add(Function):
    """Elementwise addition with numpy broadcasting."""
    @staticmethod
    def forward(ctx, x, y):
        # Only the shapes are needed to undo broadcasting in backward.
        ctx.save_for_backward(x.shape, y.shape)
        return x + y
    @staticmethod
    def backward(ctx, grad_output):
        shape_x, shape_y = ctx.saved_tensors
        # d(x+y)/dx = d(x+y)/dy = 1; just undo broadcasting.
        return unbroadcast(grad_output, shape_x), unbroadcast(grad_output, shape_y)
class Sub(Function):
    """Elementwise subtraction with numpy broadcasting."""
    @staticmethod
    def forward(ctx, x, y):
        # Only the shapes are needed to undo broadcasting in backward.
        ctx.save_for_backward(x.shape, y.shape)
        return x - y
    @staticmethod
    def backward(ctx, grad_output):
        shape_x, shape_y = ctx.saved_tensors
        # d(x-y)/dx = 1, d(x-y)/dy = -1.
        return unbroadcast(grad_output, shape_x), unbroadcast(-grad_output, shape_y)
class Mul(Function):
    """Elementwise multiplication with numpy broadcasting."""
    @staticmethod
    def forward(ctx, x, y):
        ctx.save_for_backward(x, y)
        return x * y
    @staticmethod
    def backward(ctx, grad_output):
        x, y = ctx.saved_tensors
        # d(xy)/dx = y, d(xy)/dy = x; then undo broadcasting.
        return unbroadcast(y * grad_output, x.shape), unbroadcast(x * grad_output, y.shape)
class Pow(Function):
    """Elementwise power x ** y with numpy broadcasting."""
    @staticmethod
    def forward(ctx, x, y):
        ctx.save_for_backward(x, y)
        return x ** y
    @staticmethod
    def backward(ctx, grad_output):
        x, y = ctx.saved_tensors
        # d(x^y)/dx = y * x^(y-1);  d(x^y)/dy = x^y * ln(x).
        return unbroadcast(y * (x ** (y - 1.0)) * grad_output, x.shape), \
               unbroadcast((x ** y) * np.log(x) * grad_output, y.shape)
# ************* movement ops *************
class Reshape(Function):
    """Reshape to a given shape; the gradient reshapes back."""
    @staticmethod
    def forward(ctx, x, shape):
        # Only the original shape is needed to invert the reshape.
        ctx.save_for_backward(x.shape)
        return x.reshape(shape)
    @staticmethod
    def backward(ctx, grad_output):
        in_shape, = ctx.saved_tensors
        return grad_output.reshape(in_shape)
class Transpose(Function):
    """Permute the axes by the given order."""
    @staticmethod
    def forward(ctx, x, order):
        ctx.save_for_backward(order)
        return np.transpose(x, order)
    @staticmethod
    def backward(ctx, x):
        # NOTE(review): reads ctx.order instead of ctx.saved_tensors --
        # presumably the Function base also stores apply() args as ctx
        # attributes (Conv2D relies on the same via ctx.stride); confirm.
        # argsort(order) yields the inverse permutation.
        return np.transpose(x, np.argsort(ctx.order))
def inner_slice(x, arg):
    """Slice ``x`` by (start, end) pairs per axis; out-of-range parts read as zeros.

    Negative starts / ends beyond the axis length are handled by zero-padding
    first, then slicing in the padded coordinate system.
    """
    pad = [(max(0, -lo), max(0, hi - x.shape[d])) for d, (lo, hi) in enumerate(arg)]
    padded = np.pad(x, pad)
    # Shift each slice window by the amount of leading padding on its axis.
    windows = tuple(slice(lo + pad[d][0], hi + pad[d][0]) for d, (lo, hi) in enumerate(arg))
    return padded[windows]
class Slice(Function):
    """Slice with (start, end) pairs per axis; out-of-range regions read as zeros."""
    @staticmethod
    def forward(ctx, x, arg=None):
        ctx.save_for_backward(x.shape)
        return inner_slice(x, arg)
    @staticmethod
    def backward(ctx, grad_output):
        shape, = ctx.saved_tensors
        # Invert the slice: grow the gradient back to the input shape.
        # NOTE(review): ctx.arg is read without save_for_backward --
        # presumably the Function base stores apply() args on ctx; confirm.
        narg = [(0 - p[0], grad_output.shape[i] + (shape[i] - p[1])) for i, p in enumerate(ctx.arg)]
        return inner_slice(grad_output, narg)
# ************* processing ops *************
class Matmul(Function):
    """Matrix multiplication (batched over any leading dimensions)."""
    @staticmethod
    def forward(ctx, input, weight):
        ctx.save_for_backward(input, weight)
        return input @ weight
    @staticmethod
    def backward(ctx, grad_output):
        input, weight = ctx.saved_tensors
        # d(x@w)/dx = g @ w^T and d(x@w)/dw = x^T @ g, transposing only the
        # last two axes so batch dimensions are preserved.
        grad_input = grad_output @ np.swapaxes(weight, -2, -1)
        grad_weight = np.swapaxes(input, -2, -1) @ grad_output
        return grad_input, grad_weight
class Conv2D(Function):
    """Grouped 2D convolution (valid padding), implemented with numpy.

    x: (bs, groups*cin, iy, ix); w: (cout, cin, H, W) with cout divisible
    by groups. NOTE(review): ctx.stride / ctx.groups are read without
    save_for_backward -- presumably the Function base stores apply() kwargs
    on ctx; confirm.
    """
    @staticmethod
    def forward(ctx, x, w, stride=1, groups=1):
        if type(ctx.stride) == int:
            ctx.stride = (ctx.stride, ctx.stride)
        cout, cin, H, W = w.shape
        ys, xs = ctx.stride
        bs, cin_ = x.shape[0], x.shape[1]
        # Output spatial size for a valid (no-padding) strided convolution.
        oy, ox = (x.shape[2] - (H - ys)) // ys, (x.shape[3] - (W - xs)) // xs
        assert cin * ctx.groups == cin_
        assert cout % ctx.groups == 0
        rcout = cout // ctx.groups
        gx = x.reshape(bs, ctx.groups, cin, x.shape[2], x.shape[3])
        # Zero-copy view of all (H, W) sliding windows via stride tricks.
        tx = np.lib.stride_tricks.as_strided(gx,
                                             shape=(bs, ctx.groups, cin, oy, ox, H, W),
                                             strides=(*gx.strides[0:3], gx.strides[3] * ys, gx.strides[4] * xs,
                                                      *gx.strides[3:5]),
                                             writeable=False,
                                             )
        tw = w.reshape(ctx.groups, rcout, cin, H, W)
        ctx.save_for_backward(tx, tw, x.shape)
        ret = np.zeros((bs, ctx.groups, oy, ox, rcout), dtype=x.dtype)
        for g in range(ctx.groups):
            # ijYXyx,kjyx -> iYXk ->ikYX
            ret[:, g] += np.tensordot(tx[:, g], tw[g], ((1, 4, 5), (1, 2, 3)))
        return np.moveaxis(ret, 4, 2).reshape(bs, cout, oy, ox)
    @staticmethod
    def backward(ctx, grad_output):
        bs, _, oy, ox = grad_output.shape
        tx, tw, x_shape = ctx.saved_tensors
        _, rcout, cin, H, W = tw.shape
        ys, xs = ctx.stride
        OY, OX = x_shape[2:4]
        ggg = grad_output.reshape(bs, ctx.groups, rcout, oy, ox)
        # Weight gradient: correlate the output gradient with the input windows.
        gdw = np.zeros((ctx.groups, rcout, cin, H, W), dtype=tx.dtype)
        for g in range(ctx.groups):
            # 'ikYX,ijYXyx -> kjyx'
            gdw[g] += np.tensordot(ggg[:, g], tx[:, g], ((0, 2, 3), (0, 2, 3)))
        # Input gradient: scatter each output position's contribution back
        # onto its (H, W) input window. needs to be optimized
        gdx = np.zeros((bs, ctx.groups, cin, OY, OX), dtype=tx.dtype)
        for k in range(oy * ox):
            Y, X = k // ox, k % ox
            iY, iX = Y * ys, X * xs
            # gdx[:,:,: , iY:iY+H, iX:iX+W] += np.einsum('igk,gkjyx->igjyx', ggg[:,:,:,Y,X], tw)
            for g in range(ctx.groups):
                tg = np.dot(ggg[:, g, :, Y, X].reshape(bs, -1), tw[g].reshape(rcout, -1))
                gdx[:, g, :, iY:iY + H, iX:iX + W] += tg.reshape((bs, cin, H, W))
        return gdx.reshape((bs, ctx.groups * cin, OY, OX)), gdw.reshape((ctx.groups * rcout, cin, H, W))
| tinygrad/ops_cpu.py | 8,145 | ************* unary ops ************* ************* reduce ops ************* ************* binary ops ************* adjoint operation to broadcast is sum. Need to sum all axis with 1 = in_sh[i] < out.shape[i] ************* movement ops ************* ************* processing ops ************* ijYXyx,kjyx -> iYXk ->ikYX 'ikYX,ijYXyx -> kjyx' needs to be optimized gdx[:,:,: , iY:iY+H, iX:iX+W] += np.einsum('igk,gkjyx->igjyx', ggg[:,:,:,Y,X], tw) | 446 | en | 0.611234 |
import numpy as np
import tensorflow as tf
# ----------------------------------------------------------------------------
def SubPixel1D_v2(I, r):
  """One-dimensional subpixel upsampling layer
  Based on https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py
  """
  with tf.compat.v1.name_scope('subpixel'):
    # NOTE(review): this rebinds r to I's static channel dimension, shadowing
    # the r argument -- the passed-in upscale factor is effectively ignored;
    # presumably callers always pass r equal to the channel count. Confirm.
    bsize, a, r = I.get_shape().as_list()
    bsize = tf.shape(input=I)[0] # Handling Dimension(None) type for undefined batch dim
    # NOTE(review): tf.split/tf.concat are called with the TF1-era
    # dimension-first argument order; this legacy variant will not run
    # against TF2's value-first signatures.
    X = tf.split(1, a, I)  # a, [bsize, 1, r]
    if 'axis' in tf.squeeze.__code__.co_varnames:
      X = tf.concat(1, [tf.squeeze(x, axis=1) for x in X])  # bsize, a*r
    elif 'squeeze_dims' in tf.squeeze.__code__.co_varnames:
      X = tf.concat(1, [tf.squeeze(x, axis=[1]) for x in X])  # bsize, a*r
    else:
      raise Exception('Unsupported version of tensorflow')
    return tf.reshape(X, (bsize, a*r, 1))
def SubPixel1D(I, r):
  """One-dimensional subpixel upsampling layer
  Calls a tensorflow function that directly implements this functionality.
  We assume input has dim (batch, width, r)
  """
  with tf.compat.v1.name_scope('subpixel'):
    # batch_to_space on the channel-first view interleaves the r channel
    # slices along the width axis (the subpixel shuffle).
    X = tf.transpose(a=I, perm=[2,1,0]) # (r, w, b)
    X = tf.batch_to_space(X, [r], [[0,0]]) # (1, r*w, b)
    X = tf.transpose(a=X, perm=[2,1,0])
    return X
def SubPixel1D_multichan(I, r):
  """One-dimensional subpixel upsampling layer
  Calls a tensorflow function that directly implements this functionality.
  We assume input has dim (batch, width, r).
  Works with multiple channels: (B,L,rC) -> (B,rL,C)
  """
  with tf.compat.v1.name_scope('subpixel'):
    _, w, rc = I.get_shape()
    # Channel count must be a multiple of the upscale factor.
    assert rc % r == 0
    # NOTE(review): c is never used below (and is a float under Python 3
    # division) -- dead code.
    c = rc / r
    X = tf.transpose(a=I, perm=[2,1,0]) # (rc, w, b)
    X = tf.batch_to_space(X, [r], [[0,0]]) # (c, r*w, b)
    X = tf.transpose(a=X, perm=[2,1,0])
    return X
# ----------------------------------------------------------------------------
# demonstration
# Demonstration: run SubPixel1D on small integer tensors and print the
# interleaving for the single-channel and multi-channel cases.
if __name__ == "__main__":
  with tf.compat.v1.Session() as sess:
    # (batch=2, width=4, r=2): upsample to width 8, 1 channel.
    x = np.arange(2*4*2).reshape(2, 4, 2)
    X = tf.compat.v1.placeholder("float32", shape=(2, 4, 2), name="X")
    Y = SubPixel1D(X, 2)
    y = sess.run(Y, feed_dict={X: x})
    print('single-channel:')
    print('original, element 0 (2 channels):', x[0,:,0], x[0,:,1])
    print('rescaled, element 1:', y[0,:,0])
    print()
    print('original, element 0 (2 channels) :', x[1,:,0], x[1,:,1])
    print('rescaled, element 1:', y[1,:,0])
    print()
    # (batch=2, width=4, r*c=4) with r=2: upsample to width 8, 2 channels.
    x = np.arange(2*4*4).reshape(2, 4, 4)
    X = tf.compat.v1.placeholder("float32", shape=(2, 4, 4), name="X")
    Y = SubPixel1D(X, 2)
    y = sess.run(Y, feed_dict={X: x})
    print('multichannel:')
    print('original, element 0 (4 channels):', x[0,:,0], x[0,:,1], x[0,:,2], x[0,:,3])
    print('rescaled, element 1:', y[0,:,0], y[0,:,1])
    print()
    print('original, element 0 (2 channels) :', x[1,:,0], x[1,:,1], x[1,:,2], x[1,:,3])
    print('rescaled, element 1:', y[1,:,0], y[1,:,1], end=' ')  # NOTE(review): end=' ' looks like a 2to3 artifact
| src/models/layers/subpixel.py | 2,930 | One-dimensional subpixel upsampling layer
Calls a tensorflow function that directly implements this functionality.
We assume input has dim (batch, width, r)
One-dimensional subpixel upsampling layer
Calls a tensorflow function that directly implements this functionality.
We assume input has dim (batch, width, r).
Works with multiple channels: (B,L,rC) -> (B,rL,C)
One-dimensional subpixel upsampling layer
Based on https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py
---------------------------------------------------------------------------- Handling Dimension(None) type for undefined batch dim a, [bsize, 1, r] bsize, a*r bsize, a*r (r, w, b) (1, r*w, b) (rc, w, b) (c, r*w, b) ---------------------------------------------------------------------------- demonstration | 792 | en | 0.558592 |
import logging
import boto3
from botocore.vendored.requests.packages.urllib3.exceptions import ResponseError
from django.core.mail.backends.base import BaseEmailBackend
from django_ses import settings
from datetime import datetime, timedelta
from time import sleep
try:
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
# Shim for Python 3.7. Remove when support is dropped.
import importlib_metadata
__version__ = importlib_metadata.version(__name__)
__all__ = ('SESBackend',)
# These would be nice to make class-level variables, but the backend is
# re-created for each outgoing email/batch.
# recent_send_times also is not going to work quite right if there are multiple
# email backends with different rate limits returned by SES, but that seems
# like it would be rare.
cached_rate_limits = {}
recent_send_times = []
logger = logging.getLogger('django_ses')
def dkim_sign(message, dkim_domain=None, dkim_key=None, dkim_selector=None, dkim_headers=None):
    """Return signed email message if dkim package and settings are available."""
    try:
        import dkim
    except ImportError:
        # dkim is an optional dependency; without it, pass the message through.
        return message
    if not (dkim_domain and dkim_key):
        # Signing is only attempted when both a domain and a key are configured.
        return message
    signature = dkim.sign(message,
                          dkim_selector,
                          dkim_domain,
                          dkim_key,
                          include_headers=dkim_headers)
    return signature + message
def cast_nonzero_to_float(val):
    """Cast nonzero number to float; on zero or None, return None"""
    return float(val) if val else None
class SESBackend(BaseEmailBackend):
    """A Django Email backend that uses Amazon's Simple Email Service.

    Instances are created per outgoing email/batch by Django, so rate-limit
    state lives in the module-level ``cached_rate_limits`` and
    ``recent_send_times`` (see comments at module scope).
    """

    def __init__(self, fail_silently=False, aws_access_key=None,
                 aws_secret_key=None, aws_region_name=None,
                 aws_region_endpoint=None, aws_auto_throttle=None, aws_config=None,
                 dkim_domain=None, dkim_key=None, dkim_selector=None, dkim_headers=None,
                 ses_source_arn=None, ses_from_arn=None, ses_return_path_arn=None,
                 **kwargs):
        super(SESBackend, self).__init__(fail_silently=fail_silently, **kwargs)
        # Explicit constructor arguments win; otherwise fall back to the
        # django_ses settings module.
        self._access_key_id = aws_access_key or settings.ACCESS_KEY
        self._access_key = aws_secret_key or settings.SECRET_KEY
        self._region_name = aws_region_name if aws_region_name else settings.AWS_SES_REGION_NAME
        self._endpoint_url = aws_region_endpoint if aws_region_endpoint else settings.AWS_SES_REGION_ENDPOINT_URL
        # Throttle factor (0/None disables throttling entirely).
        self._throttle = cast_nonzero_to_float(aws_auto_throttle or settings.AWS_SES_AUTO_THROTTLE)
        self._config = aws_config or settings.AWS_SES_CONFIG
        self.dkim_domain = dkim_domain or settings.DKIM_DOMAIN
        self.dkim_key = dkim_key or settings.DKIM_PRIVATE_KEY
        self.dkim_selector = dkim_selector or settings.DKIM_SELECTOR
        self.dkim_headers = dkim_headers or settings.DKIM_HEADERS
        self.ses_source_arn = ses_source_arn or settings.AWS_SES_SOURCE_ARN
        self.ses_from_arn = ses_from_arn or settings.AWS_SES_FROM_ARN
        self.ses_return_path_arn = ses_return_path_arn or settings.AWS_SES_RETURN_PATH_ARN
        self.connection = None

    def open(self):
        """Create a connection to the AWS API server. This can be reused for
        sending multiple emails.

        Returns True if a new connection was created, False if one already
        existed (matching the contract of Django's SMTP backend).
        """
        if self.connection:
            return False
        try:
            self.connection = boto3.client(
                'ses',
                aws_access_key_id=self._access_key_id,
                aws_secret_access_key=self._access_key,
                region_name=self._region_name,
                endpoint_url=self._endpoint_url,
                config=self._config
            )
        except Exception:
            if not self.fail_silently:
                raise
        else:
            # Bug fix: this method previously fell through and returned None
            # on success, so callers checking `new_conn_created = self.open()`
            # never closed the connection they had just opened.
            return True

    def close(self):
        """Close any open HTTP connections to the API server.
        """
        self.connection = None

    def send_messages(self, email_messages):
        """Sends one or more EmailMessage objects and returns the number of
        email messages sent.
        """
        if not email_messages:
            return
        new_conn_created = self.open()
        if not self.connection:
            # Failed silently
            return
        num_sent = 0
        source = settings.AWS_SES_RETURN_PATH
        for message in email_messages:
            # SES Configuration sets. If the AWS_SES_CONFIGURATION_SET setting
            # is not None, append the appropriate header to the message so that
            # SES knows which configuration set it belongs to.
            #
            # If settings.AWS_SES_CONFIGURATION_SET is a callable, pass it the
            # message object and dkim settings and expect it to return a string
            # containing the SES Configuration Set name.
            if (settings.AWS_SES_CONFIGURATION_SET
                    and 'X-SES-CONFIGURATION-SET' not in message.extra_headers):
                if callable(settings.AWS_SES_CONFIGURATION_SET):
                    message.extra_headers[
                        'X-SES-CONFIGURATION-SET'] = settings.AWS_SES_CONFIGURATION_SET(
                            message,
                            dkim_domain=self.dkim_domain,
                            dkim_key=self.dkim_key,
                            dkim_selector=self.dkim_selector,
                            dkim_headers=self.dkim_headers
                        )
                else:
                    message.extra_headers[
                        'X-SES-CONFIGURATION-SET'] = settings.AWS_SES_CONFIGURATION_SET

            # Automatic throttling. Assumes that this is the only SES client
            # currently operating. The AWS_SES_AUTO_THROTTLE setting is a
            # factor to apply to the rate limit, with a default of 0.5 to stay
            # well below the actual SES throttle.
            # Set the setting to 0 or None to disable throttling.
            if self._throttle:
                global recent_send_times
                now = datetime.now()
                # Get and cache the current SES max-per-second rate limit
                # returned by the SES API.
                rate_limit = self.get_rate_limit()
                logger.debug("send_messages.throttle rate_limit='{}'".format(rate_limit))
                # Prune from recent_send_times anything more than a few seconds
                # ago. Even though SES reports a maximum per-second, the way
                # they enforce the limit may not be on a one-second window.
                # To be safe, we use a two-second window (but allow 2 times the
                # rate limit) and then also have a default rate limit factor of
                # 0.5 so that we really limit the one-second amount in two
                # seconds.
                window = 2.0  # seconds
                window_start = now - timedelta(seconds=window)
                new_send_times = [t for t in recent_send_times if t > window_start]
                recent_send_times = new_send_times
                # If the number of recent send times in the last 1/_throttle
                # seconds exceeds the rate limit, add a delay.
                # Since I'm not sure how Amazon determines at exactly what
                # point to throttle, better be safe than sorry and let in, say,
                # half of the allowed rate.
                if len(new_send_times) > rate_limit * window * self._throttle:
                    # Sleep the remainder of the window period.
                    delta = now - new_send_times[0]
                    # Idiom fix: timedelta.total_seconds() is exactly the old
                    # manual microsecond arithmetic.
                    delay = window - delta.total_seconds()
                    if delay > 0:
                        sleep(delay)
                recent_send_times.append(now)
            # end of throttling

            kwargs = dict(
                Source=source or message.from_email,
                Destinations=message.recipients(),
                # todo attachments?
                RawMessage={'Data': dkim_sign(message.message().as_string(),
                                              dkim_key=self.dkim_key,
                                              dkim_domain=self.dkim_domain,
                                              dkim_selector=self.dkim_selector,
                                              dkim_headers=self.dkim_headers)}
            )
            if self.ses_source_arn:
                kwargs['SourceArn'] = self.ses_source_arn
            if self.ses_from_arn:
                kwargs['FromArn'] = self.ses_from_arn
            if self.ses_return_path_arn:
                kwargs['ReturnPathArn'] = self.ses_return_path_arn
            try:
                response = self.connection.send_raw_email(**kwargs)
                message.extra_headers['status'] = 200
                message.extra_headers['message_id'] = response['MessageId']
                message.extra_headers['request_id'] = response['ResponseMetadata']['RequestId']
                num_sent += 1
                if 'X-SES-CONFIGURATION-SET' in message.extra_headers:
                    logger.debug(
                        "send_messages.sent from='{}' recipients='{}' message_id='{}' request_id='{}' "
                        "ses-configuration-set='{}'".format(
                            message.from_email,
                            ", ".join(message.recipients()),
                            message.extra_headers['message_id'],
                            message.extra_headers['request_id'],
                            message.extra_headers['X-SES-CONFIGURATION-SET']
                        ))
                else:
                    logger.debug("send_messages.sent from='{}' recipients='{}' message_id='{}' request_id='{}'".format(
                        message.from_email,
                        ", ".join(message.recipients()),
                        message.extra_headers['message_id'],
                        message.extra_headers['request_id']
                    ))
            except ResponseError as err:
                # Store failure information so to post process it if required
                error_keys = ['status', 'reason', 'body', 'request_id',
                              'error_code', 'error_message']
                for key in error_keys:
                    message.extra_headers[key] = getattr(err, key, None)
                if not self.fail_silently:
                    raise
        if new_conn_created:
            self.close()
        return num_sent

    def get_rate_limit(self):
        """Return (and cache per access key) the SES max-send-rate quota."""
        if self._access_key_id in cached_rate_limits:
            return cached_rate_limits[self._access_key_id]
        new_conn_created = self.open()
        if not self.connection:
            raise Exception(
                "No connection is available to check current SES rate limit.")
        try:
            quota_dict = self.connection.get_send_quota()
            max_per_second = quota_dict['MaxSendRate']
            ret = float(max_per_second)
            cached_rate_limits[self._access_key_id] = ret
            return ret
        finally:
            if new_conn_created:
                self.close()
| django_ses/__init__.py | 11,494 | A Django Email backend that uses Amazon's Simple Email Service.
Cast nonzero number to float; on zero or None, return None
Close any open HTTP connections to the API server.
Return signed email message if dkim package and settings are available.
Create a connection to the AWS API server. This can be reused for
sending multiple emails.
Sends one or more EmailMessage objects and returns the number of
email messages sent.
Shim for Python 3.7. Remove when support is dropped. These would be nice to make class-level variables, but the backend is re-created for each outgoing email/batch. recent_send_times also is not going to work quite right if there are multiple email backends with different rate limits returned by SES, but that seems like it would be rare. Failed silently SES Configuration sets. If the AWS_SES_CONFIGURATION_SET setting is not None, append the appropriate header to the message so that SES knows which configuration set it belongs to. If settings.AWS_SES_CONFIGURATION_SET is a callable, pass it the message object and dkim settings and expect it to return a string containing the SES Configuration Set name. Automatic throttling. Assumes that this is the only SES client currently operating. The AWS_SES_AUTO_THROTTLE setting is a factor to apply to the rate limit, with a default of 0.5 to stay well below the actual SES throttle. Set the setting to 0 or None to disable throttling. Get and cache the current SES max-per-second rate limit returned by the SES API. Prune from recent_send_times anything more than a few seconds ago. Even though SES reports a maximum per-second, the way they enforce the limit may not be on a one-second window. To be safe, we use a two-second window (but allow 2 times the rate limit) and then also have a default rate limit factor of 0.5 so that we really limit the one-second amount in two seconds. seconds If the number of recent send times in the last 1/_throttle seconds exceeds the rate limit, add a delay. Since I'm not sure how Amazon determines at exactly what point to throttle, better be safe than sorry and let in, say, half of the allowed rate. Sleep the remainder of the window period. end of throttling todo attachments? Store failure information so to post process it if required | 2,269 | en | 0.853602 |
# -*- coding=UTF-8 -*-
# pyright: strict
from __future__ import annotations
import os
import sys
import subprocess
def main():
    """Run pyright via npx and return its exit status.

    PATH is extended with the current interpreter's directory so the spawned
    pyright resolves this environment's Python.
    """
    env = {
        **os.environ,
        "PATH": os.path.pathsep.join(
            (
                os.path.dirname(sys.executable),
                os.getenv("PATH") or "",
            )
        ),
    }
    # Bug fix: with shell=True a *list* argument only runs its first element
    # on POSIX ("npx" alone); a single command string behaves the same on
    # Windows (where npx is npx.cmd and needs the shell) and correctly on POSIX.
    return subprocess.call("npx pyright", env=env, shell=True)


if __name__ == "__main__":
    # Bug fix: propagate pyright's exit status so CI detects failures.
    sys.exit(main())
| scripts/run_pyright.py | 490 | -*- coding=UTF-8 -*- pyright: strict | 36 | en | 0.683297 |
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a local bitcoind.

    NOTE(review): this is Python 2 code (httplib, str-based b64encode);
    porting to Python 3 requires http.client and bytes handling -- confirm
    the intended runtime before modernizing.
    """
    def __init__(self, host, port, username, password):
        # HTTP Basic auth header built from the RPC credentials.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Plain HTTP with a 30-second timeout (local RPC endpoint).
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def execute(self, obj):
        # POST a single request or a batch (list) and return the decoded
        # JSON reply; None signals a transport-level failure.
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        return resp_obj
    @staticmethod
    def build_request(idx, method, params):
        # Build one JSON-RPC request object; idx is echoed back as 'id'.
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : idx }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        return obj
    @staticmethod
    def response_is_error(resp_obj):
        # A reply is an error when it carries a non-null 'error' member.
        return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
    """Print the hash of every block from min_height through max_height,
    querying bitcoind in batches of at most max_blocks_per_call."""
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])
    end_height = settings['max_height'] + 1
    height = settings['min_height']
    while height < end_height:
        num_blocks = min(end_height - height, max_blocks_per_call)
        batch = [rpc.build_request(idx, 'getblockhash', [height + idx])
                 for idx in range(num_blocks)]
        reply = rpc.execute(batch)
        for idx, resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+idx, ': ', resp_obj['error'], file=sys.stderr)
                exit(1)
            assert(resp_obj['id'] == idx)  # assume replies are in-sequence
            print(resp_obj['result'])
        height += num_blocks
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)

    # Read key=value settings from the config file, skipping comments.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search(r'^\s*#', line)  # raw string: avoid invalid escape
        if m:
            continue
        # parse key=value lines
        m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for anything missing from the config file.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 60100
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        # Bug fix: `stderr` was an unbound name here; use sys.stderr.
        print("Missing username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    get_block_hashes(settings)
| contrib/linearize/linearize-hashes.py | 3,037 | !/usr/bin/python linearize-hashes.py: List blocks in a linear, no-fork version of the chain. Copyright (c) 2013-2014 The Bitcoin developers Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. assume replies are in-sequence skip comment lines parse key=value lines | 349 | en | 0.711358 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
'''The Azure Command-line tool.
This tools provides a command-line interface to Azure's management and storage
APIs.
'''
import pkg_resources
# NOTE(review): pkg_resources-style namespace packages are deprecated in
# favor of native (PEP 420) namespaces -- confirm before upgrading setuptools.
pkg_resources.declare_namespace(__name__)
__author__ = "Microsoft Corporation <python@microsoft.com>"
__version__ = "2.0.17+dev"
| src/azure-cli/azure/cli/__init__.py | 618 | The Azure Command-line tool.
This tools provides a command-line interface to Azure's management and storage
APIs.
-------------------------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. -------------------------------------------------------------------------------------------- | 453 | en | 0.505296 |
#!/usr/bin/python
import os
import unittest
""" Script to run the Python tests. """
def run_python_tests():
    """ Runs the Python tests.
    Returns:
      True if the tests all succeed, False if there are failures. """
    print("Starting tests...")
    loader = unittest.TestLoader()
    # Get the directory this module is in.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    suite = loader.discover("rhodopsin/tests", top_level_dir=dir_path)
    test_result = unittest.TextTestRunner(verbosity=2).run(suite)
    # Idiom fix: return the runner's success flag directly instead of
    # branching to return True/False.
    return test_result.wasSuccessful()


if __name__ == "__main__":
    # Bug fix: the result was previously discarded, so the script always
    # exited 0 even when tests failed. Propagate failure via the exit status.
    import sys
    sys.exit(0 if run_python_tests() else 1)
| run_tests.py | 636 | Runs the Python tests.
Returns:
True if the tests all succeed, False if there are failures.
!/usr/bin/python Get the directory this module is in. | 149 | en | 0.611513 |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Change `db_dbnode.type` for base `Data` types.
The base Data types Bool, Float, Int and Str have been moved in the source code, which means that their
module path changes, which determines the plugin type string which is stored in the databse.
The type string now will have a type string prefix that is unique to each sub type.
Revision ID: django_0009
Revises: django_0008
"""
from alembic import op
revision = 'django_0009'
down_revision = 'django_0008'
branch_labels = None
depends_on = None
def upgrade():
    """Migrations for the upgrade."""
    # Data-only migration: rewrite stored plugin type strings from the old
    # 'data.base.<Name>.' module paths to the new per-module
    # 'data.<name>.<Name>.' paths. No schema change is made.
    op.execute(
        """
        UPDATE db_dbnode SET type = 'data.bool.Bool.' WHERE type = 'data.base.Bool.';
        UPDATE db_dbnode SET type = 'data.float.Float.' WHERE type = 'data.base.Float.';
        UPDATE db_dbnode SET type = 'data.int.Int.' WHERE type = 'data.base.Int.';
        UPDATE db_dbnode SET type = 'data.str.Str.' WHERE type = 'data.base.Str.';
        UPDATE db_dbnode SET type = 'data.list.List.' WHERE type = 'data.base.List.';
        """
    )
def downgrade():
    """Migrations for the downgrade."""
    # Inverse of upgrade(): restore the legacy 'data.base.<Name>.' type
    # strings for the five migrated base Data types.
    op.execute(
        """
        UPDATE db_dbnode SET type = 'data.base.Bool.' WHERE type = 'data.bool.Bool.';
        UPDATE db_dbnode SET type = 'data.base.Float.' WHERE type = 'data.float.Float.';
        UPDATE db_dbnode SET type = 'data.base.Int.' WHERE type = 'data.int.Int.';
        UPDATE db_dbnode SET type = 'data.base.Str.' WHERE type = 'data.str.Str.';
        UPDATE db_dbnode SET type = 'data.base.List.' WHERE type = 'data.list.List.';
        """
    )
| aiida/storage/psql_dos/migrations/versions/django_0009_base_data_plugin_type_string.py | 2,275 | Migrations for the downgrade.
Migrations for the upgrade.
Change `db_dbnode.type` for base `Data` types.
The base Data types Bool, Float, Int and Str have been moved in the source code, which means that their
module path changes, which determines the plugin type string which is stored in the databse.
The type string now will have a type string prefix that is unique to each sub type.
Revision ID: django_0009
Revises: django_0008
-*- coding: utf-8 -*- Copyright (c), The AiiDA team. All rights reserved. This file is part of the AiiDA code. The code is hosted on GitHub at https://github.com/aiidateam/aiida-core For further information on the license, see the LICENSE.txt file For further information please visit http://www.aiida.net pylint: disable=invalid-name,no-member | 934 | en | 0.876987 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VirtualMachineRuntimeInfo(vim, *args, **kwargs):
    '''The RuntimeInfo data object type provides information about the execution state
    and history of a virtual machine.'''

    obj = vim.client.factory.create('{urn:vim25}VirtualMachineRuntimeInfo')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 7:
        # Bug fix: the message previously claimed "at least 8" while the check
        # requires 7 (one per required field) and reported only len(args);
        # report the actual requirement and the combined count.
        raise IndexError('Expected at least 7 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'connectionState', 'consolidationNeeded', 'faultToleranceState',
        'numMksConnections', 'powerState', 'recordReplayState', 'toolsInstallerMounted' ]
    optional = [ 'bootTime', 'cleanPowerOff', 'dasVmProtection', 'device', 'host',
        'maxCpuUsage', 'maxMemoryUsage', 'memoryOverhead', 'minRequiredEVCModeKey',
        'needSecondaryReason', 'question', 'suspendInterval', 'suspendTime',
        'dynamicProperty', 'dynamicType' ]

    # Positional args fill the required fields first, then the optional ones,
    # in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword args may set any known field; unknown names are rejected.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| pyvisdk/do/virtual_machine_runtime_info.py | 1,456 | The RuntimeInfo data object type provides information about the execution state
and history of a virtual machine.
Automatically generated, do not edit. do some validation checking... | 184 | en | 0.600621 |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Add non-adjusted next cycle start date
Revision ID: 44047daa31a9
Revises: 1431e7094e26
Create Date: 2015-07-07 14:31:27.780564
"""
# revision identifiers, used by Alembic.
revision = '44047daa31a9'
down_revision = '4840f4760f4b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from datetime import date
from ggrc.app import app
from ggrc import settings, db
import ggrc_workflows.models as models
from ggrc_workflows import adjust_next_cycle_start_date
from ggrc_workflows.services.workflow_cycle_calculator import \
get_cycle_calculator
def upgrade():
  """Add and backfill workflows.non_adjusted_next_cycle_start_date.

  First clears next_cycle_start_date on expired workflows that have no tasks
  configured, then recomputes both the adjusted and non-adjusted next cycle
  start dates for the remaining active recurring workflows.
  """
  op.add_column('workflows',
                sa.Column('non_adjusted_next_cycle_start_date',
                          sa.Date(), nullable=True))

  # If somebody deleted all the tasks we must clear the next cycle start
  # date
  workflows = db.session.query(models.Workflow) \
      .filter(
          models.Workflow.next_cycle_start_date != None,
          models.Workflow.recurrences == True,
          models.Workflow.status == 'Active',
          models.Workflow.next_cycle_start_date < date.today()
      ).all()

  for workflow in workflows:
    tasks_start_days = [task.relative_start_day
                        for tg in workflow.task_groups
                        for task in tg.task_group_tasks]
    tasks_end_days = [task.relative_end_day
                      for tg in workflow.task_groups
                      for task in tg.task_group_tasks]
    if ((not all(tasks_start_days) and not all(tasks_end_days)) or
        (not tasks_start_days and not tasks_end_days)):
      app.logger.warning(
          "Removing NCSD from expired WF {} because no tasks are "
          "set up. Current NCSD: {}".format(
              workflow.id,
              workflow.next_cycle_start_date
          ))
      workflow.next_cycle_start_date = None
      db.session.add(workflow)

  workflows = db.session.query(models.Workflow) \
      .filter(
          models.Workflow.next_cycle_start_date != None,
          models.Workflow.non_adjusted_next_cycle_start_date == None,
          models.Workflow.recurrences == True,
          models.Workflow.status == 'Active',
          models.Workflow.next_cycle_start_date >= date.today()
      ).all()

  for workflow in workflows:
    tasks_start_days = [task.relative_start_day
                        for tg in workflow.task_groups
                        for task in tg.task_group_tasks]
    tasks_end_days = [task.relative_end_day
                      for tg in workflow.task_groups
                      for task in tg.task_group_tasks]

    # We must skip tasks that don't have start days and end days defined
    if ((not all(tasks_start_days) and not all(tasks_end_days)) or
        (not tasks_start_days and not tasks_end_days)):
      append_msg = ""
      if workflow.next_cycle_start_date:
        workflow.next_cycle_start_date = None
        append_msg += (" Removing existing next cycle start date "
                       "because none are configured.")
      db.session.add(workflow)
      app.logger.warning(
          "Skipping active WF {0} because no tasks "
          "are set up.{1}".format(
              workflow.id,
              append_msg
          ))
      continue

    pre_compute_ncsd = workflow.next_cycle_start_date
    last_cycle_start_date = None
    if workflow.cycles:
      last_cycle_start_date = max([c.start_date for c in workflow.cycles])

    if last_cycle_start_date:
      base_date = last_cycle_start_date
    else:
      # Bug fix: this read `base_date.today()` before `base_date` was ever
      # assigned, raising UnboundLocalError for workflows with no cycles.
      base_date = date.today()

    base_date = max(base_date, workflow.next_cycle_start_date)
    calculator = get_cycle_calculator(workflow, base_date=base_date)

    # Earliest relative task start defines the non-adjusted start day.
    if workflow.frequency in {"weekly", "monthly"}:
      nancsd_day = min(
          v['relative_start'] for v in calculator.reified_tasks.values())
      nancsd_month = None
    else:
      nancsd_month, nancsd_day = min(
          v['relative_start'] for v in calculator.reified_tasks.values())

    nancsd_date = calculator.relative_day_to_date(
        relative_day=nancsd_day,
        relative_month=nancsd_month,
        base_date=base_date)

    # Advance until the adjusted date lands strictly after the last cycle
    # (or after the previously stored next cycle start date).
    if last_cycle_start_date:
      while calculator.adjust_date(nancsd_date) <= last_cycle_start_date:
        base_date = base_date + calculator.time_delta
        nancsd_date = calculator.relative_day_to_date(
            relative_day=nancsd_day,
            relative_month=nancsd_month,
            base_date=base_date
        )
    else:
      base_date = base_date - calculator.time_delta
      while calculator.adjust_date(nancsd_date) <= pre_compute_ncsd:
        base_date = base_date + calculator.time_delta
        nancsd_date = calculator.relative_day_to_date(
            relative_day=nancsd_day,
            relative_month=nancsd_month,
            base_date=base_date
        )

    workflow.non_adjusted_next_cycle_start_date = nancsd_date
    workflow.next_cycle_start_date = calculator.adjust_date(nancsd_date)
    post_compute_ncsd = workflow.next_cycle_start_date

    start_dates = ["{}/{}".format(
        task.relative_start_month,
        task.relative_start_day) for tg in workflow.task_groups
        for task in tg.task_group_tasks]
    end_dates = ["{}/{}".format(
        task.relative_end_month,
        task.relative_end_day) for tg in workflow.task_groups
        for task in tg.task_group_tasks]

    if pre_compute_ncsd != post_compute_ncsd:
      app.logger.warning(
          "Adjusted NCSD for workflow {}. "
          "Freq: {}, PRE: {}, Last cycle: {}, POST: {}, NON: {},"
          "tasks start: {}, tasks end: {},".format(
              workflow.id,
              workflow.frequency[:2],
              pre_compute_ncsd,
              last_cycle_start_date,
              post_compute_ncsd,
              workflow.non_adjusted_next_cycle_start_date,
              start_dates,
              end_dates))
    db.session.add(workflow)

  # Save
  db.session.commit()
def downgrade():
  """Drop the non_adjusted_next_cycle_start_date column added by upgrade()."""
  op.drop_column('workflows', 'non_adjusted_next_cycle_start_date')
| src/ggrc_workflows/migrations/versions/20150707143127_44047daa31a9_add_non_adjusted_next_cycle_start_date.py | 6,752 | Add non-adjusted next cycle start date
Revision ID: 44047daa31a9
Revises: 1431e7094e26
Create Date: 2015-07-07 14:31:27.780564
Copyright (C) 2017 Google Inc. Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> revision identifiers, used by Alembic. If somebody deleted all the tasks we must clear the next cycle start date We must skip tasks that don't have start days and end days defined Save | 422 | en | 0.757924 |
# The two squares of the vacuum world.
A = 'A'
B = 'B'
# Mutable world state: dirt status of each square plus the agent's location.
Environment = {
    A: 'Dirty',
    B: 'Dirty',
    'Current': A
}
def REFLEX_VACUUM_AGENT(loc_st):  # Determine action
    """Map a percept (location, status) to an action: clean dirt,
    otherwise move toward the other square."""
    location, status = loc_st
    if status == 'Dirty':
        return 'Suck'
    if location == A:
        return 'Right'
    if location == B:
        return 'Left'
def Sensors():  # Sense Environment
    """Return the percept: (agent location, dirt status at that location)."""
    here = Environment['Current']
    return (here, Environment[here])
def Actuators(action):  # Modify Environment
    """Apply *action* to the world: clean the current square, or move
    between squares A and B."""
    here = Environment['Current']
    if action == 'Suck':
        Environment[here] = 'Clean'
    elif (action, here) == ('Right', A):
        Environment['Current'] = B
    elif (action, here) == ('Left', B):
        Environment['Current'] = A
def run(n, make_agent):  # run the agent through n steps
    """Run the agent program for n-1 steps, printing one trace line per step."""
    print(' Current New')
    print('location status action location status')
    for i in range(1, n):
        (location, status) = Sensors()  # Sense Environment before action
        print("{:12s}{:8s}".format(location, status), end='')
        action = make_agent(Sensors())
        Actuators(action)
        (location, status) = Sensors()  # Sense Environment after action
        print("{:8s}{:12s}{:8s}".format(action, location, status))
if __name__ == '__main__':
    run(10, REFLEX_VACUUM_AGENT)
| Lecture_3_Agents/Exercise1/Exercises/reflex_vacuum_agent.py | 1,302 | Determine action Sense Environment Modify Environment run the agent through n steps Sense Environment before action Sense Environment after action | 146 | en | 0.755196 |
def average_rating(rating_list):
    """Return the mean of rating_list rounded to the nearest int; 0 if empty."""
    if rating_list:
        return round(sum(rating_list) / len(rating_list))
    return 0
import asyncio
import copy
import random
from typing import Callable
import pytest
from starkware.starknet.apps.starkgate.cairo.contracts import erc20_contract_def
from starkware.starknet.apps.starkgate.conftest import str_to_felt
from starkware.starknet.testing.contract import StarknetContract
from starkware.starknet.testing.starknet import Starknet
from starkware.starkware_utils.error_handling import StarkException
AMOUNT_BOUND = 2 ** 256  # Uint256 bound: valid token amounts are < 2**256.
GOVERNOR_ADDRESS = str_to_felt("GOVERNOR")
MINTER_ADDRESS = str_to_felt("MINTER")
L1_ACCOUNT = 1
# Accounts pre-funded by the session_token_contract fixture.
initial_balances = {1: 13, 2: 10}
uninitialized_account = 3
initial_total_supply = sum(initial_balances.values())
initialized_account = random.choice(list(initial_balances.keys()))
another_account = 4  # Not initialized_account and not uninitialized_account.
# 0 < TRANSFER_AMOUNT < APPROVE_AMOUNT < initial_balance < HIGH_APPROVE_AMOUNT.
TRANSFER_AMOUNT = int((initial_balances[initialized_account] + 1) / 2)
APPROVE_AMOUNT = 8
HIGH_APPROVE_AMOUNT = 100
MINT_AMOUNT = 10
BURN_AMOUNT = int((initial_balances[initialized_account] + 1) / 2)
@pytest.fixture(scope="session")
def event_loop():
    """Session-scoped event loop so session-scoped async fixtures share it."""
    loop = asyncio.get_event_loop()
    yield loop
    loop.close()
@pytest.fixture(scope="session")
async def session_starknet() -> Starknet:
    """Fresh StarkNet system state, created once per test session."""
    return await Starknet.empty()
@pytest.fixture(scope="session")
async def session_empty_token_contract(
    session_starknet: Starknet,
    token_name: int,
    token_symbol: int,
    token_decimals: int,
) -> StarknetContract:
    """Deploy the ERC20 contract with no balances minted yet."""
    return await session_starknet.deploy(
        constructor_calldata=[
            token_name,
            token_symbol,
            token_decimals,
            MINTER_ADDRESS,
        ],
        contract_def=erc20_contract_def,
    )
@pytest.fixture(scope="session")
async def uint256(session_empty_token_contract: StarknetContract) -> Callable:
    """Return a converter from Python int to the contract's Uint256 struct."""
    def convert_int_to_uint256(num: int):
        # Map negatives to their representative mod 2**256, then split into
        # 128-bit low/high limbs.
        if num < 0:
            num += 2 ** 256
        return session_empty_token_contract.Uint256(low=num % 2 ** 128, high=num // 2 ** 128)
    return convert_int_to_uint256
@pytest.fixture(scope="session")
async def session_token_contract(
    session_empty_token_contract: StarknetContract,
    uint256: Callable,
) -> StarknetContract:
    """Token contract with initial_balances minted by the permitted minter."""
    for account in initial_balances:
        await session_empty_token_contract.permissionedMint(
            recipient=account, amount=uint256(initial_balances[account])
        ).invoke(caller_address=MINTER_ADDRESS)
    return session_empty_token_contract
@pytest.fixture
async def starknet(session_starknet: Starknet) -> Starknet:
    """Per-test deep copy of the session state so tests stay isolated."""
    return copy.deepcopy(session_starknet)
@pytest.fixture
async def token_contract(
    starknet: Starknet, session_token_contract: StarknetContract
) -> StarknetContract:
    """Bind the deployed token contract to the per-test state copy."""
    return StarknetContract(
        state=starknet.state,
        abi=erc20_contract_def.abi,
        contract_address=session_token_contract.contract_address,
        deploy_execution_info=session_token_contract.deploy_execution_info,
    )
@pytest.mark.asyncio
async def test_permitted_minter(token_contract: StarknetContract):
    """permittedMinter returns the minter configured at deployment."""
    execution_info = await token_contract.permittedMinter().call()
    assert execution_info.result == (MINTER_ADDRESS,)
@pytest.mark.asyncio
async def test_name(token_contract: StarknetContract, token_name: int):
    """name() echoes the constructor's token name felt."""
    execution_info = await token_contract.name().call()
    assert execution_info.result == (token_name,)
@pytest.mark.asyncio
async def test_symbol(token_contract: StarknetContract, token_symbol: int):
    """symbol() echoes the constructor's token symbol felt."""
    execution_info = await token_contract.symbol().call()
    assert execution_info.result == (token_symbol,)
@pytest.mark.asyncio
async def test_decimal(token_contract: StarknetContract, token_decimals: int):
    """decimals() echoes the constructor's decimals value."""
    execution_info = await token_contract.decimals().call()
    assert execution_info.result == (token_decimals,)
@pytest.mark.asyncio
async def test_total_supply(token_contract: StarknetContract, uint256: Callable):
    """totalSupply equals the sum of the initially minted balances."""
    execution_info = await token_contract.totalSupply().call()
    assert execution_info.result == (uint256(initial_total_supply),)
@pytest.mark.asyncio
async def test_balance_of(token_contract: StarknetContract, uint256: Callable):
    """balanceOf returns the minted balance, and zero for unfunded accounts."""
    execution_info = await token_contract.balanceOf(account=initialized_account).call()
    assert execution_info.result == (uint256(initial_balances[initialized_account]),)
    execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
    assert execution_info.result == (uint256(0),)
@pytest.mark.asyncio
async def test_transfer_zero_sender(token_contract: StarknetContract, uint256: Callable):
    """A transfer invoked by the zero address must revert."""
    amount = uint256(TRANSFER_AMOUNT)
    with pytest.raises(StarkException, match="assert_not_zero\(sender\)"):
        await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
            caller_address=0
        )
@pytest.mark.asyncio
async def test_transfer_zero_recipient(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert_not_zero\(recipient\)"):
await token_contract.transfer(recipient=0, amount=uint256(TRANSFER_AMOUNT)).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(initial_balances[initialized_account] + 1)
with pytest.raises(StarkException, match="assert_not_zero\(enough_balance\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_invalid_uint256_amount(token_contract: StarknetContract, uint256: Callable):
amount = uint256(AMOUNT_BOUND)
with pytest.raises(StarkException, match="uint256_check\(amount\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_happy_flow(token_contract: StarknetContract, uint256: Callable):
    """Transfer to a fresh account, send it back, then self-transfer."""

    async def balance_of(account):
        info = await token_contract.balanceOf(account=account).call()
        return info.result[0]

    amount = uint256(TRANSFER_AMOUNT)
    await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
        caller_address=initialized_account
    )
    assert await balance_of(initialized_account) == uint256(
        initial_balances[initialized_account] - TRANSFER_AMOUNT
    )
    assert await balance_of(uninitialized_account) == amount
    supply_info = await token_contract.totalSupply().call()
    assert supply_info.result == (uint256(initial_total_supply),)

    # Send the amount back; both balances return to their initial values.
    await token_contract.transfer(recipient=initialized_account, amount=amount).invoke(
        caller_address=uninitialized_account
    )
    assert await balance_of(initialized_account) == uint256(initial_balances[initialized_account])
    assert await balance_of(uninitialized_account) == uint256(0)

    # Tests the case of sender = recipient: balance must be unchanged.
    await token_contract.transfer(recipient=initialized_account, amount=amount).invoke(
        caller_address=initialized_account
    )
    assert await balance_of(initialized_account) == uint256(initial_balances[initialized_account])
@pytest.mark.asyncio
async def test_approve_zero_owner(token_contract: StarknetContract, uint256: Callable):
    """approve() from the zero address must fail on the caller check."""
    amount = uint256(APPROVE_AMOUNT)
    # Raw strings: "\(" in a plain literal is an invalid escape sequence.
    with pytest.raises(StarkException, match=r"assert_not_zero\(caller\)"):
        await token_contract.approve(spender=uninitialized_account, amount=amount).invoke(
            caller_address=0
        )


@pytest.mark.asyncio
async def test_approve_zero_spender(token_contract: StarknetContract, uint256: Callable):
    """approve() for the zero spender must fail."""
    amount = uint256(APPROVE_AMOUNT)
    with pytest.raises(StarkException, match=r"assert_not_zero\(spender\)"):
        await token_contract.approve(spender=0, amount=amount).invoke(
            caller_address=initialized_account
        )


@pytest.mark.asyncio
async def test_approve_invalid_uint256_amount(token_contract: StarknetContract, uint256: Callable):
    """An out-of-range (non-uint256) approval amount must be rejected."""
    amount = uint256(AMOUNT_BOUND)
    with pytest.raises(StarkException, match=r"uint256_check\(amount\)"):
        await token_contract.approve(spender=uninitialized_account, amount=amount).invoke(
            caller_address=initialized_account
        )
@pytest.mark.asyncio
async def test_approve_happy_flow(token_contract: StarknetContract, uint256: Callable):
    """An approval raises the (owner, spender) allowance from zero."""

    async def current_allowance():
        info = await token_contract.allowance(
            owner=initialized_account, spender=uninitialized_account
        ).call()
        return info.result[0]

    assert await current_allowance() == uint256(0)
    await token_contract.approve(
        spender=uninitialized_account, amount=uint256(APPROVE_AMOUNT)
    ).invoke(caller_address=initialized_account)
    assert await current_allowance() == uint256(APPROVE_AMOUNT)
@pytest.mark.asyncio
async def test_transfer_from_zero_sender(token_contract: StarknetContract, uint256: Callable):
    """transferFrom with a zero sender fails (on the allowance check).

    The contract fails when checking for sufficient allowance of account 0.
    Only because we cannot put a balance for address(0) or approve on its
    behalf. Could we do that, we would have failed on the more sensible error
    assert_not_zero(sender).
    """
    # Raw strings throughout: "\(" in a plain literal is an invalid escape
    # sequence.
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_allowance\)"):
        await token_contract.transferFrom(
            sender=0, recipient=uninitialized_account, amount=uint256(TRANSFER_AMOUNT)
        ).invoke(caller_address=another_account)


@pytest.mark.asyncio
async def test_transfer_from_zero_recipient(token_contract: StarknetContract, uint256: Callable):
    """transferFrom to the zero address fails even with a valid allowance."""
    amount = uint256(TRANSFER_AMOUNT)
    await token_contract.approve(spender=another_account, amount=uint256(TRANSFER_AMOUNT)).invoke(
        caller_address=initialized_account
    )
    with pytest.raises(StarkException, match=r"assert_not_zero\(recipient\)"):
        await token_contract.transferFrom(
            sender=initialized_account, recipient=0, amount=amount
        ).invoke(caller_address=another_account)


@pytest.mark.asyncio
async def test_transfer_from_amount_bigger_than_balance(
    token_contract: StarknetContract, uint256: Callable
):
    """With ample allowance, exceeding the sender's balance still fails."""
    await token_contract.approve(
        spender=another_account, amount=uint256(HIGH_APPROVE_AMOUNT)
    ).invoke(caller_address=initialized_account)
    amount = uint256(initial_balances[initialized_account] + 1)
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_balance\)"):
        await token_contract.transferFrom(
            sender=initialized_account, recipient=uninitialized_account, amount=amount
        ).invoke(caller_address=another_account)


@pytest.mark.asyncio
async def test_transfer_from_amount_bigger_than_allowance(
    token_contract: StarknetContract, uint256: Callable
):
    """Spending one unit more than the granted allowance fails."""
    await token_contract.approve(spender=another_account, amount=uint256(APPROVE_AMOUNT)).invoke(
        caller_address=initialized_account
    )
    amount = uint256(APPROVE_AMOUNT + 1)
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_allowance\)"):
        await token_contract.transferFrom(
            sender=initialized_account, recipient=uninitialized_account, amount=amount
        ).invoke(caller_address=another_account)


@pytest.mark.asyncio
async def test_transfer_from_invalid_uint256_amount(
    token_contract: StarknetContract, uint256: Callable
):
    """An out-of-range amount with no allowance trips the allowance check.

    NOTE(review): the matched failure is the allowance check, not
    uint256_check — the allowance comparison apparently runs first; confirm
    against the contract's check order.
    """
    amount = uint256(AMOUNT_BOUND)
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_allowance\)"):
        await token_contract.transferFrom(
            sender=initialized_account, recipient=uninitialized_account, amount=amount
        ).invoke(caller_address=another_account)
@pytest.mark.asyncio
@pytest.mark.parametrize("approve_num", [APPROVE_AMOUNT, HIGH_APPROVE_AMOUNT])
async def test_transfer_from_happy_flow(
    token_contract: StarknetContract, uint256: Callable, approve_num: int
):
    """transferFrom succeeds for both a tight and a generous allowance."""
    approval = uint256(approve_num)
    await token_contract.approve(spender=another_account, amount=approval).invoke(
        caller_address=initialized_account
    )
    await token_contract.transferFrom(
        sender=initialized_account, recipient=uninitialized_account, amount=uint256(TRANSFER_AMOUNT)
    ).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_increase_allowance_zero_spender(token_contract: StarknetContract, uint256: Callable):
    """increaseAllowance for the zero spender must fail."""
    # Raw strings: "\(" in a plain literal is an invalid escape sequence.
    with pytest.raises(StarkException, match=r"assert_not_zero\(spender\)"):
        await token_contract.increaseAllowance(
            spender=0, added_value=uint256(APPROVE_AMOUNT)
        ).invoke(caller_address=initialized_account)


@pytest.mark.asyncio
async def test_increase_allowance_invalid_amount(
    token_contract: StarknetContract, uint256: Callable
):
    """An out-of-range added_value must be rejected."""
    with pytest.raises(StarkException, match=r"uint256_check\(added_value\)"):
        await token_contract.increaseAllowance(
            spender=uninitialized_account, added_value=uint256(AMOUNT_BOUND)
        ).invoke(caller_address=initialized_account)


@pytest.mark.asyncio
async def test_increase_allowance_overflow(token_contract: StarknetContract, uint256: Callable):
    """Growing the allowance past the uint256 range must fail with overflow."""
    await token_contract.increaseAllowance(
        spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
    ).invoke(caller_address=initialized_account)
    # APPROVE_AMOUNT + (AMOUNT_BOUND - APPROVE_AMOUNT) == AMOUNT_BOUND, i.e.
    # one past the maximum representable value.
    with pytest.raises(StarkException, match=r"assert \(is_overflow\) = 0"):
        await token_contract.increaseAllowance(
            spender=uninitialized_account, added_value=uint256(AMOUNT_BOUND - APPROVE_AMOUNT)
        ).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_decrease_allowance_zero_spender(token_contract: StarknetContract, uint256: Callable):
    """Decreasing the (zero) allowance of the zero spender must fail."""
    approve_amount = uint256(APPROVE_AMOUNT)
    # Raw strings: "\(" in a plain literal is an invalid escape sequence.
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_allowance\)"):
        await token_contract.decreaseAllowance(spender=0, subtracted_value=approve_amount).invoke(
            caller_address=initialized_account
        )


@pytest.mark.asyncio
async def test_decrease_allowance_bigger_than_allowance(
    token_contract: StarknetContract, uint256: Callable
):
    """Decreasing by more than the current allowance must fail."""
    await token_contract.increaseAllowance(
        spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
    ).invoke(caller_address=initialized_account)
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_allowance\)"):
        await token_contract.decreaseAllowance(
            spender=uninitialized_account, subtracted_value=uint256(APPROVE_AMOUNT + 1)
        ).invoke(caller_address=initialized_account)


@pytest.mark.asyncio
async def test_decrease_allowance_invalid_amount(
    token_contract: StarknetContract, uint256: Callable
):
    """An out-of-range subtracted_value must be rejected."""
    with pytest.raises(StarkException, match=r"uint256_check\(subtracted_value\)"):
        await token_contract.decreaseAllowance(
            spender=uninitialized_account, subtracted_value=uint256(AMOUNT_BOUND)
        ).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_and_decrease_allowance_happy_flow(
    token_contract: StarknetContract, uint256: Callable
):
    """Allowance: 0 -> APPROVE_AMOUNT -> APPROVE_AMOUNT - APPROVE_AMOUNT // 2."""

    async def current_allowance():
        info = await token_contract.allowance(
            owner=initialized_account, spender=uninitialized_account
        ).call()
        return info.result[0]

    assert await current_allowance() == uint256(0)
    await token_contract.increaseAllowance(
        spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
    ).invoke(caller_address=initialized_account)
    assert await current_allowance() == uint256(APPROVE_AMOUNT)
    # Floor division keeps the arithmetic exact: int(APPROVE_AMOUNT / 2) goes
    # through float division and can round incorrectly for values >= 2**53.
    half = APPROVE_AMOUNT // 2
    await token_contract.decreaseAllowance(
        spender=uninitialized_account, subtracted_value=uint256(half)
    ).invoke(caller_address=initialized_account)
    assert await current_allowance() == uint256(APPROVE_AMOUNT - half)
@pytest.mark.asyncio
async def test_permissioned_mint_wrong_minter(token_contract: StarknetContract, uint256: Callable):
    """Only the configured minter may mint."""
    with pytest.raises(StarkException, match="assert caller_address = permitted_address"):
        await token_contract.permissionedMint(
            recipient=uninitialized_account, amount=uint256(MINT_AMOUNT)
        ).invoke(caller_address=MINTER_ADDRESS + 1)


@pytest.mark.asyncio
async def test_permissioned_mint_zero_recipient(
    token_contract: StarknetContract, uint256: Callable
):
    """Minting to the zero address must fail."""
    # Raw strings: "\(" in a plain (or placeholder-less f-) string is an
    # invalid escape sequence.
    with pytest.raises(StarkException, match=r"assert_not_zero\(recipient\)"):
        await token_contract.permissionedMint(recipient=0, amount=uint256(MINT_AMOUNT)).invoke(
            caller_address=MINTER_ADDRESS
        )


@pytest.mark.asyncio
async def test_permissioned_mint_invalid_uint256_amount(
    token_contract: StarknetContract, uint256: Callable
):
    """An out-of-range mint amount must be rejected."""
    with pytest.raises(StarkException, match=r"uint256_check\(amount\)"):
        await token_contract.permissionedMint(
            recipient=uninitialized_account, amount=uint256(AMOUNT_BOUND)
        ).invoke(caller_address=MINTER_ADDRESS)


@pytest.mark.asyncio
async def test_permissioned_mint_total_supply_out_of_range(
    token_contract: StarknetContract, uint256: Callable
):
    """Minting that would push totalSupply out of the uint256 range fails."""
    amount = uint256(AMOUNT_BOUND - initial_total_supply)
    with pytest.raises(StarkException, match=r"assert \(is_overflow\) = 0"):
        await token_contract.permissionedMint(
            recipient=uninitialized_account, amount=amount
        ).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_mint_happy_flow(token_contract: StarknetContract, uint256: Callable):
    """Minting credits the recipient and grows the total supply."""
    await token_contract.permissionedMint(
        recipient=uninitialized_account, amount=uint256(MINT_AMOUNT)
    ).invoke(caller_address=MINTER_ADDRESS)
    balance_info = await token_contract.balanceOf(account=uninitialized_account).call()
    supply_info = await token_contract.totalSupply().call()
    assert balance_info.result == (uint256(MINT_AMOUNT),)
    assert supply_info.result == (uint256(initial_total_supply + MINT_AMOUNT),)
@pytest.mark.asyncio
async def test_permissioned_burn_wrong_minter(token_contract: StarknetContract, uint256: Callable):
    """Only the configured minter may burn."""
    with pytest.raises(StarkException, match="assert caller_address = permitted_address"):
        await token_contract.permissionedBurn(
            account=initialized_account, amount=uint256(BURN_AMOUNT)
        ).invoke(caller_address=MINTER_ADDRESS + 1)


@pytest.mark.asyncio
async def test_permissioned_burn_zero_account(token_contract: StarknetContract, uint256: Callable):
    """Burning from the zero address must fail."""
    # Raw strings: "\(" in a plain (or placeholder-less f-) string is an
    # invalid escape sequence.
    with pytest.raises(StarkException, match=r"assert_not_zero\(account\)"):
        await token_contract.permissionedBurn(account=0, amount=uint256(BURN_AMOUNT)).invoke(
            caller_address=MINTER_ADDRESS
        )


@pytest.mark.asyncio
async def test_permissioned_burn_invalid_uint256_amount(
    token_contract: StarknetContract, uint256: Callable
):
    """An out-of-range burn amount must be rejected."""
    with pytest.raises(StarkException, match=r"uint256_check\(amount\)"):
        await token_contract.permissionedBurn(
            account=initialized_account, amount=uint256(AMOUNT_BOUND)
        ).invoke(caller_address=MINTER_ADDRESS)


@pytest.mark.asyncio
async def test_permissioned_burn_amount_bigger_than_balance(
    token_contract: StarknetContract, uint256: Callable
):
    """Burning more than the account's balance must fail."""
    amount = uint256(initial_balances[initialized_account] + 1)
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_balance\)"):
        await token_contract.permissionedBurn(account=initialized_account, amount=amount).invoke(
            caller_address=MINTER_ADDRESS
        )
@pytest.mark.asyncio
async def test_permissioned_burn_happy_flow(token_contract: StarknetContract, uint256: Callable):
    """Mint then burn; balance and total supply track the net change."""
    await token_contract.permissionedMint(
        recipient=initialized_account, amount=uint256(MINT_AMOUNT)
    ).invoke(caller_address=MINTER_ADDRESS)
    await token_contract.permissionedBurn(
        account=initialized_account, amount=uint256(BURN_AMOUNT)
    ).invoke(caller_address=MINTER_ADDRESS)
    net_change = MINT_AMOUNT - BURN_AMOUNT
    balance_info = await token_contract.balanceOf(account=initialized_account).call()
    assert balance_info.result == (uint256(initial_balances[initialized_account] + net_change),)
    supply_info = await token_contract.totalSupply().call()
    assert supply_info.result == (uint256(initial_total_supply + net_change),)
| src/starkware/starknet/apps/starkgate/cairo/token_test.py | 21,056 | Not initialized_account and not uninitialized_account. 0 < TRANSFER_AMOUNT < APPROVE_AMOUNT < initial_balance < HIGH_APPROVE_AMOUNT. Tests the case of sender = recipient. The contract fails when checking for sufficient allowance of account 0. Only because we cannot put a balance for address(0) or approve on its behalf. Could we do that, we would have failed on the more sensible error assert_not_zero(sender). | 411 | en | 0.906517 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubnetAssociation(Model):
    """Network interface and its custom security rules.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Subnet ID.
    :vartype id: str
    :param security_rules: Collection of custom security rules.
    :type security_rules:
     list[~azure.mgmt.network.v2018_08_01.models.SecurityRule]
    """

    _validation = {
        'id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
    }

    def __init__(self, **kwargs):
        super(SubnetAssociation, self).__init__(**kwargs)
        # 'id' is read-only (server-populated); never set it from the client.
        self.id = None
        self.security_rules = kwargs.get('security_rules')
| src/virtual-network-tap/azext_vnettap/vendored_sdks/v2018_08_01/models/subnet_association.py | 1,302 | Network interface and its custom security rules.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Subnet ID.
:vartype id: str
:param security_rules: Collection of custom security rules.
:type security_rules:
list[~azure.mgmt.network.v2018_08_01.models.SecurityRule]
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- | 772 | en | 0.639006 |
from flask import render_template, url_for, request, flash, redirect, make_response
import email
from app import app
from werkzeug.utils import secure_filename
from app.predict_email import Prediction
import tempfile
# Single shared classifier instance, created once at import time.
predict_email = Prediction()
def parse_email(email_raw):
    """Parse a raw email from a binary stream into an email.message.Message."""
    bytes_parser = email.parser.BytesParser()
    return bytes_parser.parse(email_raw)
@app.route("/")
def home():
return render_template("home.html")
@app.route("/predict", methods=["POST", "GET"])
def predict():
if request.method == "POST":
email_raw = request.files["email_raw"]
if email_raw.filename != "":
temp_name = next(tempfile._get_candidate_names())
with open(f"./app/data/uploads/{temp_name}.eml", "wb") as f:
f.write(email_raw.read())
spam,prediction = predict_email.predict_emails([f"./app/data/uploads/{temp_name}.eml"])
# email_parsed = parse_email(email_raw)
# print(email["subject"])
# Features = prepData(textData)
# prediction = int((np.asscalar(loaded_model.predict(Features))) * 100)
if spam:
page = "spam.html"
score = int(round(prediction[0][1]*100))
else:
page = "ham.html"
score = int(round(prediction[0][0]*100))
r = make_response(render_template(page, prediction=score))
r.headers.add('Access-Control-Allow-Origin', '*')
r.headers.add('Access-Control-Expose-Headers', 'Content-Disposition')
return r
else:
return render_template("home.html")
else:
return render_template("home.html")
@app.route("/predict2")
def predict2():
return render_template("ham.html")
# @app.route("/predict", methods=["POST"])
# def predict():
# df = pd.read_csv("spam.csv", encoding="latin-1")
# df.drop(["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"], axis=1, inplace=True)
# # Features and Labels
# df["label"] = df["class"].map({"ham": 0, "spam": 1})
# X = df["message"]
# y = df["label"]
# # Extract Feature With CountVectorizer
# cv = CountVectorizer()
# X = cv.fit_transform(X) # Fit the Data
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(
# X, y, test_size=0.33, random_state=42
# )
# # Naive Bayes Classifier
# from sklearn.naive_bayes import MultinomialNB
# clf = MultinomialNB()
# clf.fit(X_train, y_train)
# clf.score(X_test, y_test)
# # Alternative Usage of Saved Model
# # joblib.dump(clf, 'NB_spam_model.pkl')
# # NB_spam_model = open('NB_spam_model.pkl','rb')
# # clf = joblib.load(NB_spam_model)
# if request.method == "POST":
# message = request.form["message"]
# data = [message]
# vect = cv.transform(data).toarray()
# my_prediction = clf.predict(vect)
# return render_template("result.html", prediction=my_prediction)
| app/routes.py | 3,055 | email_parsed = parse_email(email_raw) print(email["subject"]) Features = prepData(textData) prediction = int((np.asscalar(loaded_model.predict(Features))) * 100) @app.route("/predict", methods=["POST"]) def predict(): df = pd.read_csv("spam.csv", encoding="latin-1") df.drop(["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"], axis=1, inplace=True) Features and Labels df["label"] = df["class"].map({"ham": 0, "spam": 1}) X = df["message"] y = df["label"] Extract Feature With CountVectorizer cv = CountVectorizer() X = cv.fit_transform(X) Fit the Data from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=42 ) Naive Bayes Classifier from sklearn.naive_bayes import MultinomialNB clf = MultinomialNB() clf.fit(X_train, y_train) clf.score(X_test, y_test) Alternative Usage of Saved Model joblib.dump(clf, 'NB_spam_model.pkl') NB_spam_model = open('NB_spam_model.pkl','rb') clf = joblib.load(NB_spam_model) if request.method == "POST": message = request.form["message"] data = [message] vect = cv.transform(data).toarray() my_prediction = clf.predict(vect) return render_template("result.html", prediction=my_prediction) | 1,339 | en | 0.446009 |
class ParticleData(object):
    """ Container for per-particle properties such as charge.
    """
    def __init__(self, charge=0):
        """ Store the particle's charge; defaults to 0.
        """
        self.charge = charge

    def __repr__(self):
        return "charge=%s" % self.charge
class ParticleDataList(object):
    """ Class for generic handling of particle ids, names and properties.

    Multiple ids can be mapped to multiple names of a particle.
    The first name/id in each entry is the default name/id, but additional
    names/ids can be given. An example can be found in the
    defaultParticleDataList.
    """

    # Id groups for the is*Id helpers; frozensets give O(1) membership tests
    # instead of scanning a list literal on every call.
    _QUARK_IDS = frozenset([1, 2, 3, 4, 5, 6])
    _LEPTON_IDS = frozenset([11, 12, 13, 14, 15, 16])
    _GLUON_IDS = frozenset([21, 9])
    _BOSON_IDS = frozenset([21, 9, 22, 23, 24, 25, 32, 33, 34, 35, 36, 37])
    _HIGGS_IDS = frozenset([25, 35, 36, 37])
    _SUSY_IDS = frozenset([
        1000001, 1000002, 1000003, 1000004, 1000005, 1000006,
        1000011, 1000012, 1000013, 1000014, 1000015, 1000016,
        2000001, 2000002, 2000003, 2000004, 2000005, 2000006,
        2000011, 2000013, 1000021, 1000022, 1000023, 1000024,
        1000025, 1000035, 1000037, 1000039])

    def __init__(self, list=None):
        """ A list of particle ids and names can be given to the constructor.
        """
        # 'is not None' (identity) is the correct test for the default
        # argument; the original '!= None' invokes __eq__ needlessly.
        if list is not None:
            self._list = list
        else:
            self._list = []

    def setList(self, list):
        self._list = list

    def getList(self):
        return self._list

    def addParticle(self, ids, names, particleData):
        """ Add a particle with (multiple) ids and names to the list.
        """
        if not (isinstance(ids, list) and isinstance(names, list)):
            raise TypeError("addParticle needs to lists as input: e.g. [1,-1],['d','dbar']")
        self._list += [(ids, names, particleData)]

    def getDefaultName(self, name):
        """ Return the default (first in list) name given any of the particle's names.
        """
        for ids, names, data in self._list:
            if name in names:
                return names[0]
        return name

    def getDefaultId(self, id):
        """ Return the default (first in list) id given any of the particle's ids.
        """
        for ids, names, data in self._list:
            if id in ids:
                return ids[0]
        return id

    def getIdFromName(self, name):
        """ Return the default (first in list) id given any of the particle's names.
        """
        for ids, names, data in self._list:
            if name in names:
                return ids[0]
        return 0

    def getNameFromId(self, id):
        """ Return the default (first in list) name given any of the particle's ids.
        """
        for ids, names, data in self._list:
            if id in ids:
                return names[0]
        return "unknown"

    def getParticleDataFromId(self, id):
        """ Return the ParticleData for any of the particle's ids (None if unknown).
        """
        for ids, names, data in self._list:
            if id in ids:
                return data
        return None

    def isQuarkId(self, id):
        return abs(id) in self._QUARK_IDS

    def isLeptonId(self, id):
        return abs(id) in self._LEPTON_IDS

    def isGluonId(self, id):
        return abs(id) in self._GLUON_IDS

    def isBosonId(self, id):
        return abs(id) in self._BOSON_IDS

    def isPhotonId(self, id):
        return id == 22

    def isHiggsId(self, id):
        return abs(id) in self._HIGGS_IDS

    def isSusyId(self, id):
        return abs(id) in self._SUSY_IDS
# ---------------------------------------------------------------------------
# Predefined particle tables. Each entry is ([ids], [names], ParticleData):
# the first id/name of an entry is the default one returned by the lookup
# methods of ParticleDataList.
# ---------------------------------------------------------------------------

# The six quark flavours (particle and antiparticle ids).
defaultQuarkDataList = ParticleDataList([
    ([1, - 1], ["d", "d_quark", "dbar"], ParticleData(1.0/3.0)),
    ([2, - 2], ["u", "u_quark", "ubar"], ParticleData(2.0/3.0)),
    ([3, - 3], ["s", "s_quark", "sbar"], ParticleData(1.0/3.0)),
    ([4, - 4], ["c", "c_quark", "cbar"], ParticleData(2.0/3.0)),
    ([5, - 5], ["b", "b_quark", "bbar"], ParticleData(1.0/3.0)),
    ([6, - 6], ["t", "t_quark", "tbar"], ParticleData(2.0/3.0))
    ])

# Charged leptons and their neutrinos.
defaultLeptonDataList = ParticleDataList([
    ([11, - 11], ["e","electron", "Electron", "e+", "e-"], ParticleData(1)),
    ([12, - 12], ["nu_e", "Electron_neutrino", "electron_neutrino", "nu_electron"], ParticleData(0)),
    ([13, - 13], ["mu", "Muon", "muon", "mu+", "mu-"], ParticleData(1)),
    ([14, - 14], ["nu_mu", "nu_muon", "Muon_neutrino", "muon_neutrino"], ParticleData(0)),
    ([15, - 15], ["tau", "Tau", "tau+", "tau-"], ParticleData(1)),
    ([16, - 16], ["nu_tau", "Tau_neutrino", "tau_neutrino"], ParticleData(0))
    ])

# Standard Model gauge bosons and the Higgs.
defaultBosonDataList = ParticleDataList([
    ([21, 9], ["g", "Gluon", "gluon"], ParticleData(0)),
    ([22], ["gamma", "Photon", "photon"], ParticleData(0)),
    ([23], ["Z", "Z_boson"], ParticleData(0)),
    ([24, - 24], ["W", "W_boson", "W+", "W-"], ParticleData(1)),
    ([25], ["h", "Higgs_boson", "Higgs", "higgs_boson"], ParticleData(0))
    ])

# A selection of light hadrons.
# NOTE(review): some ids look inconsistent with the PDG numbering scheme
# (pi+ is 211, not 112; D0/D+ are 421/411, apparently swapped here; same
# pattern for B0/B+ vs 511/521) — confirm against the PDG Monte Carlo
# numbering before relying on these entries.
defaultHadronDataList = ParticleDataList([
    ([111], ["pi0", "Pi0"], ParticleData(0)),
    ([112], ["pi+", "Pi+"], ParticleData(1)),
    ([221], ["eta", "Eta"], ParticleData(0)),
    ([130], ["K0_L"], ParticleData(0)),
    ([310], ["K0_S"], ParticleData(0)),
    ([311], ["K0"], ParticleData(0)),
    ([321], ["K+"], ParticleData(1)),
    ([411], ["D0"], ParticleData(0)),
    ([421], ["D+"], ParticleData(1)),
    ([511], ["B0"], ParticleData(0)),
    ([521], ["B+"], ParticleData(1)),
    ([2212], ["p","Proton","proton"], ParticleData(1)),
    ([2112], ["n","Neutron","neutron"], ParticleData(0)),
    ([2224], ["Delta++"], ParticleData(2)),
    ([2214], ["Delta+"], ParticleData(1)),
    ([2114], ["Delta0"], ParticleData(0)),
    ([1114], ["Delta-"], ParticleData(1))
    ])

# Common beyond-Standard-Model extensions (extra gauge bosons, 2HDM Higgses).
defaultExtensionDataList = ParticleDataList([
    ([32], ["Z'", "Z_prime"], ParticleData(0)),
    ([33], ["Z''", "Z_primeprime"], ParticleData(0)),
    ([34, - 34], ["W'", "W_prime", "W'+", "W'-"], ParticleData(1)),
    ([37, - 37], ["H+", "Charged_Higgs", "H+", "H-"], ParticleData(1)),
    ([35], ["H0", "Neutral_Higgs_H", "H"], ParticleData(0)),
    ([36], ["A0", "Neutral_Higgs_A", "A"], ParticleData(0))
    ])

# SUSY partners.
# NOTE(review): "Smuon_R" and "Stau_2" reuse ids 1000013/1000015 (already used
# by Smuon_L / Stau_1 above) instead of 2000013/2000015, and the *_R squark
# entries reuse the *_L name strings — looks like copy-paste slips; confirm.
defaultSusyDataList = ParticleDataList([
    ([1000001, - 1000001], ["d_squark_L", "d~_L", "d~_L_bar"], ParticleData(1.0/3.0)),
    ([1000002, - 1000002], ["u_squark_L", "u~_L", "u~_L_bar"], ParticleData(2.0/3.0)),
    ([1000003, - 1000003], ["s_squark_L", "s~_L", "s~_L_bar"], ParticleData(1.0/3.0)),
    ([1000004, - 1000004], ["c_squark_L", "c~_L", "c~_L_bar"], ParticleData(2.0/3.0)),
    ([1000005, - 1000005], ["sbottom_L", "b~_1", "b~_1_bar"], ParticleData(1.0/3.0)),
    ([1000006, - 1000006], ["stop_L", "t~_1", "t~_1_bar"], ParticleData(2.0/3.0)),
    ([1000011, - 1000011], ["Selectron_L", "selectron_L", "e~_L", "e~_L+", "e~_L-"], ParticleData(1)),
    ([1000012, - 1000012], ["Electron_sneutrino", "electron_sneutrino", "nu~_e_L"], ParticleData(0)),
    ([1000013, - 1000013], ["Smuon_L", "smuon_L", "mu~_L", "mu~_L+", "mu~_L-"], ParticleData(1)),
    ([1000014, - 1000014], ["Muon_sneutrino", "muon_sneutrino", "nu~_mu_L"], ParticleData(0)),
    ([1000015, - 1000015], ["Stau_1", "stau_1", "tau~_1+", "tau~_1-"], ParticleData(1)),
    ([1000016, - 1000016], ["Tau_sneutrino", "tau_sneutrino", "nu~_tau_L"], ParticleData(0)),
    ([2000001, - 2000001], ["d_squark_R", "d~_L", "d~_L_bar"], ParticleData(1.0/3.0)),
    ([2000002, - 2000002], ["u_squark_R", "u~_L", "u~_L_bar"], ParticleData(2.0/3.0)),
    ([2000003, - 2000003], ["s_squark_R", "s~_L", "s~_L_bar"], ParticleData(1.0/3.0)),
    ([2000004, - 2000004], ["c_squark_R", "c~_L", "c~_L_bar"], ParticleData(2.0/3.0)),
    ([2000005, - 2000005], ["sbottom_R", "b~_2", "b~_2_bar"], ParticleData(1.0/3.0)),
    ([2000006, - 2000006], ["stop_R", "t~_2", "t~_2_bar"], ParticleData(2.0/3.0)),
    ([2000011, - 2000011], ["Selectron_R", "selectron_R", "e~_R", "e~_R+", "e~_R-"], ParticleData(1)),
    ([1000013, - 1000013], ["Smuon_R", "smuon_R", "mu~_L", "mu~_R+", "mu~_R-"], ParticleData(1)),
    ([1000015, - 1000015], ["Stau_2", "stau_2", "tau~_2+", "tau~_2 -"], ParticleData(1)),
    ([1000021], ["Gluino", "gluino", "g~"], ParticleData(0)),
    ([1000022, - 1000022], ["Neutralino_1", "neutralino_1", "chi~_1"], ParticleData(0)),
    ([1000023, - 1000023], ["Neutralino_2", "neutralino_2", "chi~_2"], ParticleData(0)),
    ([1000025, - 1000025], ["Neutralino_3", "neutralino_3", "chi~_3"], ParticleData(0)),
    ([1000035, - 1000035], ["Neutralino_4", "neutralino4", "chi~_4"], ParticleData(0)),
    ([1000024, - 1000024], ["Chargino_1", "chargino_1", "chi~_1+", "chi~_1-"], ParticleData(1)),
    ([1000037, - 1000037], ["Chargino_2", "chargino_2", "chi~_2+", "chi~_2-"], ParticleData(1)),
    ([1000039], ["Gravitino", "gravitino", "G"], ParticleData(0))
    ])

# Union of all tables above; the list used by default.
defaultParticleDataList = ParticleDataList(
    defaultQuarkDataList.getList() +
    defaultLeptonDataList.getList() +
    defaultBosonDataList.getList() +
    defaultHadronDataList.getList() +
    defaultExtensionDataList.getList() +
    defaultSusyDataList.getList())

# Collapsed view where all light quarks and the gluon count as one "parton".
# NOTE(review): the Higgs entry here carries charge 1, unlike the charge-0
# entry in defaultBosonDataList — confirm which is intended.
partonParticleDataList = ParticleDataList([
    ([1, - 1, 2, - 2, 3, - 3, 4, - 4, 21, 9], ["parton", "d", "dbar", "u", "ubar", "s", "sbar", "c", "cbar", "b", "bbar", "t", "tbar", "gluon", "g"], ParticleData())
    ] +
    defaultLeptonDataList.getList() + [
    ([22], ["gamma", "Photon", "photon"], ParticleData(0)),
    ([23], ["Z", "Z_boson"], ParticleData(0)),
    ([24, - 24], ["W", "W_boson", "W+", "W-"], ParticleData(1)),
    ([25], ["h", "Higgs_boson", "Higgs", "higgs_boson"], ParticleData(1))
    ])
| FWCore/GuiBrowsers/python/Vispa/Plugins/EdmBrowser/ParticleDataList.py | 8,799 | Class for holding particle data such as charge.
Class for generic handling particle ids, names and properties.
Multiple ids can be mapped to multiple names of particle.
First name/id in the list is the default name. But additional names/ids can be given.
An examples can be found in the defaultParticleDataList.
A list of particle ids and names can be given to the constructor.
Add a paricle with (multiple) ids and names to the list.
Return the default (first in list) id given any of the particle's ids.
Return the default (first in list) name given any of the particle's names.
Return the default (first in list) id given any of the particle's names.
Return the default (first in list) name given any of the particle's ids. | 777 | en | 0.792197 |
import torch
from torch import nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import random
import numpy as np
import scipy as sp
import gurobipy as gp
from qpthlocal.qp import QPFunction
from qpthlocal.qp import QPSolvers
from qpthlocal.qp import make_gurobi_model
import pickle
import sys
import datetime
from collections import defaultdict
import math
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
import logging
import datetime
import time
from collections import defaultdict
from sklearn.metrics import mean_squared_error as mse
from scipy.special import expit, logit
import copy
sys.path.insert(0,'../Interior/')
sys.path.insert(0,'../..')
# from ip_model import *
from ip_model_whole import *
from remove_redundancy import _remove_redundancy, _remove_redundancy_sparse, _remove_redundancy_dense
from sgd_learner import *
import pandas as pd
def bceloss(inputs, target):
    """Mean binary cross-entropy for raw logits `inputs` against `target`.

    Uses the identity target*log(sig(x)) + (1-target)*log(1-sig(x))
    = log(1 - sigmoid(x)) + target*x, then negates and averages.
    """
    log_one_minus_sig = np.log(1 - expit(inputs))
    return -(log_one_minus_sig + target * inputs).mean()
def _remove_redundant_rows (A_eq):
# remove redundant (linearly dependent) rows from equality constraints
n_rows_A = A_eq.shape[0]
redundancy_warning = ("A_eq does not appear to be of full row rank. To "
"improve performance, check the problem formulation "
"for redundant equality constraints.")
# if (sps.issparse(A_eq)):
# if rr and A_eq.size > 0: # TODO: Fast sparse rank check?
# A_eq, b_eq, status, message = _remove_redundancy_sparse(A_eq, b_eq)
# if A_eq.shape[0] < n_rows_A:
# warn(redundancy_warning, OptimizeWarning, stacklevel=1)
# if status != 0:
# complete = True
# return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,
# x, x0, undo, complete, status, message)
# This is a wild guess for which redundancy removal algorithm will be
# faster. More testing would be good.
small_nullspace = 5
if A_eq.size > 0:
try: # TODO: instead use results of first SVD in _remove_redundancy
rank = np.linalg.matrix_rank(A_eq)
except Exception: # oh well, we'll have to go with _remove_redundancy_dense
rank = 0
if A_eq.size > 0 and rank < A_eq.shape[0]:
warn(redundancy_warning, OptimizeWarning, stacklevel=3)
dim_row_nullspace = A_eq.shape[0]-rank
if dim_row_nullspace <= small_nullspace:
d_removed, status, message = _remove_redundancy(A_eq)
if dim_row_nullspace > small_nullspace :
d_removed, status, message = _remove_redundancy_dense(A_eq)
if A_eq.shape[0] < rank:
message = ("Due to numerical issues, redundant equality "
"constraints could not be removed automatically. "
"Try providing your constraint matrices as sparse "
"matrices to activate sparse presolve, try turning "
"off redundancy removal, or try turning off presolve "
"altogether.")
status = 4
if status != 0:
complete = True
return d_removed
def get_loss(net,A, X, y,instances):
    """Evaluate ``net`` on a set of shortest-path instances.

    For every (source, destination) pair in ``instances``, solves the binary
    program min c_pred @ x s.t. A @ x == b with Gurobi, then scores the chosen
    path with the TRUE costs ``y``.

    Returns a tuple ``(mse(c_pred, y), total_task_cost)``.
    """
    net.eval()  # inference mode while scoring; restored to train() below
    rslt = []
    c_pred = net(torch.from_numpy(X).float()).squeeze().detach().numpy()
    c = y
    for k,v in instances.items():
        source, destination = v
        # Flow-conservation RHS: +1 out of the source, -1 into the destination.
        b = np.zeros(len(A))
        b [source] =1
        b[destination ]=-1
        model = gp.Model()
        model.setParam('OutputFlag', 0)  # silence Gurobi's console logging
        x = model.addMVar(shape=A.shape[1], vtype=gp.GRB.BINARY, name="x")
        model.setObjective(c_pred @x, gp.GRB.MINIMIZE)
        model.addConstr(A @ x == b, name="eq")
        model.optimize()
        if model.status ==2:
            # Status 2 == GRB.OPTIMAL: record the true cost of the chosen path.
            sol =x.X
            rslt.append( c.dot(sol))
        else:
            # Non-optimal instances are skipped; their cost is excluded from the sum.
            print(model.status, k,v)
    net.train()
    return mse(c_pred,c), sum(rslt)
def validation_module(net,A, X,y, training_instances,validation_instances, test_instances,time,
                      epoch,subepoch,**kwargs):
    """Collect prediction (MSE) and task losses on the test/train/validation splits.

    Returns a flat dict suitable for appending to a per-epoch log, tagged with
    the batch index (``subepoch``), the ``epoch`` and cumulative wall-clock
    ``time``.
    """
    record = {}
    # Insertion order matches the original: test, train, validation.
    for split_name, split_instances in (('test', test_instances),
                                        ('train', training_instances),
                                        ('validation', validation_instances)):
        prediction_loss, task_loss = get_loss(net, A, X, y, split_instances)
        record['{}_prediction_loss'.format(split_name)] = prediction_loss
        record['{}_task_loss'.format(split_name)] = task_loss
    record['batch'] = subepoch
    record['epoch'] = epoch
    record['time'] = time
    return record
def make_fc(num_layers, num_features, num_targets=1,
            activation_fn = nn.ReLU,intermediate_size=50, regularizers = True):
    """Build a fully connected network.

    Layout: input Linear + activation, ``num_layers - 2`` hidden
    Linear + activation pairs of width ``intermediate_size``, then an output
    Linear followed by a final ReLU that clamps outputs to be non-negative.

    ``regularizers`` is accepted for interface compatibility but unused here.
    """
    layers = [nn.Linear(num_features, intermediate_size), activation_fn()]
    for _ in range(num_layers - 2):
        layers.extend([nn.Linear(intermediate_size, intermediate_size),
                       activation_fn()])
    layers.extend([nn.Linear(intermediate_size, num_targets), nn.ReLU()])
    return nn.Sequential(*layers)
class two_stage_matching:
    """Two-stage baseline for the shortest-path problem.

    Stage one fits ``self.net`` to predict edge costs from features with a
    plain MSE regression; stage two (see ``get_loss``) solves the shortest
    path with the predicted costs.  ``A`` is the constraint (incidence)
    matrix whose columns correspond to edges.
    """
    def __init__(self,A,num_features, num_layers, intermediate_size,
                 activation_fn = nn.ReLU, num_instance=1,
                 epochs=10,batchsize= 256, optimizer=optim.Adam,
                 validation=False,**hyperparams):
        self.A = A
        self.num_features = num_features
        self.num_layers = num_layers
        self.activation_fn = activation_fn
        self.intermediate_size = intermediate_size
        self.epochs = epochs
        self.batchsize = batchsize
        self.validation = validation
        self.net = make_fc(num_layers=num_layers, num_features=num_features,
                           activation_fn= activation_fn,
                           intermediate_size= intermediate_size)
        # Extra keyword arguments (e.g. lr) are forwarded to the optimizer.
        self.optimizer = optimizer(self.net.parameters(), **hyperparams)
    def fit(self,X,y,instances):
        """Train by minibatch MSE regression of edge costs.

        ``instances`` maps 'train'/'validation'/'test' to collections of
        (source, destination) pairs.

        NOTE(review): ``df`` is assigned only when ``self.validation`` is
        truthy, so ``fit`` raises NameError on return otherwise — confirm
        callers always construct the model with validation=True.
        """
        test_instances = instances['test']
        validation_instances = instances['validation']
        train_instances = instances['train']
        time_ = 0
        self.model_time = 0
        n_train = X.shape[0]
        if self.validation:
            validation_list = []
        indexes = np.arange(n_train)
        loss_fn = nn.MSELoss()# nn.KLDivLoss(reduction='batchmean')
        for e in range(self.epochs):
            start_time = time.time()
            np.random.shuffle(indexes)
            num_batches = len(indexes) //(self.batchsize)
            bi = 0#batch-index
            for b in range(num_batches):
                self.optimizer.zero_grad()
                X_np = X[indexes[bi:(bi+self.batchsize)]]
                y_np = y[indexes[bi:(bi+self.batchsize)]]
                bi += self.batchsize
                X_torch = torch.from_numpy(X_np).float()
                y_torch = torch.from_numpy(y_np).float()
                c_pred = self.net(X_torch).squeeze()
                loss = loss_fn(c_pred,y_torch)
                loss.backward()
                self.optimizer.step()
            end_time = time.time()
            # Only training time is accumulated; the validation pass below is excluded.
            time_ += end_time - start_time
            if self.validation:
                # `b` here is the last minibatch index from the loop above.
                validation_list.append( validation_module(self.net,self.A,
                    X,y,train_instances,validation_instances, test_instances,time_,e,b))
            print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
                datetime.datetime.now()))
        if self.validation :
            dd = defaultdict(list)
            for d in validation_list:
                for key, value in d.items():
                    dd[key].append(value)
            df = pd.DataFrame.from_dict(dd)
        logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
        return df
    def predict(self,X):
        """Return predicted edge costs for feature matrix ``X`` (numpy in, numpy out)."""
        X_torch = torch.from_numpy(X).float()
        self.net.eval()
        pred= self.net(X_torch)
        self.net.train()
        # NOTE(review): the second .detach() is redundant.
        return pred.detach().detach().numpy().squeeze()
    def validation_result(self,X,y, instances):
        """Return ``(prediction MSE, total task cost)`` over ``instances``."""
        validation_rslt = get_loss(self.net, self.A, X,y,instances)
        return validation_rslt[0], validation_rslt[1]
class qptl:
    """QPTL-style decision-focused model for the shortest-path problem.

    The LP is regularised with a quadratic term gamma*I so that its argmin is
    differentiable, and solved with qpth's QPFunction (Gurobi backend); the
    training loss is the true cost of the relaxed solution.
    """
    def __init__(self,A,num_features, num_layers, intermediate_size,num_instance= 1,
                 activation_fn = nn.ReLU, epochs=10,optimizer=optim.Adam,
                 gamma=1e-5,validation=False,
                 **hyperparams):
        self.num_features = num_features
        self.num_layers = num_layers
        self.activation_fn = activation_fn
        self.intermediate_size = intermediate_size
        self.A = A
        self.num_instance = num_instance
        self.epochs = epochs
        self.optimizer = optimizer
        self.validation = validation
        self.net = make_fc(num_layers=num_layers, num_features=num_features,
                           activation_fn= activation_fn,
                           intermediate_size= intermediate_size)
        # Rebinds self.optimizer from the optimizer class to an instance of it.
        self.optimizer = optimizer(self.net.parameters(), **hyperparams)
        # Weight of the quadratic regulariser added to the LP objective.
        self.gamma= gamma
    def fit(self,X,y,instances):
        """Train with the QPTL loss by differentiating through the regularised QP.

        NOTE(review): ``df`` is assigned only when ``self.validation`` is
        truthy, so ``fit`` raises NameError on return otherwise.
        """
        test_instances = instances['test']
        validation_instances = instances['validation']
        train_instances = instances['train']
        time_ = 0
        self.model_time = 0
        n_train = X.shape[0]
        if self.validation:
            validation_list = []
        logging.info("training started")
        # rows_to_be_removed = _remove_redundant_rows(self.A)
        # A_torch = torch.from_numpy(np.delete(self.A, rows_to_be_removed, axis=0)).float()
        A_torch = torch.from_numpy(self.A).float()
        # Quadratic regulariser gamma*I makes the LP argmin differentiable.
        Q_torch = self.gamma*torch.eye(A_torch.shape[1])
        X_torch = torch.from_numpy(X).float()
        y_torch = torch.from_numpy(y).float()
        # Inequality constraints G x <= h encode x >= 0.
        G_torch = -1*torch.eye(A_torch.shape[1])
        h_torch = torch.zeros(A_torch.shape[1])
        for e in range(self.epochs):
            for i in range(self.num_instance):
                start_time = time.time()
                self.optimizer.zero_grad()
                source, dest = train_instances[i]
                # b = np.zeros(len(self.A))
                # b[source] =1
                # b[dest ]=-1
                # b= np.delete(b, rows_to_be_removed)
                # b_torch = torch.from_numpy(b).float()
                # Flow-conservation RHS: +1 at the source, -1 at the destination.
                b_torch = torch.zeros(len(self.A))
                b_torch[source] =1
                b_torch[dest ]=-1
                # Pre-built Gurobi model data for the QPFunction backend.
                model_params_quad = make_gurobi_model(G_torch.detach().numpy(),
                    h_torch.detach().numpy(),A_torch.detach().numpy(),
                    b_torch.detach().numpy(), Q_torch.detach().numpy())
                # model_params_quad = make_gurobi_model(None,None,
                #     A_torch.detach().numpy(),
                #     b_torch.detach().numpy(), Q_torch.detach().numpy())
                c_pred = self.net(X_torch)
                if any(torch.isnan(torch.flatten(c_pred)).tolist()):
                    logging.info("**Alert** nan in param c_pred ")
                if any(torch.isinf(torch.flatten(c_pred)).tolist()):
                    logging.info("**Alert** inf in param c_pred ")
                logging.info("shapes c {} A {} b {} G {} h {} Q {}".format(c_pred.shape,
                    A_torch.shape,b_torch.shape,G_torch.shape,h_torch.shape,
                    Q_torch.shape ))
                # Solve argmin_x 0.5 x'Qx + c_pred'x  s.t. Gx <= h, Ax = b (batch of 1).
                x = QPFunction(verbose=False, solver=QPSolvers.GUROBI,
                    model_params= model_params_quad)(Q_torch.expand(1, *Q_torch.shape),
                    c_pred.squeeze(),G_torch.expand(1, *G_torch.shape),
                    h_torch.expand(1, *h_torch.shape),
                    A_torch.expand(1, *A_torch.shape),
                    b_torch.expand(1, *b_torch.shape))
                # x = QPFunction(verbose=False, solver=QPSolvers.GUROBI,
                #     model_params= model_params_quad)(Q_torch.expand(1, *Q_torch.shape),
                #     c_pred.squeeze(),torch.Tensor(),
                #     torch.Tensor(),
                #     A_torch.expand(1, *A_torch.shape),
                #     b_torch.expand(1, *b_torch.shape))
                # Keep the gradient on the non-leaf c_pred for the NaN check below.
                c_pred.retain_grad()
                # Task loss: true cost of the relaxed solution.
                loss = (y_torch*x).mean()
                loss.backward()
                c_grad = copy.deepcopy(c_pred.grad)
                if any(torch.isnan(torch.flatten(c_grad)).tolist()):
                    logging.info("**Alert** nan in param c_grad ")
                self.optimizer.step()
                # logging.info("bkwd done")
                end_time = time.time()
                time_ += end_time - start_time
                if self.validation:
                    # Validate every 20 instances to bound the overhead.
                    if ((i+1)%20==0):
                        validation_list.append( validation_module(self.net,self.A,
                            X,y,train_instances,validation_instances,
                            test_instances,time_,e,i))
            print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
                datetime.datetime.now()))
        if self.validation :
            dd = defaultdict(list)
            for d in validation_list:
                for key, value in d.items():
                    dd[key].append(value)
            df = pd.DataFrame.from_dict(dd)
        logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
        return df
    def predict(self,X):
        """Return predicted edge costs for feature matrix ``X`` (numpy in, numpy out)."""
        X_torch = torch.from_numpy(X).float()
        self.net.eval()
        pred= self.net(X_torch)
        self.net.train()
        # NOTE(review): the second .detach() is redundant.
        return pred.detach().detach().numpy().squeeze()
    def validation_result(self,X,y, instances):
        """Return ``(prediction MSE, total task cost)`` over ``instances``."""
        validation_rslt = get_loss(self.net, self.A, X,y,instances)
        return validation_rslt[0], validation_rslt[1]
class intopt:
    """Interior-point decision-focused model (IntOpt) for the shortest path.

    Differentiates through an interior-point LP solve (``IPOfunc``, imported
    from ip_model_whole) instead of a quadratic relaxation.  When
    ``full_row_rank`` is set, redundant rows of ``A`` are removed first so the
    equality system has full row rank.
    """
    def __init__(self,A, num_features, num_layers, intermediate_size,
                 num_instance= 1,activation_fn = nn.ReLU,epochs=10,optimizer=optim.Adam,
                 method=1,max_iter=100,smoothing=False,thr = None,mu0=None,full_row_rank=True,
                 validation=False,**hyperparams):
        self.A = A
        self.num_features = num_features
        self.num_layers = num_layers
        self.activation_fn = activation_fn
        self.intermediate_size = intermediate_size
        self.num_instance = num_instance
        self.method = method
        self.epochs = epochs
        # NOTE(review): duplicate assignment of self.method (harmless).
        self.method = method
        self.optimizer = optimizer
        self.max_iter = max_iter
        self.smoothing = smoothing
        self.thr = thr
        self.mu0 = mu0
        self.validation = validation
        self.full_row_rank = full_row_rank
        self.net = make_fc(num_layers=num_layers, num_features=num_features,
                           activation_fn= activation_fn,
                           intermediate_size= intermediate_size)
        # Rebinds self.optimizer from the optimizer class to an instance of it.
        self.optimizer = optimizer(self.net.parameters(), **hyperparams)
    def fit(self,X,y,instances):
        """Train by differentiating through the interior-point solve.

        NOTE(review): ``df`` is assigned only when ``self.validation`` is
        truthy, so ``fit`` raises NameError on return otherwise.
        """
        #A_torch = torch.from_numpy(self.A).float()
        test_instances = instances['test']
        validation_instances = instances['validation']
        train_instances = instances['train']
        time_ = 0
        self.model_time = 0
        n_train = X.shape[0]
        if self.validation:
            validation_list = []
        # model = gp.Model()
        # model.setParam('OutputFlag', 0)
        # x = model.addMVar(shape= self.A.shape[1], lb=0.0, vtype=gp.GRB.CONTINUOUS, name="x")
        if self.full_row_rank:
            # Drop linearly dependent rows so the interior-point KKT system is well posed.
            rows_to_be_removed = _remove_redundant_rows(self.A)
            A_torch = torch.from_numpy(np.delete(self.A, rows_to_be_removed, axis=0)).float()
        else:
            A_torch = torch.from_numpy(self.A).float()
        logging.info("shape of A {} shape of A-torch {}".format(self.A.shape,A_torch.shape))
        # A_ = np.delete(A_, rows_to_be_removed, axis=0)
        # b_ = np.delete(b_, rows_to_be_removed)
        # A_torch = torch.from_numpy(self.A).float()
        X_torch = torch.from_numpy(X).float()
        y_torch = torch.from_numpy(y).float()
        logging.info("training started")
        for e in range(self.epochs):
            for i in range(self.num_instance):
                start_time = time.time()
                self.optimizer.zero_grad()
                source, dest = train_instances[i]
                # Flow-conservation RHS: +1 at the source, -1 at the destination;
                # the same rows removed from A must be removed from b.
                if self.full_row_rank:
                    b = np.zeros(len(self.A))
                    b[source] =1
                    b[dest ]=-1
                    b= np.delete(b, rows_to_be_removed)
                    b_torch = torch.from_numpy(b).float()
                else:
                    b_torch = torch.zeros(len(self.A))
                    b_torch[source] = 1
                    b_torch[dest] = -1
                c_pred = self.net(X_torch).squeeze()
                # Differentiable LP solve: min c_pred'x s.t. Ax=b, x >= 0.
                x = IPOfunc(A_torch,b_torch,torch.Tensor(),torch.Tensor(),
                    bounds= [(0., None)],
                    max_iter=self.max_iter,mu0 = self.mu0,
                    thr=self.thr,method = self.method,
                    smoothing=self.smoothing)(c_pred)
                # Task loss: true cost of the relaxed solution.
                loss = (y_torch*x).mean()
                loss.backward()
                self.optimizer.step()
                end_time = time.time()
                time_ += end_time - start_time
                if self.validation:
                    # Validate every 20 instances to bound the overhead.
                    if ((i+1)%20==0) :
                        validation_list.append( validation_module(self.net,self.A,
                            X,y,train_instances,validation_instances,
                            test_instances,time_,e,i))
            print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.item(),
                datetime.datetime.now()))
        if self.validation :
            dd = defaultdict(list)
            for d in validation_list:
                for key, value in d.items():
                    dd[key].append(value)
            df = pd.DataFrame.from_dict(dd)
        logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
        return df
    def predict(self,X):
        """Return predicted edge costs for feature matrix ``X`` (numpy in, numpy out)."""
        X_torch = torch.from_numpy(X).float()
        self.net.eval()
        pred= self.net(X_torch)
        self.net.train()
        # NOTE(review): the second .detach() is redundant.
        return pred.detach().detach().numpy().squeeze()
    def validation_result(self,X,y, instances):
        """Return ``(prediction MSE, total task cost)`` over ``instances``."""
        validation_rslt = get_loss(self.net, self.A, X,y,instances)
        return validation_rslt[0], validation_rslt[1]
class SPO:
    """Smart "Predict, then Optimize" (SPO+) model for the shortest path.

    Each step solves the LP twice with Gurobi: once with the true costs (the
    result is cached in ``true_solution``) and once with the SPO+ perturbed
    costs ``2*c_pred - y``; the difference of the two solutions is injected
    as the gradient of the network output.
    """
    def __init__(self,A,num_features, num_layers, intermediate_size,num_instance= 1,
                 activation_fn = nn.ReLU, epochs=10,optimizer=optim.Adam,
                 validation=False,**hyperparams):
        self.A = A
        self.num_features = num_features
        self.num_layers = num_layers
        self.activation_fn = activation_fn
        self.intermediate_size = intermediate_size
        self.epochs = epochs
        self.num_instance = num_instance
        self.validation = validation
        self.net = make_fc(num_layers=num_layers, num_features=num_features,
                           activation_fn= activation_fn,
                           intermediate_size= intermediate_size)
        # Extra keyword arguments (e.g. lr) are forwarded to the optimizer.
        self.optimizer = optimizer(self.net.parameters(), **hyperparams)
    def fit(self,X,y,instances):
        """Train with the SPO+ subgradient.

        NOTE(review): ``df`` is assigned only when ``self.validation`` is
        truthy, so ``fit`` raises NameError on return otherwise.
        """
        #A_torch = torch.from_numpy(self.A).float()
        test_instances = instances['test']
        validation_instances = instances['validation']
        train_instances = instances['train']
        time_ = 0
        self.model_time = 0
        n_train = X.shape[0]
        if self.validation:
            validation_list = []
        X_torch = torch.from_numpy(X).float()
        y_torch = torch.from_numpy(y).float()
        # Cache of the optimal solution under the TRUE costs, per instance index.
        true_solution ={}
        logging.info("training started")
        for e in range(self.epochs):
            for i in range(self.num_instance):
                start_time = time.time()
                self.optimizer.zero_grad()
                source, dest = train_instances[i]
                # Flow-conservation RHS: +1 at the source, -1 at the destination.
                b = np.zeros(len(self.A))
                b[source] =1
                b[dest ]=-1
                if i not in true_solution:
                    # Solve once with the true costs and memoise the result.
                    model = gp.Model()
                    model.setParam('OutputFlag', 0)
                    x = model.addMVar(shape= self.A.shape[1], lb=0.0, vtype=gp.GRB.CONTINUOUS, name="x")
                    model.addConstr(self.A @ x == b, name="eq")
                    model.setObjective((y_torch.detach().numpy())@x, gp.GRB.MINIMIZE)
                    model.optimize()
                    x_true = x.X
                    true_solution[i] = np.copy(x_true)
                x_true = true_solution[i]
                c_pred = self.net(X_torch).squeeze()
                # SPO+ perturbed cost vector.
                c_spo = (2*c_pred - y_torch)
                model = gp.Model()
                model.setParam('OutputFlag', 0)
                x = model.addMVar(shape= self.A.shape[1], lb=0.0, ub=1.0,vtype=gp.GRB.CONTINUOUS, name="x")
                model.addConstr(self.A @ x == b, name="eq")
                model.setObjective((c_spo.detach().numpy())@x, gp.GRB.MINIMIZE)
                model.optimize()
                #print(model.status)
                x_spo = x.X
                # SPO+ subgradient with respect to the predicted costs.
                grad = torch.from_numpy( x_true - x_spo).float()
                # `loss` is the raw network output: backward(gradient=grad)
                # injects the subgradient directly instead of using a scalar loss.
                loss = self.net(X_torch).squeeze()
                loss.backward(gradient=grad)
                self.optimizer.step()
                logging.info("bkwd done")
                end_time = time.time()
                time_ += end_time - start_time
                if self.validation:
                    # Validate every 20 instances to bound the overhead.
                    if ((i+1)%20==0):
                        validation_list.append( validation_module(self.net,self.A,
                            X,y,train_instances,validation_instances,
                            test_instances,time_,e,i))
            print("Epoch {} Loss:{} Time: {:%Y-%m-%d %H:%M:%S}".format(e+1,loss.sum().item(),
                datetime.datetime.now()))
        if self.validation :
            dd = defaultdict(list)
            for d in validation_list:
                for key, value in d.items():
                    dd[key].append(value)
            df = pd.DataFrame.from_dict(dd)
            # print(validation_module(self.net,self.A,
            #     X,y,train_instances,validation_instances,
            #     test_instances,time_,e,i))
            # pred = self.predict(X)
            # print(mse(pred,y))
        logging.info('Completion Time %s \n' %str(datetime.datetime.now()) )
        return df
    def validation_result(self,X,y, instances):
        """Return ``(prediction MSE, total task cost)`` over ``instances``."""
        validation_rslt = get_loss(self.net, self.A, X,y,instances)
        return validation_rslt[0], validation_rslt[1]
def predict(self,X):
X_torch = torch.from_numpy(X).float()
self.net.eval()
pred= self.net(X_torch)
self.net.train()
return pred.detach().detach().numpy().squeeze() | shortespath/shortespath.py | 19,575 | from ip_model import * remove redundant (linearly dependent) rows from equality constraints if (sps.issparse(A_eq)): if rr and A_eq.size > 0: TODO: Fast sparse rank check? A_eq, b_eq, status, message = _remove_redundancy_sparse(A_eq, b_eq) if A_eq.shape[0] < n_rows_A: warn(redundancy_warning, OptimizeWarning, stacklevel=1) if status != 0: complete = True return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, x0, undo, complete, status, message) This is a wild guess for which redundancy removal algorithm will be faster. More testing would be good. TODO: instead use results of first SVD in _remove_redundancy oh well, we'll have to go with _remove_redundancy_dense return bceloss(c_pred,c), sum(rslt) nn.KLDivLoss(reduction='batchmean') batch-index rows_to_be_removed = _remove_redundant_rows(self.A) A_torch = torch.from_numpy(np.delete(self.A, rows_to_be_removed, axis=0)).float() b = np.zeros(len(self.A)) b[source] =1 b[dest ]=-1 b= np.delete(b, rows_to_be_removed) b_torch = torch.from_numpy(b).float() model_params_quad = make_gurobi_model(None,None, A_torch.detach().numpy(), b_torch.detach().numpy(), Q_torch.detach().numpy()) x = QPFunction(verbose=False, solver=QPSolvers.GUROBI, model_params= model_params_quad)(Q_torch.expand(1, *Q_torch.shape), c_pred.squeeze(),torch.Tensor(), torch.Tensor(), A_torch.expand(1, *A_torch.shape), b_torch.expand(1, *b_torch.shape)) logging.info("bkwd done")A_torch = torch.from_numpy(self.A).float() model = gp.Model() model.setParam('OutputFlag', 0) x = model.addMVar(shape= self.A.shape[1], lb=0.0, vtype=gp.GRB.CONTINUOUS, name="x") A_ = np.delete(A_, rows_to_be_removed, axis=0) b_ = np.delete(b_, rows_to_be_removed) A_torch = torch.from_numpy(self.A).float()A_torch = torch.from_numpy(self.A).float() print(model.status) print(validation_module(self.net,self.A, X,y,train_instances,validation_instances, test_instances,time_,e,i)) pred = self.predict(X) 
print(mse(pred,y)) | 2,022 | en | 0.41191 |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Divi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the zapwallettxes functionality.
- start two divid nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
transactions are still available.
- restart node 0 with zapwallettxes and persistmempool, and verify that both
the confirmed and the unconfirmed transactions are still available.
- restart node 0 with just zapwallettxes and verify that the confirmed
transactions are still available, but that the unconfirmed transaction has
been zapped.
"""
from test_framework.test_framework import DiviTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
wait_until,
)
class ZapWalletTXesTest (DiviTestFramework):
    def set_test_params(self):
        """Run two nodes on a fresh (clean-chain) regtest network."""
        self.setup_clean_chain = True
        self.num_nodes = 2
    def skip_test_if_missing_module(self):
        """This test requires wallet support to be compiled in."""
        self.skip_if_no_wallet()
    def run_test(self):
        """Exercise -zapwallettxes with and without -persistmempool.

        Creates one confirmed and one unconfirmed transaction on node 0,
        then restarts the node under three configurations and checks which
        transactions survive in the wallet.
        """
        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generate(100)
        self.sync_all()
        # This transaction will be confirmed
        txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)
        self.nodes[0].generate(1)
        self.sync_all()
        # This transaction will not be confirmed
        txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)
        # Confirmed and unconfirmed transactions are now in the wallet.
        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
        assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
        # Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet.
        self.stop_node(0)
        self.start_node(0)
        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
        assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
        # Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed
        # transaction is zapped from the wallet, but is re-added when the mempool is reloaded.
        self.stop_node(0)
        self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"])
        wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3)
        self.nodes[0].syncwithvalidationinterfacequeue()  # Flush mempool to wallet
        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
        assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
        # Stop node0 and restart with zapwallettxes, but not persistmempool.
        # The unconfirmed transaction is zapped and is no longer in the wallet.
        self.stop_node(0)
        self.start_node(0, ["-zapwallettxes=2"])
        # tx1 is still available because it was confirmed
        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
        # This will raise an exception because the unconfirmed transaction has been zapped
        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2)
if __name__ == '__main__':
ZapWalletTXesTest().main()
| test/functional/wallet_zapwallettxes.py | 3,395 | Test the zapwallettxes functionality.
- start two divid nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
transactions are still available.
- restart node 0 with zapwallettxes and persistmempool, and verify that both
the confirmed and the unconfirmed transactions are still available.
- restart node 0 with just zapwallettxes and verify that the confirmed
transactions are still available, but that the unconfirmed transaction has
been zapped.
!/usr/bin/env python3 Copyright (c) 2014-2018 The Divi Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This transaction will be confirmed This transaction will not be confirmed Confirmed and unconfirmed transactions are now in the wallet. Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet. Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed transaction is zapped from the wallet, but is re-added when the mempool is reloaded. Flush mempool to wallet Stop node0 and restart with zapwallettxes, but not persistmempool. The unconfirmed transaction is zapped and is no longer in the wallet. tx1 is still be available because it was confirmed This will raise an exception because the unconfirmed transaction has been zapped | 1,440 | en | 0.875002 |
"""
Filename: RobotsParser.py
Author: Maxwell Goldberg
Last modified: 06.09.17
Description: Helper class for parsing individual robots.txt records.
"""
# CONSTANTS
from constants import RECORD_MAX_LEN
# PYTHON BUILTINS
import re, unicodedata, logging
def test_ctrl_chars(s):
return len(s) != len("".join(ch for ch in s if unicodedata.category(ch)[0]!="C"))
class RobotsParser:
    """Parses a single robots.txt record (one "field: path" line).

    NOTE(review): this module targets Python 2 — it checks
    isinstance(record, unicode) and uses u'' literals; porting to Python 3
    would require replacing ``unicode`` with ``str``.
    """
    # Only these directive fields are accepted (compared lowercased).
    valid_fields = [u'user-agent', u'allow', u'disallow']
    def __init__(self, record=None):
        """Validate that ``record`` is a Unicode string within the size limit."""
        if record is None:
            raise TypeError('Parameter record must not be NoneType')
        if not isinstance(record, unicode):
            raise TypeError('Parameter record must be a Unicode string')
        if len(record) > RECORD_MAX_LEN:
            raise ValueError('Parameter record exceeds maximum record num characters')
        self.record = record
    def parse_field(self, field):
        """Normalise the directive name and reject unknown fields."""
        field = field.strip().lower()
        if field not in RobotsParser.valid_fields:
            raise ValueError('Record contains invalid field')
        return field
    def parse_path(self, path):
        """Convert a robots.txt path pattern into a regular-expression string.

        Rejects control characters, escapes regex metacharacters, then
        restores the robots.txt wildcards: '*' -> '.*' and '$' -> end anchor.
        """
        path = path.strip()
        if test_ctrl_chars(path):
            raise ValueError('Record path contains control characters')
        # Get path length prior to parsing (re.escape changes the length).
        self.init_path_len = len(path)
        path = re.escape(path)
        path = path.replace('\\*', '.*').replace('\\$', '$')
        return path
    def parse(self):
        """Split the record into field and path, storing them on self."""
        # Strip a trailing comment, then split on the FIRST colon only
        # (paths may themselves contain colons).
        record_list = self.record.split('#')[0]
        record_list = record_list.split(':', 1)
        if len(record_list) <= 1:
            raise ValueError('Record must contain a delimiter')
        # NOTE(review): split(':', 1) yields at most 2 parts, so this
        # branch is unreachable.
        if len(record_list) > 2:
            raise ValueError('Record contains too many delimited fields')
        # Parse the field
        self.field = self.parse_field(record_list[0])
        # Parse the path
self.path = self.parse_path(record_list[1]) | crawler/lib/RobotsParser.py | 1,742 | Filename: RobotsParser.py
Author: Maxwell Goldberg
Last modified: 06.09.17
Description: Helper class for parsing individual robots.txt records.
CONSTANTS PYTHON BUILTINS Get path length prior to parsing Attempt to separate a record by a colon delimiter. Parse the field Parse the path | 286 | en | 0.742863 |
# -*- coding: utf-8 -*-
# @Time : 2019/5/11 15:12
# @Author : LegenDong
# @User : legendong
# @File : __init__.py.py
# @Software: PyCharm
from .channel_attention_layer import *
from .nan_attention_layer import *
| models/layer/__init__.py | 223 | -*- coding: utf-8 -*- @Time : 2019/5/11 15:12 @Author : LegenDong @User : legendong @File : __init__.py.py @Software: PyCharm | 135 | en | 0.206452 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import src.proto.predict_pb2 as predict__pb2
class PredictionServiceStub(object):
    """Client-side stub for the onnxruntime.server.PredictionService service.

    Generated by the gRPC Python plugin — regenerate from the .proto file
    rather than editing by hand.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary Predict RPC bound to protobuf (de)serializers.
        self.Predict = channel.unary_unary(
            "/onnxruntime.server.PredictionService/Predict",
            request_serializer=predict__pb2.PredictRequest.SerializeToString,
            response_deserializer=predict__pb2.PredictResponse.FromString,
        )
class PredictionServiceServicer(object):
    """Server-side base class; subclass and override Predict to implement it."""

    def Predict(self, request, context):
        """Unary Predict RPC; this base implementation reports UNIMPLEMENTED."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_PredictionServiceServicer_to_server(servicer, server):
    """Register ``servicer``'s RPC handlers for PredictionService on ``server``."""
    rpc_method_handlers = {
        "Predict": grpc.unary_unary_rpc_method_handler(
            servicer.Predict,
            request_deserializer=predict__pb2.PredictRequest.FromString,
            response_serializer=predict__pb2.PredictResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler("onnxruntime.server.PredictionService", rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class PredictionService(object):
    """Static convenience API for calling Predict without building a stub.

    Part of gRPC's EXPERIMENTAL API surface (grpc.experimental).
    """

    @staticmethod
    def Predict(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Invoke the unary Predict RPC on ``target`` with ``request``."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/onnxruntime.server.PredictionService/Predict",
            predict__pb2.PredictRequest.SerializeToString,
            predict__pb2.PredictResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
| chapter2_training/cifar10/evaluate/src/proto/prediction_service_pb2_grpc.py | 2,523 | Missing associated documentation comment in .proto file.
Missing associated documentation comment in .proto file.
Missing associated documentation comment in .proto file.
Missing associated documentation comment in .proto file.
Constructor.
Args:
channel: A grpc.Channel.
Client and server classes corresponding to protobuf-defined services.
Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! This class is part of an EXPERIMENTAL API. | 459 | en | 0.664279 |
#Importing Libraries
import os
import csv
import sys, getopt
import uuid
import SimpleITK as sitk
import cv2
import numpy as np
import tensorflow as tf
from flask import Flask, flash, request, redirect, render_template
from flask import jsonify
from flask import send_from_directory
from flask_materialize import Material
from tensorflow.python.keras.backend import set_session
from werkzeug.utils import secure_filename
import shutil
import nibabel as nib
import pandas as pd
import numpy
from sarcopenia_ai.apps.segmentation.segloader import preprocess_test_image
from sarcopenia_ai.apps.server import settings
from sarcopenia_ai.apps.slice_detection.predict import parse_inputs, to256
from sarcopenia_ai.apps.slice_detection.utils import decode_slice_detection_prediction, \
preprocess_sitk_image_for_slice_detection, adjust_detected_position_spacing, place_line_on_img
from sarcopenia_ai.core.model_wrapper import BaseModelWrapper
from sarcopenia_ai.io import load_image
from sarcopenia_ai.preprocessing.preprocessing import blend2d
from sarcopenia_ai.utils import compute_muscle_area, compute_muscle_attenuation
# Let TensorFlow grow GPU memory on demand instead of grabbing it all up
# front, and keep one shared session/graph for the two models loaded below.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
graph = tf.get_default_graph()
import cv2
import numpy as np
def normalise_zero_one(image, eps=1e-8):
    """Linearly rescale *image* so its minimum maps to 0 and its maximum to 1.

    The array is cast to float32 first; *eps* guards against division by
    zero for a constant image.
    """
    print("Here 1")
    as_float = image.astype(np.float32)
    lo = np.min(as_float)
    hi = np.max(as_float)
    shifted = as_float - lo
    shifted /= (hi - lo + eps)
    return shifted
def normalise_one_one(image):
    """Linearly rescale *image* into the range [-1, 1]."""
    print("Here 2")
    return 2. * normalise_zero_one(image) - 1.
def preprocess_test_image(image):
    """Prepare an image for the network by rescaling intensities to [-1, 1]."""
    print("Here")
    # Earlier variant clipped to a fixed HU window first:
    # image = normalise_one_one(image, -250, 250)
    return normalise_one_one(image)
##################
def find_max(img):
    """Return the index along axis 0 of the global maximum of *img*."""
    flat_argmax = np.argmax(img, axis=None)
    return np.unravel_index(flat_argmax, img.shape)[0]
#Read arguments
#############################
import argparse
msg = "Adding description"
# Initialize parser
parser = argparse.ArgumentParser(description = msg)
# Reading the input arguments
parser.add_argument("-i", "--Input", help = "Input file or folder")
parser.add_argument('-test_name', type=str, default='Test')
# Read arguments from command line
args = parser.parse_args()
path = args.Input
test_name = args.test_name
#Creating the result structure variables
# NOTE(review): paths are built by string concatenation; os.path.join with a
# single pre-joined argument is a no-op here.
main = os.getcwd()
directory = os.path.join(main+'/NII_Data/'+path)
if not os.path.exists(main+'/Results/'+path+"/"):
    os.mkdir(main+'/Results/'+path+'/')
# The per-test output folder is wiped and recreated on every run.
out = os.path.join(main+'/Results/'+path+"/"+test_name+'/')
if os.path.exists(out):
    shutil.rmtree(out)
    os.mkdir(out)
# NOTE(review): redundant with the branch above — `out` always exists by now.
if not os.path.exists(out):
    os.mkdir(out)
# Sub-folders for the classification outcome of each image.
out_yes = os.path.join(out+'/Yes')
if not os.path.exists(out_yes):
    os.mkdir(out_yes)
out_no = os.path.join(out+'/No')
if not os.path.exists(out_no):
    os.mkdir(out_no)
out_rev = os.path.join(out+'/Review/')
if not os.path.exists(out_rev):
    os.mkdir(out_rev)
out_csv = os.path.join(out+'/Pred CSVs/')
if not os.path.exists(out_csv):
    os.mkdir(out_csv)
#Load the sarcopenia-ai models
#set_session(sess)
# NOTE(review): `global` at module scope is a no-op; these statements already
# create module-level names.
model_wrapper = BaseModelWrapper(settings.SLICE_DETECTION_MODEL_PATH)
model_wrapper.setup_model()
global slice_detection_model
slice_detection_model= model_wrapper.model
# _make_predict_function is a Keras-internal API — presumably called here to
# build the predict graph eagerly before the processing loop; confirm.
slice_detection_model._make_predict_function()
global segmentation_model
model_wrapper = BaseModelWrapper(settings.SEGMENTATION_MODEL_PATH)
model_wrapper.setup_model()
segmentation_model = model_wrapper.model
segmentation_model._make_predict_function()
####Updated functions to replace older versions listed in the sarcopenia-ai enviroment
#Previous research indicates adjusting the HU range can help bone appear better
def reduce_hu_intensity_range(img, minv=100, maxv=1500):
    """Clip intensities to [minv, maxv] and rescale the result to [0, 255].

    Narrowing the window before rescaling emphasises the high-intensity
    range (per the comment above: previous research indicates adjusting the
    HU range can help bone appear better).
    """
    clipped = np.clip(img, minv, maxv)
    return 255 * normalise_zero_one(clipped)
#Setting up the output file name & Prediction counter
# Running id assigned to each processed image.
pred_id = 0
# Column order for the per-image results table; presumably written out as a
# CSV after the processing loop (the write happens past this chunk — confirm).
cols = ['Folder_Path','Patient_Folder','Study_Folder','Serie_Folder','L3_detection','L3_position','Total_slices','Confidence','Slice_Thickness', 'Orientation']
# One row per processed image is accumulated here.
lst = []
#Looping through the input folder and analyzing the images
for folder in os.listdir(directory):
#Patient Folder
if(folder=='.DS_Store'):
continue
#Study Folder
for sub_folder in os.listdir(directory+"/"+folder):
if(sub_folder=='.DS_Store'):
continue
#Series Folder
for sub_sub_folder in os.listdir(directory+"/"+folder+"/"+sub_folder):
#Image Level
for file in os.listdir(directory+"/"+folder+"/"+sub_folder+"/"+sub_sub_folder):
print("IN SUB-SUB-FOLDER: "+sub_sub_folder)
#print(file)
if(file.endswith(".nii.gz") or file.endswith(".nii")):
print("Processing file: "+file)
try:
if(sub_sub_folder=='.DS_Store'):
continue
print("IN SUB-SUB-FOLDER: "+sub_sub_folder)
image_path = directory+"/"+folder+"/"+sub_folder+"/"+sub_sub_folder+"/"+file
prob_threshold_U=settings.THRESHOLD_U
prob_threshold_L=settings.THRESHOLD_L
#Gathering image name
import ntpath
head, tail = ntpath.split(image_path)
image_name = tail or ntpath.basename(head)
pred_id = pred_id +1
print("ID --> "+str(pred_id))
results = {"success": False, "prediction": {'id': pred_id}}
sitk_image, _ = load_image(image_path)
print("-----------------------------image path: "+image_path )
#The code is not set up to analyze 4 dimensional data.
if len(sitk_image.GetSize()) == 4:
print("-------- 4D Image: Grabbing only first volume")
sitk_image = sitk_image[:, :, :, 0]
#Getting image orientation information for output file.
print('-------------- NIB')
nib_image = nib.load(image_path)
orient_nib=nib.orientations.aff2axcodes(nib_image.affine)
print('-------------- Preprocess')
#Preprocessing the image
image2d, image2d_preview= preprocess_sitk_image_for_slice_detection(sitk_image)
image3d = sitk.GetArrayFromImage(sitk_image)
#print(image3d.shape)
#print(image2d.shape)
#print(image2d_preview.shape)
spacing = sitk_image.GetSpacing()
size = list(sitk_image.GetSize())
slice_thickness = spacing[2]
#Utilizing the sarcopenia-ai model to predict the L3 vertabrae
with graph.as_default():
set_session(sess)
preds = slice_detection_model.predict(image2d)
print('-------------- Predict')
#Processing the model output
pred_z, prob = decode_slice_detection_prediction(preds)
slice_z = adjust_detected_position_spacing(pred_z, spacing)
print('Prob: '+ str(prob))
print('Slice Z: ' +str(slice_z) )
print('{red_z: '+str(pred_z))
#Normalizing the prediction image to be within %28-%47 percent of the body
new_z_calculate = 0
new_pred_z = pred_z
new_slice_z = slice_z
new_prob = prob
print('-------------- Normalize')
if(slice_z < .27*size[2] or slice_z > .48*size[2]):
print("---------------------debug")
print(preds.shape)
print(preds.shape[1])
new_pred_z = find_max(preds[0, int(.27*preds.shape[1]):int(.48*preds.shape[1])])
new_pred_z = new_pred_z + int(.27*preds.shape[1]);
new_slice_z = adjust_detected_position_spacing(new_pred_z, spacing)
print("old position")
print(pred_z)
print(slice_z)
print("new position")
print(new_pred_z)
print(new_slice_z)
new_z_calculate =1;
new_prob = float(preds[0,new_pred_z])
## Outputting prediction data
print('-------------- Predict CSV')
preds_reshaped = preds.reshape(preds.shape[0], -1)
numpy.savetxt(out_csv+"PRED_"+str(pred_id)+".csv", preds_reshaped, delimiter=",")
#If the prediction for L3 is above the predifined threshold for acceptance
if (new_prob > prob_threshold_U):
print('-------------- Above')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], pred_z, pred_z, r=1)
image2dB = place_line_on_img(image2d[0], -new_pred_z, new_pred_z, r=1)
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_SL.jpg', to256(slice_image))
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_FR.jpg', to256(image2dA))
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_FR2.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'YES',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
#Images where the L3 vertabrae was not identified
elif (new_prob <= prob_threshold_L ):
print('-------------- No')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], -pred_z, -pred_z, r=1)
image2dB = place_line_on_img(image2d[0], -new_pred_z, -new_pred_z, r=1)
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_SL.jpg', to256(slice_image))
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_FR.jpg', to256(image2dA))
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_FR2.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'NO',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
#Images where the L3 vertabrae was identified but confidence requirements were not met.
else:
print('-------------- Review')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], pred_z, pred_z, r=1)
image2dB = place_line_on_img(image2d[0], new_pred_z, new_pred_z, r=1)
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_SL_'+str(new_slice_z)+'_PROB_'+str(new_prob)+'.jpg', to256(slice_image))
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_FR_'+str(slice_z)+'_PROB_'+str(prob)+'.jpg', to256(image2dA))
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_FR2_'+str(new_slice_z)+'_PROB_'+str(new_prob)+'.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'REVIEW',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
#Images that error out (e.g. image orientation is incorrect)
except:
print('-------------- Wrong')
print('-------------- ')
print('-------------- ')
print("Something went wrong - File: "+image_path)
print("Unexpected error"+str(sys.exc_info()[0]))
output = [image_path,folder,sub_folder,sub_sub_folder,'Error','','','Something went wrong:'+str(sys.exc_info()[1]),'', orient_nib]
lst.append(output)
#Outputting the results dataset
df = pd.DataFrame(lst, columns=cols)
if not os.path.exists('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/'):
os.mkdir('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/')
df.to_csv('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/'+path+'_'+test_name+".csv")
print(' ')
print(' ')
print(' ')
print(' -------------- PROCESSING COMPLETE ------------------- ')
| detection.py | 14,114 | Importing Libraries image = normalise_one_one(image, -250, 250) Read arguments Initialize parser Reading the input arguments Read arguments from command line Creating the result structure variablesLoad the sarcopenia-ai models set_session(sess)Updated functions to replace older versions listed in the sarcopenia-ai enviromentPrevious research indicates adjusting the HU range can help bone appear better Setting up the output file name & Prediction counter Looping through the input folder and analyzing the images Patient FolderStudy Folder Series Folder Image Level print(file)Gathering image name The code is not set up to analyze 4 dimensional data.Getting image orientation information for output file. Preprocessing the image print(image3d.shape)print(image2d.shape)print(image2d_preview.shape)Utilizing the sarcopenia-ai model to predict the L3 vertabrae Processing the model output Normalizing the prediction image to be within %28-%47 percent of the body Outputting prediction data If the prediction for L3 is above the predifined threshold for acceptance Images where the L3 vertabrae was not identified Images where the L3 vertabrae was identified but confidence requirements were not met. Images that error out (e.g. image orientation is incorrect)Outputting the results dataset | 1,297 | en | 0.719551 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for ConvertLocationAndScaleParameters
"""
import unittest
import numpy as np
from scipy import stats
from iris.tests import IrisTest
from improver.ensemble_copula_coupling.ensemble_copula_coupling import (
ConvertLocationAndScaleParameters as Plugin)
class Test__init__(IrisTest):
    """Tests for the __init__ method."""

    def test_valid_distribution(self):
        """A recognised distribution name resolves to its scipy object."""
        result = Plugin(distribution="norm")
        self.assertEqual(result.distribution, stats.norm)
        self.assertEqual(result.shape_parameters, [])

    def test_valid_distribution_with_shape_parameters(self):
        """Shape parameters are stored alongside a valid distribution."""
        result = Plugin(distribution="truncnorm", shape_parameters=[0, np.inf])
        self.assertEqual(result.distribution, stats.truncnorm)
        self.assertEqual(result.shape_parameters, [0, np.inf])

    def test_invalid_distribution(self):
        """An unknown distribution name raises an AttributeError."""
        with self.assertRaisesRegex(AttributeError,
                                    "The distribution requested"):
            Plugin(distribution="elephant")
class Test__repr__(IrisTest):
    """Test string representation of plugin."""

    def test_basic(self):
        """The default plugin reports its distribution and shape parameters."""
        self.assertEqual(
            str(Plugin()),
            "<ConvertLocationAndScaleParameters: "
            "distribution: norm; shape_parameters: []>")
class Test__rescale_shape_parameters(IrisTest):
    """Test the _rescale_shape_parameters method."""

    def setUp(self):
        """Define location and scale parameters shared by the tests."""
        self.location_parameter = np.array([-1, 0, 1])
        self.scale_parameter = np.array([1, 1.5, 2])

    def test_truncated_at_zero(self):
        """Shape parameters implying a truncation at zero are rescaled."""
        plugin = Plugin(distribution="truncnorm",
                        shape_parameters=[0, np.inf])
        plugin._rescale_shape_parameters(
            self.location_parameter, self.scale_parameter)
        expected = [np.array([1.0, 0, -0.5]), np.array([np.inf] * 3)]
        self.assertArrayAlmostEqual(plugin.shape_parameters, expected)

    def test_discrete_shape_parameters(self):
        """Finite shape parameters are rescaled elementwise."""
        plugin = Plugin(distribution="truncnorm",
                        shape_parameters=[-4, 6])
        plugin._rescale_shape_parameters(
            self.location_parameter, self.scale_parameter)
        expected = [np.array([-3, -2.666667, -2.5]), np.array([7, 4, 2.5])]
        self.assertArrayAlmostEqual(plugin.shape_parameters, expected)

    def test_alternative_distribution(self):
        """For a distribution other than the truncated normal, no
        rescaling is applied."""
        plugin = Plugin(distribution="norm",
                        shape_parameters=[0, np.inf])
        plugin._rescale_shape_parameters(
            self.location_parameter, self.scale_parameter)
        self.assertArrayEqual(plugin.shape_parameters, [0, np.inf])

    def test_no_shape_parameters_exception(self):
        """Omitting shape parameters for the truncated normal distribution
        raises an exception."""
        plugin = Plugin(distribution="truncnorm")
        with self.assertRaisesRegex(ValueError,
                                    "For the truncated normal distribution"):
            plugin._rescale_shape_parameters(
                self.location_parameter, self.scale_parameter)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py | 5,460 | Test the __init__ method.
Test string representation of plugin.
Test the _rescale_shape_parameters
Set up values for testing.
Test specifying a distribution other than truncated normal. In
this instance, no rescaling is applied.
Test string representation
Test scaling discrete shape parameters.
Test for an invalid distribution.
Test raising an exception when shape parameters are not specified
for the truncated normal distribution.
Test scaling shape parameters implying a truncation at zero.
Test for a valid distribution.
Test for a valid distribution with shape parameters.
Unit tests for ConvertLocationAndScaleParameters
-*- coding: utf-8 -*- ----------------------------------------------------------------------------- (C) British Crown Copyright 2017-2019 Met Office. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 2,232 | en | 0.805121 |
"""
Item class for Jaseci
Each item has an id, name, timestamp.
"""
from jaseci.element.element import element
class item(element):
    """Item class for Jaseci.

    A thin wrapper around `element` that carries a single payload,
    stored in `item_value` and exposed via the `value` property.
    """

    def __init__(self, value=None, *args, **kwargs):
        # Payload of the item; read/written through the `value` property.
        self.item_value = value
        super().__init__(*args, **kwargs)

    @property
    def value(self):
        """Current payload of the item."""
        return self.item_value

    @value.setter
    def value(self, val):
        # Persist the element whenever the payload changes.
        self.item_value = val
        self.save()

    def __str__(self):
        # Compare against None explicitly so falsy payloads (0, '', False)
        # are displayed instead of being misreported as ":None".
        if self.value is not None:
            return super().__str__() + f":{self.value}"
        else:
            return super().__str__() + ":None"
| jaseci_core/jaseci/attr/item.py | 621 | Item class for Jaseci
Item class for Jaseci
Each item has an id, name, timestamp. | 82 | en | 0.695268 |
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## Email: zhanghang0704@gmail.com
## Copyright (c) 2020
##
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
import time
import argparse
import importlib
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.backends.cudnn as cudnn
from torch.nn.parallel import DistributedDataParallel
import autotorch as at
import encoding
from encoding.nn import LabelSmoothing, NLLMultiLabelSmooth
from encoding.utils import (accuracy, AverageMeter, MixUpWrapper, LR_Scheduler, torch_dist_sum)
try:
import apex
from apex import amp
except ModuleNotFoundError:
print('please install amp if using float16 training')
class Options():
    """Command-line option parser for the training script.

    Builds an argparse parser covering data, model, optimization,
    checkpointing, distributed-training and evaluation settings.
    """

    def __init__(self):
        # data settings
        parser = argparse.ArgumentParser(description='Deep Encoding')
        parser.add_argument('--dataset', type=str, default='imagenet',
                            help='training dataset (default: imagenet)')
        parser.add_argument('--base-size', type=int, default=None,
                            help='base image size')
        parser.add_argument('--crop-size', type=int, default=224,
                            help='crop image size')
        parser.add_argument('--label-smoothing', type=float, default=0.0,
                            help='label-smoothing (default eta: 0.0)')
        parser.add_argument('--mixup', type=float, default=0.0,
                            help='mixup (default eta: 0.0)')
        parser.add_argument('--auto-policy', type=str, default=None,
                            help='path to auto augment policy')
        parser.add_argument('--data-dir', type=str, default=os.path.expanduser('~/.encoding/data'),
                            help='data location for training')
        # model params
        parser.add_argument('--arch', type=str, default='regnet',
                            help='network type (default: regnet)')
        parser.add_argument('--config-file', type=str, required=True,
                            help='network node config file')
        parser.add_argument('--last-gamma', action='store_true', default=False,
                            help='whether to init gamma of the last BN layer in \
                            each bottleneck to 0 (default: False)')
        # training params
        parser.add_argument('--amp', action='store_true',
                            default=False, help='using amp')
        parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                            help='batch size for training (default: 128)')
        parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
                            help='batch size for testing (default: 256)')
        # NOTE: help text previously claimed a default of 600 / 1; corrected
        # to match the actual argparse defaults below.
        parser.add_argument('--epochs', type=int, default=120, metavar='N',
                            help='number of epochs to train (default: 120)')
        parser.add_argument('--start_epoch', type=int, default=0,
                            metavar='N', help='the epoch number to start (default: 0)')
        parser.add_argument('--workers', type=int, default=8,
                            metavar='N', help='dataloader threads')
        # optimizer
        parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
                            help='learning rate (default: 0.1)')
        parser.add_argument('--lr-scheduler', type=str, default='cos',
                            help='learning rate scheduler (default: cos)')
        parser.add_argument('--warmup-epochs', type=int, default=0,
                            help='number of warmup epochs (default: 0)')
        parser.add_argument('--momentum', type=float, default=0.9,
                            metavar='M', help='SGD momentum (default: 0.9)')
        parser.add_argument('--wd', type=float, default=1e-4,
                            metavar='M', help='SGD weight decay (default: 1e-4)')
        parser.add_argument('--no-bn-wd', action='store_true',
                            default=False, help='no bias decay')
        # seed
        parser.add_argument('--seed', type=int, default=1, metavar='S',
                            help='random seed (default: 1)')
        # checking point
        parser.add_argument('--resume', type=str, default=None,
                            help='put the path to resuming file if needed')
        parser.add_argument('--checkname', type=str, default='default',
                            help='set the checkpoint name')
        # distributed
        parser.add_argument('--world-size', default=1, type=int,
                            help='number of nodes for distributed training')
        parser.add_argument('--rank', default=0, type=int,
                            help='node rank for distributed training')
        parser.add_argument('--dist-url', default='tcp://localhost:23456', type=str,
                            help='url used to set up distributed training')
        parser.add_argument('--dist-backend', default='nccl', type=str,
                            help='distributed backend')
        # evaluation option
        parser.add_argument('--eval', action='store_true', default= False,
                            help='evaluating')
        parser.add_argument('--export', type=str, default=None,
                            help='put the path to resuming file if needed')
        self.parser = parser

    def parse(self):
        """Parse sys.argv and return the populated argument namespace."""
        args = self.parser.parse_args()
        return args
def main():
    """Parse CLI args, scale the LR by total world size, and spawn one
    training worker per local GPU."""
    args = Options().parse()
    gpus_per_node = torch.cuda.device_count()
    args.world_size = gpus_per_node * args.world_size
    # Linear LR scaling rule: base LR multiplied by the number of workers.
    args.lr = args.lr * args.world_size
    mp.spawn(main_worker, nprocs=gpus_per_node, args=(gpus_per_node, args))
# global variable
# Best top-1 validation accuracy seen so far, plus per-epoch accuracy
# histories; shared with main_worker's nested train/validate closures.
best_pred = 0.0
acclist_train = []
acclist_val = []
def main_worker(gpu, ngpus_per_node, args):
    """Per-GPU training worker launched by mp.spawn.

    Sets up the distributed process group, data loaders, model, optimizer
    and (optionally) apex AMP, then runs the train/validate loop; rank 0
    handles all printing and checkpointing.

    Args:
        gpu: local GPU index on this node (also the spawned process index).
        ngpus_per_node: number of GPUs per node.
        args: parsed command-line options from Options().
    """
    args.gpu = gpu
    # Global rank = node rank * GPUs-per-node + local GPU index.
    args.rank = args.rank * ngpus_per_node + gpu
    # model name for checkpoint
    args.model = "{}-{}".format(args.arch, os.path.splitext(os.path.basename(args.config_file))[0])
    if args.gpu == 0:
        print('model:', args.model)
        print('rank: {} / {}'.format(args.rank, args.world_size))
    dist.init_process_group(backend=args.dist_backend,
                            init_method=args.dist_url,
                            world_size=args.world_size,
                            rank=args.rank)
    torch.cuda.set_device(args.gpu)
    # init the args
    global best_pred, acclist_train, acclist_val
    if args.gpu == 0:
        print(args)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    cudnn.benchmark = True
    # init dataloader
    transform_train, transform_val = encoding.transforms.get_transform(
        args.dataset, args.base_size, args.crop_size)
    if args.auto_policy is not None:
        # Prepend the learned auto-augment policy to the training transforms.
        print(f'Using auto_policy: {args.auto_policy}')
        from augment import Augmentation
        auto_policy = Augmentation(at.load(args.auto_policy))
        transform_train.transforms.insert(0, auto_policy)
    trainset = encoding.datasets.get_dataset(args.dataset, root=args.data_dir,
                                             transform=transform_train, train=True, download=True)
    valset = encoding.datasets.get_dataset(args.dataset, root=args.data_dir,
                                           transform=transform_val, train=False, download=True)
    # DistributedSampler shards the data across ranks; it shuffles internally,
    # so the DataLoader itself uses shuffle=False.
    train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
    train_loader = torch.utils.data.DataLoader(
        trainset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True,
        sampler=train_sampler)
    val_sampler = torch.utils.data.distributed.DistributedSampler(valset, shuffle=False)
    val_loader = torch.utils.data.DataLoader(
        valset, batch_size=args.test_batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True,
        sampler=val_sampler)
    # init the model
    arch = importlib.import_module('arch.' + args.arch)
    model = arch.config_network(args.config_file)
    if args.gpu == 0:
        print(model)
    # Loss selection: mixup requires the multi-label smoothing variant;
    # otherwise plain label smoothing or standard cross-entropy.
    if args.mixup > 0:
        train_loader = MixUpWrapper(args.mixup, 1000, train_loader, args.gpu)
        criterion = NLLMultiLabelSmooth(args.label_smoothing)
    elif args.label_smoothing > 0.0:
        criterion = LabelSmoothing(args.label_smoothing)
    else:
        criterion = nn.CrossEntropyLoss()
    model.cuda(args.gpu)
    criterion.cuda(args.gpu)
    # criterion and optimizer
    if args.no_bn_wd:
        # Split parameters so BN weights and all biases get zero weight decay.
        parameters = model.named_parameters()
        param_dict = {}
        for k, v in parameters:
            param_dict[k] = v
        bn_params = [v for n, v in param_dict.items() if ('bn' in n or 'bias' in n)]
        rest_params = [v for n, v in param_dict.items() if not ('bn' in n or 'bias' in n)]
        if args.gpu == 0:
            print(" Weight decay NOT applied to BN parameters ")
            print(f'len(parameters): {len(list(model.parameters()))} = {len(bn_params)} + {len(rest_params)}')
        optimizer = torch.optim.SGD([{'params': bn_params, 'weight_decay': 0 },
                                     {'params': rest_params, 'weight_decay': args.wd}],
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.wd)
    else:
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.wd)
    # AMP uses apex's DDP; otherwise fall back to torch's native DDP.
    if args.amp:
        #optimizer = amp_handle.wrap_optimizer(optimizer)
        model, optimizer = amp.initialize(model, optimizer, opt_level='O2')
        #from apex import amp
        DDP = apex.parallel.DistributedDataParallel
        model = DDP(model, delay_allreduce=True)
    else:
        DDP = DistributedDataParallel
        model = DDP(model, device_ids=[args.gpu])
    # check point
    if args.resume is not None:
        if os.path.isfile(args.resume):
            if args.gpu == 0:
                print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            # An explicit --start_epoch overrides the epoch saved in the checkpoint.
            args.start_epoch = checkpoint['epoch'] + 1 if args.start_epoch == 0 else args.start_epoch
            best_pred = checkpoint['best_pred']
            acclist_train = checkpoint['acclist_train']
            acclist_val = checkpoint['acclist_val']
            model.module.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            if args.amp:
                amp.load_state_dict(checkpoint['amp'])
            if args.gpu == 0:
                print("=> loaded checkpoint '{}' (epoch {})"
                      .format(args.resume, checkpoint['epoch']))
        else:
            raise RuntimeError ("=> no resume checkpoint found at '{}'".\
                format(args.resume))
    scheduler = LR_Scheduler(args.lr_scheduler,
                             base_lr=args.lr,
                             num_epochs=args.epochs,
                             iters_per_epoch=len(train_loader),
                             warmup_epochs=args.warmup_epochs)

    def train(epoch):
        # One training epoch over train_loader; the LR scheduler is stepped
        # per iteration.
        train_sampler.set_epoch(epoch)
        model.train()
        losses = AverageMeter()
        top1 = AverageMeter()
        global best_pred, acclist_train
        tic = time.time()
        for batch_idx, (data, target) in enumerate(train_loader):
            scheduler(optimizer, batch_idx, epoch, best_pred)
            # MixUpWrapper already yields GPU tensors.
            if not args.mixup:
                data, target = data.cuda(args.gpu), target.cuda(args.gpu)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            if args.amp:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            optimizer.step()
            # Top-1 accuracy is not meaningful against mixup's soft targets.
            if not args.mixup:
                acc1 = accuracy(output, target, topk=(1,))
                top1.update(acc1[0], data.size(0))
                losses.update(loss.item(), data.size(0))
            if batch_idx % 100 == 0 and args.gpu == 0:
                iter_per_sec = 100.0 / (time.time() - tic) if batch_idx != 0 else 1.0 / (time.time() - tic)
                tic = time.time()
                if args.mixup:
                    #print('Batch: %d| Loss: %.3f'%(batch_idx, losses.avg))
                    print('Epoch: {}, Iter: {}, Speed: {:.3f} iter/sec, Train loss: {:.3f}'. \
                        format(epoch, batch_idx, iter_per_sec, losses.avg.item()))
                else:
                    #print('Batch: %d| Loss: %.3f | Top1: %.3f'%(batch_idx, losses.avg, top1.avg))
                    print('Epoch: {}, Iter: {}, Speed: {:.3f} iter/sec, Top1: {:.3f}'. \
                        format(epoch, batch_idx, iter_per_sec, top1.avg.item()))
        acclist_train += [top1.avg]

    def validate(epoch):
        # Evaluate on val_loader; accuracies are reduced across ranks, and
        # rank 0 records history and writes the checkpoint.
        model.eval()
        top1 = AverageMeter()
        top5 = AverageMeter()
        global best_pred, acclist_train, acclist_val
        is_best = False
        for batch_idx, (data, target) in enumerate(val_loader):
            data, target = data.cuda(args.gpu), target.cuda(args.gpu)
            with torch.no_grad():
                output = model(data)
                acc1, acc5 = accuracy(output, target, topk=(1, 5))
                top1.update(acc1[0], data.size(0))
                top5.update(acc5[0], data.size(0))
        # sum all
        sum1, cnt1, sum5, cnt5 = torch_dist_sum(args.gpu, top1.sum, top1.count, top5.sum, top5.count)
        if args.eval:
            # Evaluation-only mode: print and return without checkpointing.
            if args.gpu == 0:
                top1_acc = sum(sum1) / sum(cnt1)
                top5_acc = sum(sum5) / sum(cnt5)
                print('Validation: Top1: %.3f | Top5: %.3f'%(top1_acc, top5_acc))
            return
        if args.gpu == 0:
            top1_acc = sum(sum1) / sum(cnt1)
            top5_acc = sum(sum5) / sum(cnt5)
            print('Validation: Top1: %.3f | Top5: %.3f'%(top1_acc, top5_acc))
            # save checkpoint
            acclist_val += [top1_acc]
            if top1_acc > best_pred:
                best_pred = top1_acc
                is_best = True
            state_dict = {
                'epoch': epoch,
                'state_dict': model.module.state_dict(),
                'optimizer': optimizer.state_dict(),
                'best_pred': best_pred,
                'acclist_train':acclist_train,
                'acclist_val':acclist_val,
            }
            if args.amp:
                state_dict['amp'] = amp.state_dict()
            encoding.utils.save_checkpoint(state_dict, args=args, is_best=is_best)

    if args.export:
        # Export-only mode: dump the unwrapped model weights and exit.
        if args.gpu == 0:
            torch.save(model.module.state_dict(), args.export + '.pth')
        return

    if args.eval:
        validate(args.start_epoch)
        return

    for epoch in range(args.start_epoch, args.epochs):
        tic = time.time()
        train(epoch)
        # Validation (and checkpointing) runs every 10 epochs only.
        if epoch % 10 == 0:# or epoch == args.epochs-1:
            validate(epoch)
        elapsed = time.time() - tic
        if args.gpu == 0:
            print(f'Epoch: {epoch}, Time cost: {elapsed}')

    if args.gpu == 0:
        # Final checkpoint after the last epoch (not marked best).
        encoding.utils.save_checkpoint({
            'epoch': args.epochs-1,
            'state_dict': model.module.state_dict(),
            'optimizer': optimizer.state_dict(),
            'best_pred': best_pred,
            'acclist_train':acclist_train,
            'acclist_val':acclist_val,
        }, args=args, is_best=False)
if __name__ == "__main__":
    # Silence multiprocessing semaphore_tracker warnings from spawned workers.
    os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
    main()
| train.py | 16,174 | +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Created by: Hang Zhang Email: zhanghang0704@gmail.com Copyright (c) 2020 LICENSE file in the root directory of this source tree +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ data settings model params parser.add_argument('--model', type=str, default='resnet50', help='network model type (default: densenet)') training params optimizer seed checking point distributed evaluation option global variable model name for checkpoint init the args init dataloader init the model criterion and optimizeroptimizer = amp_handle.wrap_optimizer(optimizer)from apex import amp check pointprint('Batch: %d| Loss: %.3f'%(batch_idx, losses.avg))print('Batch: %d| Loss: %.3f | Top1: %.3f'%(batch_idx, losses.avg, top1.avg)) sum all save checkpoint or epoch == args.epochs-1: | 878 | en | 0.467402 |
"""
Implements harmonic_mean() function.
"""
from .mean import mean
def harmonic_mean(x):
    """
    The `harmonic mean`_ is a kind of average that is calculated as
    the reciprocal_ of the arithmetic mean of the reciprocals.
    It is appropriate when calculating averages of rates_.

    .. _`harmonic mean`: https://en.wikipedia.org/wiki/Harmonic_mean
    .. _reciprocal: https://en.wikipedia.org/wiki/Multiplicative_inverse
    .. _rates: https://en.wikipedia.org/wiki/Rate_(mathematics)

    Equation:
        .. math::
            H = \\frac{n}{\\frac{1}{x_1}+\\frac{1}{x_2}+\\ldots+\\frac{1}{x_n}} =
            \\frac{n}{\\sum\\limits_{i=1}^n \\frac{1}{x_i}}

    Args:
        x: A list or tuple of numerical objects.

    Returns:
        A numerical object.

    Raises:
        TypeError: If the user passes something other than list or tuple.

    Examples:
        >>> harmonic_mean([1, 2, 4])
        1.7142857142857142
        >>> harmonic_mean(7)
        Traceback (most recent call last):
            ...
        TypeError: harmonic_mean() expects a list or a tuple.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # list/tuple subclasses such as namedtuples.
    if not isinstance(x, (list, tuple)):
        raise TypeError('harmonic_mean() expects a list or a tuple.')
    reciprocals = [1 / float(num) for num in x]
    # Harmonic mean = reciprocal of the arithmetic mean of the reciprocals.
    return 1 / mean(reciprocals)
| simplestatistics/statistics/harmonic_mean.py | 1,337 | The `harmonic mean`_ is a kind of average that is calculated as
the reciprocal_ of the arithmetic mean of the reciprocals.
It is appropriate when calculating averages of rates_.
.. _`harmonic mean`: https://en.wikipedia.org/wiki/Harmonic_mean
.. _reciprocal: https://en.wikipedia.org/wiki/Multiplicative_inverse
.. _rates: https://en.wikipedia.org/wiki/Rate_(mathematics)
Equation:
.. math::
H = \frac{n}{\frac{1}{x_1}+\frac{1}{x_2}+\ldots+\frac{1}{x_n}} =
\frac{n}{\sum\limits_{i=1}^n \frac{1}{x_i}}
Args:
x: A list or tuple of numerical objects.
Returns:
A numerical object.
Raises:
TypeError: If the user passes something other than list or tuple.
Examples:
>>> harmonic_mean([1, 2, 4])
1.7142857142857142
>>> harmonic_mean(7)
Traceback (most recent call last):
...
TypeError: harmonic_mean() expects a list or a tuple.
Implements harmonic_mean() function.
sum_of_reciprocals = sum(reciprocals[:]) | 968 | en | 0.742695 |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# import logging
import json
import logging
import subprocess as sp
import boto3
import pytest
from assertpy import assert_that
from utils import get_root_volume_id
@pytest.mark.regions(["ap-southeast-1"])
@pytest.mark.instances(["c5.xlarge"])
@pytest.mark.oss(["alinux2"])
@pytest.mark.schedulers(["slurm", "awsbatch"])
@pytest.mark.usefixtures("region", "instance")
def test_tag_propagation(pcluster_config_reader, clusters_factory, scheduler, os):
    """
    Verify tags from various sources are propagated to the expected resources.
    The following resources are checked for tags:
    - main CFN stack
    - head node
    - head node's root EBS volume
    - compute node (traditional schedulers)
    - compute node's root EBS volume (traditional schedulers)
    - shared EBS volume
    """
    # Tags expected to come from the cluster config file vs. those injected
    # automatically by ParallelCluster (version and cluster-name tags).
    config_file_tags = {"ConfigFileTag": "ConfigFileTagValue"}
    version_tags = {"parallelcluster:version": get_pcluster_version()}
    cluster_config = pcluster_config_reader()
    cluster = clusters_factory(cluster_config)
    cluster_name_tags = {"parallelcluster:cluster-name": cluster.name}
    # Each entry: the resource under test, a callable that fetches its tags,
    # the tag dicts that must all be present, optional kwargs for the getter,
    # and an optional "skip" flag (compute-node checks don't apply to awsbatch).
    test_cases = [
        {
            "resource": "Main CloudFormation Stack",
            "tag_getter": get_main_stack_tags,
            "expected_tags": (version_tags, config_file_tags),
        },
        {
            "resource": "Head Node",
            "tag_getter": get_head_node_tags,
            "expected_tags": (
                cluster_name_tags,
                {"Name": "HeadNode", "parallelcluster:node-type": "HeadNode"},
            ),
        },
        {
            "resource": "Head Node Root Volume",
            "tag_getter": get_head_node_root_volume_tags,
            "expected_tags": (cluster_name_tags, {"parallelcluster:node-type": "HeadNode"}),
            "tag_getter_kwargs": {"cluster": cluster, "os": os},
        },
        {
            "resource": "Compute Node",
            "tag_getter": get_compute_node_tags,
            "expected_tags": (
                cluster_name_tags,
                {"Name": "Compute", "parallelcluster:node-type": "Compute"},
                config_file_tags,
            ),
            "skip": scheduler == "awsbatch",
        },
        {
            "resource": "Compute Node Root Volume",
            "tag_getter": get_compute_node_root_volume_tags,
            "expected_tags": (
                cluster_name_tags,
                {"parallelcluster:node-type": "Compute"},
                # Only slurm propagates config tags to the compute root volume.
                config_file_tags if scheduler == "slurm" else {},
            ),
            "tag_getter_kwargs": {"cluster": cluster, "os": os},
            "skip": scheduler == "awsbatch",
        },
        {
            "resource": "Shared EBS Volume",
            "tag_getter": get_shared_volume_tags,
            "expected_tags": (version_tags, config_file_tags),
        },
    ]
    for test_case in test_cases:
        if test_case.get("skip"):
            continue
        logging.info("Verifying tags were propagated to %s", test_case.get("resource"))
        tag_getter = test_case.get("tag_getter")
        # Assume tag getters use lone cluster object arg if none explicitly given
        tag_getter_args = test_case.get("tag_getter_kwargs", {"cluster": cluster})
        observed_tags = tag_getter(**tag_getter_args)
        expected_tags = test_case["expected_tags"]
        # "contains" allows extra tags on the resource beyond the expected set.
        assert_that(observed_tags).contains(*convert_tags_dicts_to_tags_list(expected_tags))
def convert_tags_dicts_to_tags_list(tags_dicts):
    """Convert dicts of the form {key: value} to a list like [{"Key": key, "Value": value}]."""
    # Flatten every dict in order; insertion order of each dict is preserved.
    return [
        {"Key": key, "Value": value}
        for tags_dict in tags_dicts
        for key, value in tags_dict.items()
    ]
def get_cloudformation_tags(region, stack_name):
    """
    Return the tags for the CFN stack with the given name
    The returned values is a list like the following:
    [
        {'Key': 'Key2', 'Value': 'Value2'},
        {'Key': 'Key1', 'Value': 'Value1'},
    ]
    """
    cfn_client = boto3.client("cloudformation", region_name=region)
    stacks = cfn_client.describe_stacks(StackName=stack_name)["Stacks"]
    # describe_stacks with an explicit StackName returns exactly one stack.
    return stacks[0]["Tags"]
def get_main_stack_tags(cluster):
    """Return the tags for the cluster's main CFN stack."""
    # Delegates to get_cloudformation_tags using the cluster's region/stack name.
    return get_cloudformation_tags(cluster.region, cluster.cfn_name)
def get_head_node_instance_id(cluster):
    """Return the given cluster's head node's instance ID.
    The ID is looked up in the cluster's CloudFormation resources mapping;
    None is returned when no "HeadNode" resource is present.
    """
    cfn_resources = cluster.cfn_resources
    return cfn_resources.get("HeadNode")
def get_ec2_instance_tags(instance_id, region):
    """Return a list of tags associated with the given EC2 instance."""
    logging.info("Getting tags for instance %s", instance_id)
    ec2_client = boto3.client("ec2", region_name=region)
    response = ec2_client.describe_instances(InstanceIds=[instance_id])
    # A single instance ID yields one reservation containing one instance.
    instance = response.get("Reservations")[0].get("Instances")[0]
    return instance.get("Tags")
def get_tags_for_volume(volume_id, region):
    """Return the tags attached to the given EBS volume."""
    logging.info("Getting tags for volume %s", volume_id)
    ec2_client = boto3.client("ec2", region_name=region)
    volumes = ec2_client.describe_volumes(VolumeIds=[volume_id]).get("Volumes")
    return volumes[0].get("Tags")
def get_head_node_root_volume_tags(cluster, os):
    """Return the given cluster's head node's root volume's tags."""
    # Resolve the head node instance first, then its root EBS volume
    # (root device name depends on the OS, hence the `os` argument).
    head_node_instance_id = get_head_node_instance_id(cluster)
    root_volume_id = get_root_volume_id(head_node_instance_id, cluster.region, os)
    return get_tags_for_volume(root_volume_id, cluster.region)
def get_head_node_tags(cluster):
    """Return the given cluster's head node's tags."""
    head_node_instance_id = get_head_node_instance_id(cluster)
    return get_ec2_instance_tags(head_node_instance_id, cluster.region)
def get_compute_node_root_volume_tags(cluster, os):
    """Return the given cluster's compute node's root volume's tags."""
    compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute")
    # The test cluster is expected to run exactly one compute node.
    assert_that(compute_nodes).is_length(1)
    root_volume_id = get_root_volume_id(compute_nodes[0], cluster.region, os)
    return get_tags_for_volume(root_volume_id, cluster.region)
def get_compute_node_tags(cluster):
    """Return the given cluster's compute node's tags."""
    compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute")
    # The test cluster is expected to run exactly one compute node.
    assert_that(compute_nodes).is_length(1)
    return get_ec2_instance_tags(compute_nodes[0], cluster.region)
def get_ebs_volume_tags(volume_id, region):
    """Return the tags associated with the given EBS volume."""
    # NOTE(review): functionally identical to get_tags_for_volume above
    # (minus the log line); candidates for consolidation.
    return boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Tags")
def get_shared_volume_tags(cluster):
    """Return the given cluster's EBS volume's tags."""
    # "EBS0" is the CFN logical ID of the first shared EBS volume.
    shared_volume = cluster.cfn_resources.get("EBS0")
    return get_ebs_volume_tags(shared_volume, cluster.region)
def get_pcluster_version():
    """Return the installed version of the pcluster CLI."""
    # `pcluster version` prints JSON like {"version": "x.y.z"}; extract the value.
    return json.loads(sp.check_output("pcluster version".split()).decode().strip()).get("version")
| tests/integration-tests/tests/tags/test_tag_propagation.py | 7,586 | Convert dicts of the form {key: value} to a list like [{"Key": key, "Value": value}].
Return the tags for the CFN stack with the given name
The returned values is a list like the following:
[
{'Key': 'Key2', 'Value': 'Value2'},
{'Key': 'Key1', 'Value': 'Value1'},
]
Return the given cluster's compute node's root volume's tags.
Return the given cluster's compute node's tags.
Return the tags associated with the given EBS volume.
Return a list of tags associated with the given EC2 instance.
Return the given cluster's head node's instance ID.
Return the given cluster's head node's root volume's tags.
Return the given cluster's head node's tags.
Return the tags for the cluster's main CFN stack.
Return the installed version of the pcluster CLI.
Return the given cluster's EBS volume's tags.
Return the tags attached to the given EBS volume.
Verify tags from various sources are propagated to the expected resources.
The following resources are checked for tags:
- main CFN stack
- head node
- head node's root EBS volume
- compute node (traditional schedulers)
- compute node's root EBS volume (traditional schedulers)
- shared EBS volume
Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and limitations under the License. import logging Assume tag getters use lone cluster object arg if none explicitly given | 1,774 | en | 0.750792 |
import torch
import random
import numpy as np
class InfiniteDataLoader(torch.utils.data.DataLoader):
    """A DataLoader that never raises StopIteration.
    When the underlying dataset is exhausted, a fresh epoch is started
    transparently, so callers can keep calling next() forever.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dataset_iterator = super().__iter__()
    def __iter__(self):
        # The loader itself acts as the (endless) iterator.
        return self
    def __next__(self):
        try:
            return next(self.dataset_iterator)
        except StopIteration:
            # Epoch finished: rewind by building a new iterator and retry once.
            self.dataset_iterator = super().__iter__()
            return next(self.dataset_iterator)
def make_deterministic(seed=0):
    """Make results deterministic. If seed == -1, do not make deterministic.
    Running your script in a deterministic way might slow it down.
    Note that for some packages (eg: sklearn's PCA) this function is not enough.
    """
    seed = int(seed)
    if seed == -1:
        return
    # Seed every RNG we rely on: stdlib, NumPy, and torch (CPU + all GPUs).
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
    # Force cuDNN to pick deterministic algorithms (disables autotuning).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def setup_logging(output_folder, exist_ok=False, console="debug",
                  info_filename="info.log", debug_filename="debug.log"):
    """Set up logging files and console output.
    Creates one file for INFO logs and one for DEBUG logs, and installs a
    sys.excepthook that logs any uncaught exception before the process exits.
    Args:
        output_folder (str): creates the folder where to save the files.
        exist_ok (boolean): if False throw a FileExistsError if output_folder already exists
        console (str):
            if == "debug" prints on console debug messages and higher
            if == "info" prints on console info messages and higher
            if == None does not use console (useful when a logger has already been set)
        info_filename (str): the name of the info file. if None, don't create info file
        debug_filename (str): the name of the debug file. if None, don't create debug file
    """
    import os
    import sys
    import logging
    import traceback
    if not exist_ok and os.path.exists(output_folder):
        raise FileExistsError(f"{output_folder} already exists!")
    os.makedirs(output_folder, exist_ok=True)
    base_formatter = logging.Formatter('%(asctime)s %(message)s', "%Y-%m-%d %H:%M:%S")
    # Configure the root logger so every module's logger inherits the handlers.
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG)
    if info_filename is not None:
        info_file_handler = logging.FileHandler(f'{output_folder}/{info_filename}')
        info_file_handler.setLevel(logging.INFO)
        info_file_handler.setFormatter(base_formatter)
        logger.addHandler(info_file_handler)
    if debug_filename is not None:
        debug_file_handler = logging.FileHandler(f'{output_folder}/{debug_filename}')
        debug_file_handler.setLevel(logging.DEBUG)
        debug_file_handler.setFormatter(base_formatter)
        logger.addHandler(debug_file_handler)
    if console is not None:
        console_handler = logging.StreamHandler()
        if console == "debug": console_handler.setLevel(logging.DEBUG)
        if console == "info": console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(base_formatter)
        logger.addHandler(console_handler)
    def my_handler(type_, value, tb):
        # Log the full traceback of an uncaught exception before exiting.
        # BUG FIX: the original passed the builtin `type` instead of the
        # hook parameter `type_` to traceback.format_exception.
        logger.info("\n" + "".join(traceback.format_exception(type_, value, tb)))
        logging.info("Experiment finished (with some errors)")
    sys.excepthook = my_handler
| commons.py | 3,395 | Make results deterministic. If seed == -1, do not make deterministic.
Running your script in a deterministic way might slow it down.
Note that for some packages (eg: sklearn's PCA) this function is not enough.
Set up logging files and console output.
Creates one file for INFO logs and one for DEBUG logs.
Args:
output_folder (str): creates the folder where to save the files.
exist_ok (boolean): if False throw a FileExistsError if output_folder already exists
debug (str):
if == "debug" prints on console debug messages and higher
if == "info" prints on console info messages and higher
if == None does not use console (useful when a logger has already been set)
info_filename (str): the name of the info file. if None, don't create info file
debug_filename (str): the name of the debug file. if None, don't create debug file | 872 | en | 0.75426 |
from bedlam import Game
from bedlam import Scene
from bedlam import Sprite
from balls import Ball
# __pragma__('skip')
document = window = Math = Date = console = 0 # Prevent complaints by optional static checker
# __pragma__('noskip')
# __pragma__('noalias', 'clear')
DEBUG = False
class PVector:
    """A mutable 2D vector with chainable operations and a small object pool.
    Instance()/Free() recycle vectors to cut per-frame allocations in the
    boid update loop (relevant when transpiled to JS via Transcrypt).
    """
    # Shared free-list for recycled vectors, bounded by pool_max_size.
    # BUG FIX: the original never defined these as class attributes, so the
    # first `cls.pool is None` check in Instance() would raise AttributeError.
    pool = []
    pool_max_size = 10
    def __init__(self, xx=0, yy=0):
        self.x = xx
        self.y = yy
    def __str__(self):
        return "PVector({},{})".format(self.x, self.y)
    def reset(self, xx, yy):
        """Overwrite both components; returns self for chaining."""
        self.x = xx
        self.y = yy
        return self
    def copy(self):
        """Return a (possibly pooled) vector with the same components."""
        return PVector.Instance(self.x, self.y)
    def add(self, v):
        self.x = self.x + v.x
        self.y = self.y + v.y
        return self
    def sub(self, v):
        self.x = self.x - v.x
        self.y = self.y - v.y
        return self
    def mult(self, mag):
        self.x = self.x * mag
        self.y = self.y * mag
        return self
    def div(self, mag):
        self.x = self.x / mag
        self.y = self.y / mag
        return self
    def normalize(self, mag=1.0):
        """Scale to length `mag`; a zero vector (or mag == 0) becomes (0, 0)."""
        d = Math.sqrt(self.x * self.x + self.y * self.y)
        if d == 0 or mag == 0:
            self.x = 0
            self.y = 0
        else:
            self.x = mag * self.x / d
            self.y = mag * self.y / d
        return self
    def limit(self, mag):
        """Clamp the vector's length to at most `mag` (no-op on zero input)."""
        d = Math.sqrt(self.x * self.x + self.y * self.y)
        if d == 0 or mag == 0:
            return
        if d > mag:
            self.x = mag * self.x / d
            self.y = mag * self.y / d
        return self
    def mag(self):
        """Return the Euclidean length of the vector."""
        return Math.sqrt(self.x * self.x + self.y * self.y)
    @classmethod
    def Instance(cls, xx, yy):
        """Get a vector from the pool if available, else allocate a new one."""
        if len(cls.pool) == 0:
            return PVector(xx, yy)
        v = cls.pool.pop()
        v.x = xx
        v.y = yy
        return v
    @classmethod
    def Free(cls, pvector):
        """Return a vector to the pool for reuse (dropped if the pool is full)."""
        if len(cls.pool) < cls.pool_max_size:
            # BUG FIX: the original did `cls.pool.append` without calling it,
            # so freed vectors were never actually recycled.
            cls.pool.append(pvector)
class Boid(Sprite):
    """A flocking agent drawn as a triangle on an HTML canvas (Transcrypt).
    Steering follows the classic boids model: alignment, separation,
    cohesion, plus a small random noise term, each weighted by multipliers
    read from the owning game object.
    """
    def __init__(self, game, w=10):
        # Spawn at a random position with a random heading at game speed.
        Sprite.__init__(self, game, w, w)
        self.color = 'white'
        self.x = self.game.canvas.width * Math.random()
        self.y = self.game.canvas.height * Math.random()
        angle = 2 * Math.PI * Math.random()
        self.dx = self.game.speed * Math.cos(angle)
        self.dy = self.game.speed * Math.sin(angle)
    def is_close(self, sprite, dist):
        """Return True if the other sprite is within `dist`, edge to edge."""
        return self.distance(sprite) + self.width / 2 + sprite.width / 2 <= dist
    def distance(self, sprite):
        """Edge-to-edge distance to another sprite, floored at 0 on overlap."""
        vx = self.x - sprite.x
        vy = self.y - sprite.y
        self_radius = (self.width + self.height) / 2
        sprite_radius = (sprite.width + sprite.height) / 2
        dist = Math.sqrt(vx * vx + vy * vy) - (self_radius + sprite_radius)
        return dist if dist >= 0 else 0
    def draw(self, ctx):
        """Render the boid as a filled triangle pointing along its velocity.
        In DEBUG mode, also draws the cohesion and separation radii.
        """
        global DEBUG
        Sprite.draw(self, ctx)
        angle = self._angle()
        ctx.save()
        ctx.globalCompositeOperation = 'source-over'
        if DEBUG:
            ctx.strokeStyle = '#808080'
            ctx.beginPath()
            ctx.arc(self.x, self.y, self.game.cohesion_radius, 0, 2 * Math.PI)
            ctx.stroke()
            ctx.strokeStyle = '#696969'
            ctx.beginPath()
            ctx.arc(self.x, self.y, self.game.separation_radius + self.width/2, 0, 2 * Math.PI)
            ctx.stroke()
        ctx.lineWidth = 2
        ctx.strokeStyle = self.color
        ctx.fillStyle = self.color
        ctx.beginPath()
        ctx.translate(self.x, self.y)
        ctx.rotate(angle)
        ctx.moveTo(-1 * self.width, -0.5 * self.width)
        ctx.lineTo(self.width, 0)
        ctx.lineTo(-1 * self.width, 0.5 * self.width)
        ctx.lineTo(-1 * self.width, -0.5 * self.width)
        ctx.translate(-1 * self.originX, -1 * self.originY)
        ctx.fill()
        ctx.stroke()
        ctx.restore()
    def _angle(self, a=0.0):
        """Heading of the velocity vector plus offset `a`, normalized to [0, 2*pi)."""
        angle = Math.atan2(self.dy, self.dx) + a
        while angle > 2 * Math.PI:
            angle = angle - 2 * Math.PI
        while angle < 0:
            angle = angle + 2 * Math.PI
        return angle
    def _find(self, boid, dist, clazz=None):
        # Neighbor query is delegated to the current scene.
        return self.game.currentScene.find(boid, dist, clazz)
    def update(self, delta_time):
        """Advance one simulation step of `delta_time` milliseconds.
        Combines the weighted steering forces, clamps speed, integrates
        position, and wraps around the canvas edges (toroidal world).
        """
        global DEBUG
        move = PVector.Instance(self.dx, self.dy)
        allignment = self.__calc_allignment().mult(self.game.allignment_mult)
        separation = self.__calc_separation().mult(self.game.separation_mult)
        cohesion = self.__calc_cohesion().mult(self.game.cohesion_mult)
        noise = self.__calc_random_noise().mult(self.game.noise_mult)
        if DEBUG:
            console.log('time={} : allign={} : avoid={} : noise={} : cohese={}'.format(delta_time, allignment.mag(),
                                                                                       separation.mag(), noise.mag(),
                                                                                       cohesion.mag()))
        move.add(allignment)
        move.add(separation)
        move.add(cohesion)
        move.add(noise)
        move.limit(self.game.speed)
        self.dx = move.x
        self.dy = move.y
        # delta_time is in ms; velocities are px/s.
        self.x = self.x + self.dx * delta_time / 1000.0
        if self.x < 0:
            self.x = self.x + self.game.canvas.width
        elif self.x > self.game.canvas.width:
            self.x = self.x - self.game.canvas.width
        self.y = self.y + self.dy * delta_time / 1000.0
        if self.y < 0:
            self.y = self.y + self.game.canvas.height
        elif self.y > self.game.canvas.height:
            self.y = self.y - self.game.canvas.height
        # NOTE(review): `cohesion` is never returned via PVector.Free here,
        # so it is not recycled like the other temporaries — confirm intent.
        PVector.Free(move)
        PVector.Free(allignment)
        PVector.Free(separation)
        PVector.Free(noise)
    def __calc_allignment(self):
        # Steer toward neighbors' headings, weighted by inverse distance.
        steer = PVector.Instance(0, 0)
        for sprite in self._find(self, self.game.allignment_radius, Boid):
            d = self.distance(sprite)
            if d == 0:
                continue
            copy = PVector.Instance(sprite.dx, sprite.dy)
            copy.normalize()
            copy.div(d)
            steer.add(copy)
        return steer
    def __calc_separation(self):
        # Steer away from any nearby sprite, weighted by inverse distance.
        steer = PVector.Instance(0, 0)
        for sprite in self._find(self, self.game.separation_radius, Sprite):
            d = self.distance(sprite)
            if d == 0:
                continue
            diff = PVector(self.x - sprite.x, self.y - sprite.y)
            diff.normalize()
            diff.div(d)
            steer.add(diff)
        return steer
    def __calc_random_noise(self):
        # Uniform random vector with each component in [-1, 1).
        return PVector.Instance(Math.random() * 2 - 1, Math.random() * 2 - 1)
    def __calc_cohesion(self):
        # Steer toward the centroid of neighboring boids (small fixed gain).
        steer = PVector.Instance(0, 0)
        count = 0
        for sprite in self._find(self, self.game.cohesion_radius, Boid):
            steer.x = steer.x + sprite.x
            steer.y = steer.y + sprite.y
            count = count + 1
        if count > 0:
            steer.x = steer.x / count
            steer.y = steer.y / count
            steer.normalize(0.05)
        return steer
return steer
class BoidsScene(Scene):
    """Scene holding the boid flock plus a few Ball obstacles."""
    def __init__(self, game, name=None, num_boids=8, w=10):
        Scene.__init__(self, game, name)
        self.color = 'black'
        for n in range(num_boids):
            self.append(Boid(self.game, w))
        # A few balls act as moving obstacles the boids must avoid.
        for n in range(3):
            self.append(Ball(self.game, 30, 10, 'green'))
        for n in range(1):
            self.append(Ball(self.game, 30, 20, 'red'))
    def _clear_screen(self, ctx):
        # Fill the whole canvas; 'copy' replaces existing pixels outright.
        ctx.save()
        ctx.globalCompositeOperation = 'copy'
        ctx.fillStyle = self.color
        ctx.fillRect(0, 0, self.game.canvas.width, self.game.canvas.height)
        ctx.restore()
    def find(self, boid, dist, clazz=None):
        """Return all sprites (optionally of type `clazz`) within `dist` of
        `boid`, excluding the boid itself. Linear scan over the scene.
        """
        sprite_list = []
        for sprite in self:
            if clazz is not None and not isinstance(sprite, clazz):
                continue
            if sprite == boid:
                continue
            if boid.is_close(sprite, dist):
                sprite_list.append(sprite)
        return sprite_list
class BoidsGame(Game):
    """Top-level game: tunes the flocking parameters and installs the scene."""
    def __init__(self, name='Boids', loop_time=20):
        Game.__init__(self, name, loop_time)
        sprite_width = 5
        # Radii scale with the sprite size so behavior is size-independent.
        global_scale = sprite_width / 10.0
        self.speed = 100
        self.allignment_radius = 180 * global_scale
        self.separation_radius = 25 * global_scale
        self.cohesion_radius = self.allignment_radius
        # Relative weights of the steering forces.
        self.allignment_mult = 3
        self.separation_mult = 30
        self.cohesion_mult = 25
        self.noise_mult = 5
        self.append(BoidsScene(self, 'BOIDS', 32, sprite_width))
    @staticmethod
    def set_debug(b):
        """Enable debug drawing/logging when passed the string 'true'."""
        global DEBUG
        if b is not None and b == 'true':
            DEBUG = True
| boids.py | 8,899 | __pragma__('skip') Prevent complaints by optional static checker __pragma__('noskip') __pragma__('noalias', 'clear') | 116 | en | 0.299979 |
# -*- coding: utf-8 -*-
from pandas import DataFrame
from pandas_ta.utils import get_offset, verify_series
def donchian(high, low, lower_length=None, upper_length=None, offset=None, **kwargs):
    """Indicator: Donchian Channels (DC)"""
    # Validate arguments
    high = verify_series(high)
    low = verify_series(low)
    lower_length = int(lower_length) if lower_length and lower_length > 0 else 20
    upper_length = int(upper_length) if upper_length and upper_length > 0 else 20
    lower_min_periods = kwargs.get("lower_min_periods")
    lower_min_periods = int(lower_min_periods) if lower_min_periods is not None else lower_length
    upper_min_periods = kwargs.get("upper_min_periods")
    upper_min_periods = int(upper_min_periods) if upper_min_periods is not None else upper_length
    offset = get_offset(offset)
    # Calculate Result: rolling extremes form the channel, midline is their average
    lower = low.rolling(lower_length, min_periods=lower_min_periods).min()
    upper = high.rolling(upper_length, min_periods=upper_min_periods).max()
    mid = 0.5 * (lower + upper)
    # Handle fills
    if "fillna" in kwargs:
        for band in (lower, mid, upper):
            band.fillna(kwargs["fillna"], inplace=True)
    if "fill_method" in kwargs:
        for band in (lower, mid, upper):
            band.fillna(method=kwargs["fill_method"], inplace=True)
    # Offset
    if offset != 0:
        lower, mid, upper = lower.shift(offset), mid.shift(offset), upper.shift(offset)
    # Name and Categorize it
    suffix = f"{lower_length}_{upper_length}"
    lower.name = f"DCL_{suffix}"
    mid.name = f"DCM_{suffix}"
    upper.name = f"DCU_{suffix}"
    lower.category = mid.category = upper.category = "volatility"
    # Prepare DataFrame to return
    dcdf = DataFrame({lower.name: lower, mid.name: mid, upper.name: upper})
    dcdf.name = f"DC_{suffix}"
    dcdf.category = mid.category
    return dcdf
# Attach the user-facing docstring to donchian(). Kept out of the function
# body so the implementation stays compact (pandas_ta house style).
donchian.__doc__ = \
"""Donchian Channels (DC)
Donchian Channels are used to measure volatility, similar to
Bollinger Bands and Keltner Channels.
Sources:
    https://www.tradingview.com/wiki/Donchian_Channels_(DC)
Calculation:
    Default Inputs:
        lower_length=upper_length=20
    LOWER = low.rolling(lower_length).min()
    UPPER = high.rolling(upper_length).max()
    MID = 0.5 * (LOWER + UPPER)
Args:
    high (pd.Series): Series of 'high's
    low (pd.Series): Series of 'low's
    lower_length (int): The period for the lower channel. Default: 20
    upper_length (int): The period for the upper channel. Default: 20
    offset (int): How many periods to offset the result. Default: 0
Kwargs:
    fillna (value, optional): pd.DataFrame.fillna(value)
    fill_method (value, optional): Type of fill method
Returns:
    pd.DataFrame: lower, mid, upper columns.
"""
| pandas_ta/volatility/donchian.py | 2,935 | Indicator: Donchian Channels (DC)
-*- coding: utf-8 -*- Validate arguments Calculate Result Handle fills Offset Name and Categorize it Prepare DataFrame to return | 164 | en | 0.391952 |
import unittest
from unittest.mock import Mock
# PyATS
from pyats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
from genie.libs.parser.asa.show_vpn import ShowVPNLoadBalancing
# ============================================
# unit test for 'show vpn load-balancing'
# =============================================
class TestShowVPNLoadBalancing(unittest.TestCase):
    """
    unit test for show vpn load-balancing
    """
    device = Device(name='aDevice')
    # Simulated device with no CLI output at all.
    empty_output = {'execute.return_value': ''}
    maxDiff = None
    # Expected structured dict after parsing the golden CLI output below.
    golden_parsed_output = {
        'cluster_ip': 'cluster1',
        'encryption': 'Enabled',
        'failover': 'n/a',
        'peers': {
            1: {
                'load_balancing_version': 4,
                'model': 'ASA-VASA',
                'pri': 5,
                'public_ip': '10.246.0.1*',
                'role': 'Master',
            },
            2: {
                'load_balancing_version': 4,
                'model': 'ASA-VASA',
                'pri': 5,
                'public_ip': '10.246.0.2',
                'role': 'Backup',
            },
        },
        'peers_count': 1,
        'role': 'Master',
        'status': 'Enabled',
        'total_license_load': {
            1: {
                'anyconnect_premium_essentials': {
                    'limit': 250,
                    'load': 0,
                    'used': 0,
                },
                'other_vpn': {
                    'limit': 250,
                    'load': 1,
                    'used': 2,
                },
                'public_ip': '10.246.0.1*',
            },
            2: {
                'anyconnect_premium_essentials': {
                    'limit': 0,
                    'load': 0,
                    'used': 0,
                },
                'other_vpn': {
                    'limit': 0,
                    'load': 0,
                    'used': 0,
                },
                'public_ip': '10.246.0.2',
            },
        },
    }
    # Raw CLI capture of `show vpn load-balancing` used as parser input.
    golden_output = {'execute.return_value': '''
        vASA-VPN-20#show vpn load-balancing
        --------------------------------------------------------------------------
        Status Role Failover Encryption Peers Cluster IP
        --------------------------------------------------------------------------
        Enabled Master n/a Enabled 1 cluster1
        Peers:
        --------------------------------------------------------------------------
        Role Pri Model Load-Balancing Version Public IP
        --------------------------------------------------------------------------
        Master 5 ASA-VASA 4 10.246.0.1*
        Backup 5 ASA-VASA 4 10.246.0.2
        Total License Load:
        --------------------------------------------------------------------------
        AnyConnect Premium/Essentials Other VPN Public IP
        ----------------------------- ---------------------
        Limit Used Load Limit Used Load
        --------------------------------------------------------------------------
        250 0 0% 250 2 1% 10.246.0.1*
        0 0 0% 0 0 0% 10.246.0.2
    '''}
    def test_empty(self):
        """Empty CLI output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowVPNLoadBalancing(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()
    def test_golden(self):
        """Golden CLI output must parse into golden_parsed_output exactly."""
        self.device = Mock(**self.golden_output)
        route_obj = ShowVPNLoadBalancing(device=self.device)
        parsed_output = route_obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)
# Allow running this test module directly: `python test_show_vpn.py`.
if __name__ == '__main__':
    unittest.main()
PyATS ============================================ unit test for 'show vpn load-balancing' ============================================= | 176 | en | 0.427064 |
# <editor-fold desc="Basic Imports">
import os
import os.path as p
import requests
from time import time
from argparse import ArgumentParser
import sys
sys.path.append(p.join(p.dirname(__file__), '..'))
sys.path.append(p.join(p.dirname(__file__), '../..'))
# </editor-fold>
# <editor-fold desc="Parse Command Line Args">
# Default locations for the progress tracker and the pre-trained base index,
# both resolved relative to this script's directory.
prog_file_path = p.join(p.dirname(__file__), 'progress.txt')
relative_base_path = '../../base_indexes/USE_lite_base_IVF16K.index'
base_index_path = p.abspath(p.join(p.dirname(__file__), relative_base_path))
arp = ArgumentParser(description='Vectorize Sentences for Searchable Index.')
arp.add_argument('input_dir', help='Path to raw news dir.')
arp.add_argument('output_dir', help='Path to saved index dir.')
arp.add_argument('-p', '--progress_file', default=prog_file_path,
                 help='For keeping track of news that has been preprocessed. '
                      'Default: dig-text-similarity-search/progress.txt')
arp.add_argument('-b', '--base_index_path', default=base_index_path,
                 help='Path to pre-trained empty faiss index. '
                      'Default: dig-text-similarity-search/base_indexes/*.index')
arp.add_argument('-l', '--large', action='store_true',
                 help='Toggle large Universal Sentence Encoder (Transformer NN).')
arp.add_argument('-m', '--m_per_batch', type=int, default=512*128,
                 help='Sentences per batch.')
arp.add_argument('-n', '--n_per_minibatch', type=int, default=64,
                 help='Sentences per mini-batch.')
arp.add_argument('-v', '--verbose', action='store_true',
                 help='Shows progress of batch vectorization.')
arp.add_argument('-t', '--num_threads', default='2',
                 help='Set CPU thread budget for numpy.')
# NOTE(review): despite the name, --no_delete defaults to True, which DELETES
# per-batch indexes after merging; passing -d keeps them. Confirm intent.
arp.add_argument('-d', '--no_delete', action='store_false', default=True,
                 help='Keeps faiss indexes for each batch after merging on-disk.')
arp.add_argument('-a', '--add_shard', action='store_true',
                 help='Adds shard to running similarity server.')
arp.add_argument('-u', '--url', default='http://localhost:5954/faiss',
                 help='Port handling similarity server.')
arp.add_argument('-T', '--TF_logging', action='store_false', default=True,
                 help='Increase verbosity of TensorFlow.')
opts = arp.parse_args()
# </editor-fold>
# Cap BLAS/OpenMP thread pools BEFORE numpy (via dt_sim) is imported below;
# these env vars only take effect if set prior to library initialization.
# opts.num_threads is kept as a string because os.environ requires str values.
if opts.num_threads:
    print(f'\nRestricting numpy to {opts.num_threads} thread(s)\n')
    os.environ['OPENBLAS_NUM_THREADS'] = opts.num_threads
    os.environ['NUMEXPR_NUM_THREADS'] = opts.num_threads
    os.environ['MKL_NUM_THREADS'] = opts.num_threads
    os.environ['OMP_NUM_THREADS'] = opts.num_threads
from dt_sim.data_reader.jl_io_funcs import check_all_docs, get_all_docs
from dt_sim.data_reader.misc_io_funcs import check_unique, clear_dir
from dt_sim.vectorizer.sentence_vectorizer import SentenceVectorizer
from dt_sim.indexer.index_builder import OnDiskIVFBuilder
from dt_sim.processor.corpus_processor import CorpusProcessor
# Suppress TF logging
# NOTE(review): -T/--TF_logging is store_false with default True, so the
# DEFAULT suppresses TF logs and passing -T enables them — the help text
# ("Increase verbosity") describes the flag, not the default. Confirm intent.
if opts.TF_logging:
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Init: vectorizer (Universal Sentence Encoder), on-disk IVF index builder,
# and the corpus processor that ties them together.
sv = SentenceVectorizer(large=opts.large)
idx_bdr = OnDiskIVFBuilder(path_to_base_index=opts.base_index_path)
cp = CorpusProcessor(vectorizer=sv, index_builder=idx_bdr,
                     progress_file=opts.progress_file)
# Track progress
prepped_news = cp.track_preprocessing(cp.progress_file, verbose=opts.verbose)
raw_news = cp.get_news_paths(opts.input_dir, verbose=opts.verbose)
candidates = cp.candidate_files(prepped_news, raw_news, verbose=opts.verbose)
file_to_process = candidates[:1]  # Preprocesses one news.jl per call
def main(raw_jl, output_dir: str = opts.output_dir,
         m_per_batch: int = opts.m_per_batch, n_per_minibatch: int = opts.n_per_minibatch,
         no_delete: bool = opts.no_delete, verbose: bool = opts.verbose,
         add_shard: bool = opts.add_shard, url: str = opts.url):
    """Vectorize one news .jl file into an on-disk faiss shard.
    Pipeline: scan the file, vectorize sentences batch by batch into
    sub-indexes, merge them into a single IVF shard, record progress,
    optionally clean up and register the shard with a running server.
    Note: default argument values are captured from `opts` at import time.
    """
    subidx_dir, shard_date = cp.init_paths(raw_jl)
    if verbose:
        print(f'Will process: {raw_jl}\n')
    # Check File Content
    if verbose:
        print(f'\nReading file: {raw_jl}')
    jl_stats = check_all_docs(raw_jl, batch_size=m_per_batch)
    (doc_count, line_count, junk, n_batches) = jl_stats
    if verbose:
        print(f'* Found {doc_count} good documents with {line_count} total sentences\n'
              f'* Will skip {junk} junk documents\n'
              f'* Processing {n_batches} batches\n')
    # Preprocess
    t_start = time()
    doc_batch_gen = get_all_docs(raw_jl, batch_size=m_per_batch)
    for i, (batched_sents, batched_ids) in enumerate(doc_batch_gen):
        t_0 = time()
        if verbose:
            print(f'  Starting doc batch:  {i+1:3d}')
        subidx = str(raw_jl.split('/')[-1]).replace('.jl', f'_{i:03d}_sub.index')
        subidx_path = p.join(subidx_dir, subidx)
        if p.exists(subidx_path):
            # Resume support: reuse a sub-index left over from a prior run.
            print(f'  File exists: {subidx_path} \n  Skipping... ')
            cp.index_builder.include_subidx_path(subidx_path)
        else:
            # Vectorize
            emb_batch, id_batch = cp.batch_vectorize(
                text_batch=batched_sents, id_batch=batched_ids,
                n_minibatch=n_per_minibatch, very_verbose=False
            )
            t_vect = time()
            if verbose:
                print(f'  * Vectorized in {t_vect - t_0:6.2f}s')
            # Make faiss subindex
            subidx_path = check_unique(subidx_path)
            cp.index_builder.generate_subindex(subidx_path, emb_batch, id_batch)
            t_subidx = time()
            if verbose:
                print(f'  * Subindexed in {t_subidx - t_vect:6.2f}s')
            # Clear graph: free batch memory and tear down the TF session
            # to keep the long-running process's footprint bounded.
            del emb_batch, batched_sents, id_batch
            cp.vectorizer.close_session()
            t_reset = time()
            if verbose:
                print(f'  * Cleared TF in {t_reset - t_subidx:6.2f}s')
            # Restart TF session if necessary
            if i < n_batches - 1:
                cp.vectorizer.start_session()
                if verbose:
                    print(f'  * Started TF in {time() - t_reset:6.2f}s')
        if verbose:
            mp, sp = divmod(time() - t_start, 60)
            print(f'  Completed doc batch: {i+1:3d}/{n_batches}       '
                  f'  Total time passed: {int(mp):3d}m{sp:0.2f}s\n')
    # Merge
    # TODO: Title indexes
    t_merge = time()
    merged_index_path = shard_date + '_all.index'
    merged_index_path = p.join(output_dir, merged_index_path)
    merged_index_path = check_unique(merged_index_path)
    merged_ivfdata_path = shard_date + '_all.ivfdata'
    merged_ivfdata_path = p.join(output_dir, merged_ivfdata_path)
    merged_ivfdata_path = check_unique(merged_ivfdata_path)
    if verbose:
        print(f'\n  Merging {merged_index_path.split("/")[-1]} on-disk')
    assert cp.index_builder.index_path_clear(merged_index_path)
    assert cp.index_builder.index_path_clear(merged_ivfdata_path, '.ivfdata')
    n_vect = cp.index_builder.merge_IVFs(index_path=merged_index_path,
                                         ivfdata_path=merged_ivfdata_path)
    if verbose:
        mm, sm = divmod(time() - t_merge, 60)
        print(f'  Merged subindexes ({n_vect} vectors) in: {int(mm):3d}m{sm:0.2f}s')
    # Record progress
    cp.record_progress(raw_jl)
    # Clear sub.index files after merge
    # NOTE(review): despite its name, no_delete=True (the default) DELETES
    # the per-batch sub-indexes — see the --no_delete argparse flag above.
    if no_delete:
        clear_dir(subidx_dir)
        if verbose:
            print('\n  Cleared sub.index files')
    if add_shard:
        # Best-effort registration with the similarity server; failure to
        # register must not undo the completed preprocessing work.
        try:
            url = url
            payload = {'path': merged_index_path}
            r = requests.put(url, params=payload)
            print(r.text)
        except Exception as e:
            print(f'Shard was not added because an exception occurred: {e}')
# Process at most one candidate file per invocation (see file_to_process above).
if __name__ == '__main__':
    if len(file_to_process):
        jl = file_to_process[0]
        main(raw_jl=jl)
    else:
        print('Nothing to process.')
| py_scripts/preprocessing/prep_shard.py | 7,924 | <editor-fold desc="Basic Imports"> </editor-fold> <editor-fold desc="Parse Command Line Args"> </editor-fold> Suppress TF logging Init Track progress Preprocesses one news.jl per call Check File Content Preprocess Vectorize Make faiss subindex Clear graph Restart TF session if necessary Merge TODO: Title indexes Record progress Clear sub.index files after merge | 363 | en | 0.417188 |
from __future__ import print_function, absolute_import, division
import argparse
import os
import zipfile
import tarfile
import numpy as np
import h5py
from glob import glob
from shutil import rmtree
import sys
sys.path.append('../')
from common.h36m_dataset import H36M_NAMES
# Output archive names for the MPII-pretrained and H36M-fine-tuned detections.
output_filename_pt = 'data_2d_h36m_sh_pt_mpii'
output_filename_ft = 'data_2d_h36m_sh_ft_h36m'
subjects = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
# Map Human3.6M camera serial numbers to camera indexes 0..3.
cam_map = {
    '54138969': 0,
    '55011271': 1,
    '58860488': 2,
    '60457274': 3,
}
metadata = {
    'num_joints': 16,
    # Left/right joint index pairs, expressed first in SH joint order and
    # remapped to H36M order below.
    'keypoints_symmetry': [
        [3, 4, 5, 13, 14, 15],
        [2, 1, 0, 12, 11, 10],
    ]
}
# Stacked Hourglass produces 16 joints. These are the names.
SH_NAMES = [''] * 16
SH_NAMES[0] = 'RFoot'
SH_NAMES[1] = 'RKnee'
SH_NAMES[2] = 'RHip'
SH_NAMES[3] = 'LHip'
SH_NAMES[4] = 'LKnee'
SH_NAMES[5] = 'LFoot'
SH_NAMES[6] = 'Hip'
SH_NAMES[7] = 'Spine'
SH_NAMES[8] = 'Thorax'
SH_NAMES[9] = 'Head'
SH_NAMES[10] = 'RWrist'
SH_NAMES[11] = 'RElbow'
SH_NAMES[12] = 'RShoulder'
SH_NAMES[13] = 'LShoulder'
SH_NAMES[14] = 'LElbow'
SH_NAMES[15] = 'LWrist'
# Permutation that goes from SH detections to H36M ordering.
SH_TO_GT_PERM = np.array([SH_NAMES.index(h) for h in H36M_NAMES if h != '' and h in SH_NAMES])
assert np.all(SH_TO_GT_PERM == np.array([6, 2, 1, 0, 3, 4, 5, 7, 8, 9, 13, 14, 15, 12, 11, 10]))
# Re-express the symmetry lists in H36M joint order.
metadata['keypoints_symmetry'][0] = [SH_TO_GT_PERM.tolist().index(h) for h in metadata['keypoints_symmetry'][0]]
metadata['keypoints_symmetry'][1] = [SH_TO_GT_PERM.tolist().index(h) for h in metadata['keypoints_symmetry'][1]]
def process_subject(subject, file_list, output):
    """Collect 2D Stacked Hourglass detections for one subject into ``output``.

    Each file name encodes ``<Action>.<camera_serial>.h5`` (underscores map to
    spaces in the action name). Poses are permuted from SH joint order to H36M
    order and stored as ``output[subject][action][camera_index]``.

    :param subject: subject name, e.g. 'S1'
    :param file_list: paths of this subject's .h5 detection files
    :param output: nested dict, mutated in place
    """
    # S11 is missing one video ('Directions' is corrupted), hence 119 files.
    expected = 119 if subject == 'S11' else 120
    assert len(file_list) == expected, \
        'Expected ' + str(expected) + ' files for subject ' + subject + ', got ' + str(len(file_list))
    for f in file_list:
        action, cam = os.path.splitext(os.path.basename(f))[0].replace('_', ' ').split('.')
        if subject == 'S11' and action == 'Directions':
            continue  # Discard corrupted video
        if action not in output[subject]:
            # One slot per camera; filled via cam_map below.
            output[subject][action] = [None, None, None, None]
        # Open read-only: h5py's historical default mode ('r+') is
        # deprecated/removed in recent versions, and we never write here.
        with h5py.File(f, 'r') as hf:
            positions = np.array(hf['poses'])
            # Reorder joints from SH convention to H36M convention.
            positions = positions[:, SH_TO_GT_PERM, :]
            output[subject][action][cam_map[cam]] = positions.astype('float32')
if __name__ == '__main__':
    # This script expects to run from the repository's "data" directory.
    if os.path.basename(os.getcwd()) != 'data':
        print('This script must be launched from the "data" directory')
        exit(0)

    parser = argparse.ArgumentParser(description='Human3.6M dataset downloader/converter')
    parser.add_argument('-pt', '--pretrained', default='', type=str, metavar='PATH', help='convert pretrained dataset')
    parser.add_argument('-ft', '--fine-tuned', default='', type=str, metavar='PATH', help='convert fine-tuned dataset')
    args = parser.parse_args()

    def _convert_and_save(glob_pattern, work_dir, out_name):
        # Shared tail of both conversion paths: collect per-subject
        # detections, write the compressed .npz, remove the scratch dir.
        print('Converting...')
        output = {}
        for subject in subjects:
            output[subject] = {}
            file_list = glob(glob_pattern.format(subject=subject))
            process_subject(subject, file_list, output)
        print('Saving...')
        np.savez_compressed(out_name, positions_2d=output, metadata=metadata)
        print('Cleaning up...')
        rmtree(work_dir)
        print('Done.')

    if args.pretrained:
        print('Converting pretrained dataset from', args.pretrained)
        print('Extracting...')
        with zipfile.ZipFile(args.pretrained, 'r') as archive:
            archive.extractall('sh_pt')
        _convert_and_save('sh_pt/h36m/{subject}/StackedHourglass/*.h5',
                          'sh_pt', output_filename_pt)

    if args.fine_tuned:
        print('Converting fine-tuned dataset from', args.fine_tuned)
        print('Extracting...')
        with tarfile.open(args.fine_tuned, 'r:gz') as archive:
            archive.extractall('sh_ft')
        _convert_and_save('sh_ft/{subject}/StackedHourglassFineTuned240/*.h5',
                          'sh_ft', output_filename_ft)
| data/prepare_data_2d_h36m_sh.py | 4,384 | Stacked Hourglass produces 16 joints. These are the names. Permutation that goes from SH detections to H36M ordering. Discard corrupted video positions = hf['poses'].value | 182 | en | 0.865426 |
# -*- coding: utf-8 -*-
# pylint: disable=C,R,W
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sqlparse
from sqlparse.sql import Identifier, IdentifierList
from sqlparse.tokens import Keyword, Name
RESULT_OPERATIONS = {'UNION', 'INTERSECT', 'EXCEPT'}
PRECEDES_TABLE_NAME = {'FROM', 'JOIN', 'DESC', 'DESCRIBE', 'WITH'}
# TODO: some sql_lab logic here.
class SupersetQuery(object):
    """Wrapper around a SQL string parsed with ``sqlparse``.

    Walks the parsed token tree once at construction time to collect the
    set of physical table names the statement references (aliases removed),
    and can reformat a SELECT into a CREATE TABLE AS statement.
    """
    def __init__(self, sql_statement):
        self.sql = sql_statement
        self._table_names = set()
        self._alias_names = set()
        # TODO: multistatement support
        logging.info('Parsing with sqlparse statement {}'.format(self.sql))
        self._parsed = sqlparse.parse(self.sql)
        for statement in self._parsed:
            self.__extract_from_token(statement)
        # Aliases collected during the walk are not real tables; drop them.
        self._table_names = self._table_names - self._alias_names
    @property
    def tables(self):
        """Set of table names referenced by the parsed statement(s)."""
        return self._table_names
    def is_select(self):
        """Return True if the first parsed statement is a SELECT."""
        return self._parsed[0].get_type() == 'SELECT'
    def stripped(self):
        """Return the SQL with trailing whitespace/semicolons removed."""
        sql = self.sql
        if sql:
            while sql[-1] in (' ', ';', '\n', '\t'):
                sql = sql[:-1]
        return sql
    @staticmethod
    def __precedes_table_name(token_value):
        # True if the keyword contains any word (FROM, JOIN, ...) that is
        # normally followed by a table name.
        for keyword in PRECEDES_TABLE_NAME:
            if keyword in token_value:
                return True
        return False
    @staticmethod
    def __get_full_name(identifier):
        # A "schema.table" identifier parses as [name, '.', name]; rebuild
        # the dotted form, otherwise fall back to the bare real name.
        if len(identifier.tokens) > 1 and identifier.tokens[1].value == '.':
            return '{}.{}'.format(identifier.tokens[0].value,
                                  identifier.tokens[2].value)
        return identifier.get_real_name()
    @staticmethod
    def __is_result_operation(keyword):
        # UNION / INTERSECT / EXCEPT introduce another SELECT, so table
        # names may still follow after this keyword.
        for operation in RESULT_OPERATIONS:
            if operation in keyword.upper():
                return True
        return False
    @staticmethod
    def __is_identifier(token):
        return (
            isinstance(token, IdentifierList) or isinstance(token, Identifier))
    def __process_identifier(self, identifier):
        # exclude subselects: an identifier without parentheses is a plain
        # table reference and can be recorded directly.
        if '(' not in '{}'.format(identifier):
            self._table_names.add(SupersetQuery.__get_full_name(identifier))
            return
        # store aliases (only reached for parenthesised subselects)
        if hasattr(identifier, 'get_alias'):
            self._alias_names.add(identifier.get_alias())
        if hasattr(identifier, 'tokens'):
            # some aliases are not parsed properly
            if identifier.tokens[0].ttype == Name:
                self._alias_names.add(identifier.tokens[0].value)
        # Recurse into the subselect to pick up its own table references.
        self.__extract_from_token(identifier)
    def as_create_table(self, table_name, overwrite=False):
        """Reformats the query into the create table as query.
        Works only for the single select SQL statements, in all other cases
        the sql query is not modified.
        :param superset_query: string, sql query that will be executed
        :param table_name: string, will contain the results of the
        query execution
        :param overwrite, boolean, table table_name will be dropped if true
        :return: string, create table as query
        """
        # TODO(bkyryliuk): enforce that all the columns have names.
        # Presto requires it for the CTA operation.
        # TODO(bkyryliuk): drop table if allowed, check the namespace and
        # the permissions.
        # TODO raise if multi-statement
        exec_sql = ''
        sql = self.stripped()
        if overwrite:
            exec_sql = 'DROP TABLE IF EXISTS {table_name};\n'
        exec_sql += 'CREATE TABLE {table_name} AS \n{sql}'
        # NOTE(review): format(**locals()) interpolates table_name and sql
        # from local scope; table_name is not escaped or validated here.
        return exec_sql.format(**locals())
    def __extract_from_token(self, token):
        # Recursively walk ``token``, recording identifiers that follow a
        # FROM/JOIN-style keyword and recursing into grouped sub-tokens.
        if not hasattr(token, 'tokens'):
            return
        # Flips to True right after a keyword that precedes table names.
        table_name_preceding_token = False
        for item in token.tokens:
            if item.is_group and not self.__is_identifier(item):
                self.__extract_from_token(item)
            if item.ttype in Keyword:
                if SupersetQuery.__precedes_table_name(item.value.upper()):
                    table_name_preceding_token = True
                    continue
            if not table_name_preceding_token:
                continue
            if item.ttype in Keyword:
                if SupersetQuery.__is_result_operation(item.value):
                    # A set operation keeps us inside a table-name context.
                    table_name_preceding_token = False
                    continue
                # FROM clause is over
                break
            if isinstance(item, Identifier):
                self.__process_identifier(item)
            if isinstance(item, IdentifierList):
                for token in item.tokens:
                    if SupersetQuery.__is_identifier(token):
                        self.__process_identifier(token)
| superset/sql_parse.py | 4,910 | Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
:param superset_query: string, sql query that will be executed
:param table_name: string, will contain the results of the
query execution
:param overwrite, boolean, table table_name will be dropped if true
:return: string, create table as query
-*- coding: utf-8 -*- pylint: disable=C,R,W TODO: some sql_lab logic here. TODO: multistatement support exclude subselects store aliases some aliases are not parsed properly TODO(bkyryliuk): enforce that all the columns have names. Presto requires it for the CTA operation. TODO(bkyryliuk): drop table if allowed, check the namespace and the permissions. TODO raise if multi-statement FROM clause is over | 824 | en | 0.705335 |
import dynet as dy
import time
import random
LAYERS = 2
INPUT_DIM = 256 #50 #256
HIDDEN_DIM = 256 # 50 #1024
VOCAB_SIZE = 0
from collections import defaultdict
from itertools import count
import argparse
import sys
import util
class RNNLanguageModel:
    """RNN language model built on DyNet.

    Holds an RNN builder, an input embedding lookup table, and a softmax
    output projection (``R`` and ``bias``) over the vocabulary.
    """
    def __init__(self, model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder):
        self.builder = builder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
        # Embedding table: one INPUT_DIM vector per vocabulary item.
        self.lookup = model.add_lookup_parameters((VOCAB_SIZE, INPUT_DIM))
        # Output projection from hidden state to vocabulary logits.
        self.R = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
        self.bias = model.add_parameters((VOCAB_SIZE))
    def save_to_disk(self, filename):
        """Serialize the builder and all parameters to ``filename``."""
        dy.save(filename, [self.builder, self.lookup, self.R, self.bias])
    def load_from_disk(self, filename):
        """Restore parameters previously written by ``save_to_disk``.

        NOTE(review): this reads the global ``model``, not the model passed
        to ``__init__`` — it only works when a module-level ``model`` exists
        (as in the ``__main__`` block of this script); confirm before reuse.
        """
        (self.builder, self.lookup, self.R, self.bias) = dy.load(filename, model)
    def build_lm_graph(self, sent):
        """Build and return the summed neg-log-likelihood loss for ``sent``.

        ``sent`` is a sequence of token ids; each position predicts the next.
        """
        dy.renew_cg()
        init_state = self.builder.initial_state()
        R = dy.parameter(self.R)
        bias = dy.parameter(self.bias)
        errs = [] # will hold expressions
        es=[]  # unused
        state = init_state
        for (cw,nw) in zip(sent,sent[1:]):
            # assume word is already a word-id
            x_t = dy.lookup(self.lookup, int(cw))
            state = state.add_input(x_t)
            y_t = state.output()
            r_t = bias + (R * y_t)
            err = dy.pickneglogsoftmax(r_t, int(nw))
            errs.append(err)
        nerr = dy.esum(errs)
        return nerr
    def predict_next_word(self, sentence):
        """Return the softmax distribution over the token following ``sentence``."""
        dy.renew_cg()
        init_state = self.builder.initial_state()
        R = dy.parameter(self.R)
        bias = dy.parameter(self.bias)
        state = init_state
        for cw in sentence:
            # assume word is already a word-id
            x_t = dy.lookup(self.lookup, int(cw))
            state = state.add_input(x_t)
        y_t = state.output()
        r_t = bias + (R * y_t)
        prob = dy.softmax(r_t)
        return prob
    def sample(self, first=1, nchars=0, stop=-1):
        """Sample a token-id sequence starting from ``first``.

        Stops when the ``stop`` id is drawn or (if ``nchars`` > 0) when the
        sequence exceeds ``nchars`` tokens.
        """
        res = [first]
        dy.renew_cg()
        state = self.builder.initial_state()
        R = dy.parameter(self.R)
        bias = dy.parameter(self.bias)
        cw = first
        while True:
            x_t = dy.lookup(self.lookup, cw)
            state = state.add_input(x_t)
            y_t = state.output()
            r_t = bias + (R * y_t)
            ydist = dy.softmax(r_t)
            dist = ydist.vec_value()
            # Inverse-CDF sampling from the softmax distribution.
            rnd = random.random()
            for i,p in enumerate(dist):
                rnd -= p
                if rnd <= 0: break
            res.append(i)
            cw = i
            if cw == stop: break
            if nchars and len(res) > nchars: break
        return res
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('corpus', help='Path to the corpus file.')
    args = parser.parse_args()
    # Build the vocabulary from a character-level corpus reader.
    train = util.CharsCorpusReader(args.corpus, begin="<s>")
    vocab = util.Vocab.from_corpus(train)
    VOCAB_SIZE = vocab.size()
    model = dy.Model()
    trainer = dy.SimpleSGDTrainer(model, learning_rate=1.0)
    #lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder)
    lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.LSTMBuilder)
    train = list(train)
    chars = loss = 0.0
    for ITER in range(100):
        random.shuffle(train)
        for i,sent in enumerate(train):
            _start = time.time()
            # Every 50 sentences: report per-char loss and print a sample.
            if i % 50 == 0:
                trainer.status()
                if chars > 0: print(loss / chars,)
                for _ in range(1):
                    samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
                    print("".join([vocab.i2w[c] for c in samp]).strip())
                loss = 0.0
                chars = 0.0
            chars += len(sent)-1
            # Map characters to ids, build the loss graph, and do one SGD step.
            isent = [vocab.w2i[w] for w in sent]
            errs = lm.build_lm_graph(isent)
            loss += errs.scalar_value()
            errs.backward()
            trainer.update()
            #print "TM:",(time.time() - _start)/len(sent)
        print("ITER {}, loss={}".format(ITER, loss))
        trainer.status()
    # Round-trip the model through disk, then sample from the reloaded copy.
    lm.save_to_disk("RNNLanguageModel.model")
    print("loading the saved model...")
    lm.load_from_disk("RNNLanguageModel.model")
    samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
    print("".join([vocab.i2w[c] for c in samp]).strip())
| examples/rnnlm/rnnlm.py | 4,542 | 50 256 50 1024 will hold expressions assume word is already a word-id assume word is already a word-idlm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder)print "TM:",(time.time() - _start)/len(sent) | 248 | en | 0.551469 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from mcts.webapi_tests.fm_radio.fm_radio_test import FMRadioTestCommon
from mcts.webapi_tests.fm_radio.test_fm_radio_basic import TestFMRadioBasic
| mcts/webapi_tests/fm_radio/__init__.py | 347 | This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. | 192 | en | 0.934305 |
"""Views of problem2 app."""
from django.shortcuts import render
from .forms import FiboForm
def display(request):
    """Render the solution page; on POST, compute the even-Fibonacci sum."""
    template = 'problem2/solution2.html'
    if request.method != 'POST':
        # Plain GET: show an unbound form.
        return render(request, template, {'form': FiboForm()})
    form = FiboForm(request.POST)
    if form.is_valid():
        fibo = form.save(commit=False)
        evensum = fibo.evenFiboSum()
        fibo.save()
        return render(request, template, {'evensum': evensum, 'form': form})
    # Invalid submission: re-render with the bound form and its errors.
    return render(request, template, {'form': form})
| problem2/views.py | 620 | Function view to display form in the standard manner.
Views of problem2 app. | 76 | en | 0.7785 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.