code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
"""
dolbyio_rest_apis.media.models.webhook
~~~~~~~~~~~~~~~
This module contains the Webhook model.
"""
from dolbyio_rest_apis.core.helpers import get_value_or_default
class Webhook(dict):
    """The :class:`Webhook` object, which represents a webhook."""

    def __init__(self, dictionary: dict):
        super().__init__(dictionary)

        self.webhook_id = get_value_or_default(self, 'webhook_id', None)

        # Callback fields default to None; they are only populated when the
        # payload carries a 'callback' sub-dictionary.
        self.url = None
        self.headers = None
        if 'callback' in self:
            callback_info = self['callback']
            self.url = get_value_or_default(callback_info, 'url', None)
            self.headers = get_value_or_default(callback_info, 'headers', None)
|
[
"dolbyio_rest_apis.core.helpers.get_value_or_default"
] |
[((368, 414), 'dolbyio_rest_apis.core.helpers.get_value_or_default', 'get_value_or_default', (['self', '"""webhook_id"""', 'None'], {}), "(self, 'webhook_id', None)\n", (388, 414), False, 'from dolbyio_rest_apis.core.helpers import get_value_or_default\n'), ((561, 604), 'dolbyio_rest_apis.core.helpers.get_value_or_default', 'get_value_or_default', (['callback', '"""url"""', 'None'], {}), "(callback, 'url', None)\n", (581, 604), False, 'from dolbyio_rest_apis.core.helpers import get_value_or_default\n'), ((632, 679), 'dolbyio_rest_apis.core.helpers.get_value_or_default', 'get_value_or_default', (['callback', '"""headers"""', 'None'], {}), "(callback, 'headers', None)\n", (652, 679), False, 'from dolbyio_rest_apis.core.helpers import get_value_or_default\n')]
|
import asyncio
import logging
import traceback
from collections import namedtuple
from datetime import datetime
from enum import Enum
from typing import Optional, Union
from aioredis import RedisError
from async_rediscache import RedisCache
from dateutil.relativedelta import relativedelta
from discord import Colour, Embed, Forbidden, Member, User
from discord.ext import tasks
from discord.ext.commands import Cog, Context, group, has_any_role
from bot.bot import Bot
from bot.constants import Channels, Colours, Emojis, Event, Icons, MODERATION_ROLES, Roles
from bot.converters import DurationDelta, Expiry
from bot.exts.moderation.modlog import ModLog
from bot.utils.messages import format_user
from bot.utils.scheduling import Scheduler
from bot.utils.time import (
TimestampFormats, discord_timestamp, humanize_delta, parse_duration_string, relativedelta_to_timedelta
)
log = logging.getLogger(__name__)
REJECTION_MESSAGE = """
Hi, {user} - Thanks for your interest in our server!
Due to a current (or detected) cyberattack on our community, we've limited access to the server for new accounts. Since
your account is relatively new, we're unable to provide access to the server at this time.
Even so, thanks for joining! We're very excited at the possibility of having you here, and we hope that this situation
will be resolved soon. In the meantime, please feel free to peruse the resources on our site at
<https://pythondiscord.com/>, and have a nice day!
"""
BASE_CHANNEL_TOPIC = "Python Discord Defense Mechanism"
SECONDS_IN_DAY = 86400
class Action(Enum):
    """Defcon Action."""

    # Value type bundling the mod-log presentation details for each action:
    # an icon, an emoji, an embed colour, and a message template.
    ActionInfo = namedtuple('LogInfoDetails', ['icon', 'emoji', 'color', 'template'])
    SERVER_OPEN = ActionInfo(Icons.defcon_unshutdown, Emojis.defcon_unshutdown, Colours.soft_green, "")
    SERVER_SHUTDOWN = ActionInfo(Icons.defcon_shutdown, Emojis.defcon_shutdown, Colours.soft_red, "")
    # DURATION_UPDATE's template receives the new threshold via .format(threshold=...).
    DURATION_UPDATE = ActionInfo(
        Icons.defcon_update, Emojis.defcon_update, Colour.blurple(), "**Threshold:** {threshold}\n\n"
    )
class Defcon(Cog):
    """Time-sensitive server defense mechanisms."""

    # RedisCache[str, str]
    # The cache's keys are "threshold" and "expiry".
    # The caches' values are strings formatted as valid input to the DurationDelta converter, or empty when off.
    defcon_settings = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot
        self.channel = None
        self.threshold = relativedelta(days=0)
        self.expiry = None
        self.scheduler = Scheduler(self.__class__.__name__)

        self.bot.loop.create_task(self._sync_settings())

    @property
    def mod_log(self) -> ModLog:
        """Get currently loaded ModLog cog instance."""
        return self.bot.get_cog("ModLog")

    @defcon_settings.atomic_transaction
    async def _sync_settings(self) -> None:
        """On cog load, try to synchronize the cog's state from the cached DEFCON settings."""
        log.trace("Waiting for the guild to become available before syncing.")
        await self.bot.wait_until_guild_available()
        self.channel = await self.bot.fetch_channel(Channels.defcon)

        log.trace("Syncing settings.")

        try:
            settings = await self.defcon_settings.to_dict()
            # Empty string values mean DEFCON is off (see the cache comment above).
            self.threshold = parse_duration_string(settings["threshold"]) if settings.get("threshold") else None
            self.expiry = datetime.fromisoformat(settings["expiry"]) if settings.get("expiry") else None
        except RedisError:
            log.exception("Unable to get DEFCON settings!")
            await self.channel.send(
                f"<@&{Roles.moderators}> <@&{Roles.devops}> **WARNING**: Unable to get DEFCON settings!"
                f"\n\n```{traceback.format_exc()}```"
            )
        else:
            if self.expiry:
                self.scheduler.schedule_at(self.expiry, 0, self._remove_threshold())
            self._update_notifier()
            log.info(f"DEFCON synchronized: {humanize_delta(self.threshold) if self.threshold else '-'}")

        self._update_channel_topic()

    @Cog.listener()
    async def on_member_join(self, member: Member) -> None:
        """Check newly joining users to see if they meet the account age threshold."""
        if self.threshold:
            now = datetime.utcnow()

            if now - member.created_at < relativedelta_to_timedelta(self.threshold):
                log.info(f"Rejecting user {member}: Account is too new")

                message_sent = False

                try:
                    await member.send(REJECTION_MESSAGE.format(user=member.mention))
                    message_sent = True
                except Forbidden:
                    log.debug(f"Cannot send DEFCON rejection DM to {member}: DMs disabled")
                except Exception:
                    # Broadly catch exceptions because DM isn't critical, but it's imperative to kick them.
                    log.exception(f"Error sending DEFCON rejection message to {member}")

                await member.kick(reason="DEFCON active, user is too new")
                self.bot.stats.incr("defcon.leaves")

                message = (
                    f"{format_user(member)} was denied entry because their account is too new."
                )

                if not message_sent:
                    message = f"{message}\n\nUnable to send rejection message via DM; they probably have DMs disabled."

                await self.mod_log.send_log_message(
                    Icons.defcon_denied, Colours.soft_red, "Entry denied",
                    message, member.avatar_url_as(static_format="png")
                )

    @group(name='defcon', aliases=('dc',), invoke_without_command=True)
    @has_any_role(*MODERATION_ROLES)
    async def defcon_group(self, ctx: Context) -> None:
        """Check the DEFCON status or run a subcommand."""
        await ctx.send_help(ctx.command)

    @defcon_group.command(aliases=('s',))
    @has_any_role(*MODERATION_ROLES)
    async def status(self, ctx: Context) -> None:
        """Check the current status of DEFCON mode."""
        embed = Embed(
            colour=Colour.blurple(), title="DEFCON Status",
            description=f"""
                **Threshold:** {humanize_delta(self.threshold) if self.threshold else "-"}
                **Expires:** {discord_timestamp(self.expiry, TimestampFormats.RELATIVE) if self.expiry else "-"}
                **Verification level:** {ctx.guild.verification_level.name}
                """
        )

        await ctx.send(embed=embed)

    @defcon_group.command(name="threshold", aliases=('t', 'd'))
    @has_any_role(*MODERATION_ROLES)
    async def threshold_command(
        self, ctx: Context, threshold: Union[DurationDelta, int], expiry: Optional[Expiry] = None
    ) -> None:
        """
        Set how old an account must be to join the server.

        The threshold is the minimum required account age. Can accept either a duration string or a number of days.
        Set it to 0 to have no threshold.
        The expiry allows to automatically remove the threshold after a designated time. If no expiry is specified,
        the cog will remind to remove the threshold hourly.
        """
        if isinstance(threshold, int):
            threshold = relativedelta(days=threshold)
        await self._update_threshold(ctx.author, threshold=threshold, expiry=expiry)

    @defcon_group.command()
    @has_any_role(Roles.admins)
    async def shutdown(self, ctx: Context) -> None:
        """Shut down the server by setting send permissions of everyone to False."""
        role = ctx.guild.default_role
        permissions = role.permissions

        permissions.update(send_messages=False, add_reactions=False, connect=False)
        await role.edit(reason="DEFCON shutdown", permissions=permissions)
        await ctx.send(f"{Action.SERVER_SHUTDOWN.value.emoji} Server shut down.")

    @defcon_group.command()
    @has_any_role(Roles.admins)
    async def unshutdown(self, ctx: Context) -> None:
        """Open up the server again by setting send permissions of everyone to None."""
        role = ctx.guild.default_role
        permissions = role.permissions

        permissions.update(send_messages=True, add_reactions=True, connect=True)
        await role.edit(reason="DEFCON unshutdown", permissions=permissions)
        await ctx.send(f"{Action.SERVER_OPEN.value.emoji} Server reopened.")

    def _update_channel_topic(self) -> None:
        """Update the #defcon channel topic with the current DEFCON status."""
        new_topic = f"{BASE_CHANNEL_TOPIC}\n(Threshold: {humanize_delta(self.threshold) if self.threshold else '-'})"

        # Ignore the resulting channel-update event so the mod log doesn't report our own edit.
        self.mod_log.ignore(Event.guild_channel_update, Channels.defcon)
        asyncio.create_task(self.channel.edit(topic=new_topic))

    @defcon_settings.atomic_transaction
    async def _update_threshold(self, author: User, threshold: relativedelta, expiry: Optional[Expiry] = None) -> None:
        """Update the new threshold in the cog, cache, defcon channel, and logs, and additionally schedule expiry."""
        self.threshold = threshold
        if threshold == relativedelta(days=0):  # If the threshold is 0, we don't need to schedule anything
            expiry = None
        self.expiry = expiry

        # Either way, we cancel the old task.
        self.scheduler.cancel_all()
        if self.expiry is not None:
            self.scheduler.schedule_at(expiry, 0, self._remove_threshold())

        self._update_notifier()

        # Make sure to handle the critical part of the update before writing to Redis.
        error = ""
        try:
            await self.defcon_settings.update(
                {
                    'threshold': Defcon._stringify_relativedelta(self.threshold) if self.threshold else "",
                    # Store the empty string (not 0) when there is no expiry: the cache
                    # is declared RedisCache[str, str] and _sync_settings reads it as str.
                    'expiry': expiry.isoformat() if expiry else ""
                }
            )
        except RedisError:
            error = ", but failed to write to cache"

        action = Action.DURATION_UPDATE

        expiry_message = ""
        if expiry:
            expiry_message = f" for the next {humanize_delta(relativedelta(expiry, datetime.utcnow()), max_units=2)}"

        if self.threshold:
            channel_message = (
                f"updated; accounts must be {humanize_delta(self.threshold)} "
                f"old to join the server{expiry_message}"
            )
        else:
            channel_message = "removed"

        await self.channel.send(
            f"{action.value.emoji} DEFCON threshold {channel_message}{error}."
        )
        await self._send_defcon_log(action, author)
        self._update_channel_topic()

        self._log_threshold_stat(threshold)

    async def _remove_threshold(self) -> None:
        """Resets the threshold back to 0."""
        await self._update_threshold(self.bot.user, relativedelta(days=0))

    @staticmethod
    def _stringify_relativedelta(delta: relativedelta) -> str:
        """Convert a relativedelta object to a duration string."""
        units = [("years", "y"), ("months", "m"), ("days", "d"), ("hours", "h"), ("minutes", "m"), ("seconds", "s")]
        return "".join(f"{getattr(delta, unit)}{symbol}" for unit, symbol in units if getattr(delta, unit)) or "0s"

    def _log_threshold_stat(self, threshold: relativedelta) -> None:
        """Adds the threshold to the bot stats in days."""
        threshold_days = relativedelta_to_timedelta(threshold).total_seconds() / SECONDS_IN_DAY
        self.bot.stats.gauge("defcon.threshold", threshold_days)

    async def _send_defcon_log(self, action: Action, actor: User) -> None:
        """Send log message for DEFCON action."""
        info = action.value
        log_msg: str = (
            f"**Staffer:** {actor.mention} {actor} (`{actor.id}`)\n"
            f"{info.template.format(threshold=(humanize_delta(self.threshold) if self.threshold else '-'))}"
        )
        status_msg = f"DEFCON {action.name.lower()}"

        await self.mod_log.send_log_message(info.icon, info.color, status_msg, log_msg)

    def _update_notifier(self) -> None:
        """Start or stop the notifier according to the DEFCON status."""
        # The hourly reminder only runs while a threshold is active with no expiry scheduled.
        if self.threshold and self.expiry is None and not self.defcon_notifier.is_running():
            log.info("DEFCON notifier started.")
            self.defcon_notifier.start()
        elif (not self.threshold or self.expiry is not None) and self.defcon_notifier.is_running():
            log.info("DEFCON notifier stopped.")
            self.defcon_notifier.cancel()

    @tasks.loop(hours=1)
    async def defcon_notifier(self) -> None:
        """Routinely notify moderators that DEFCON is active."""
        await self.channel.send(f"Defcon is on and is set to {humanize_delta(self.threshold)}.")

    def cog_unload(self) -> None:
        """Cancel the notifier and threshold removal tasks when the cog unloads."""
        log.trace("Cog unload: canceling defcon notifier task.")
        self.defcon_notifier.cancel()
        self.scheduler.cancel_all()
def setup(bot: Bot) -> None:
    """Load the Defcon cog."""
    cog = Defcon(bot)
    bot.add_cog(cog)
|
[
"datetime.datetime.fromisoformat",
"bot.utils.time.discord_timestamp",
"bot.utils.time.parse_duration_string",
"bot.utils.time.relativedelta_to_timedelta",
"bot.utils.time.humanize_delta",
"async_rediscache.RedisCache",
"discord.ext.commands.has_any_role",
"discord.ext.commands.Cog.listener",
"discord.Colour.blurple",
"dateutil.relativedelta.relativedelta",
"bot.utils.scheduling.Scheduler",
"discord.ext.tasks.loop",
"datetime.datetime.utcnow",
"collections.namedtuple",
"bot.utils.messages.format_user",
"discord.ext.commands.group",
"traceback.format_exc",
"logging.getLogger"
] |
[((889, 916), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (906, 916), False, 'import logging\n'), ((1624, 1692), 'collections.namedtuple', 'namedtuple', (['"""LogInfoDetails"""', "['icon', 'emoji', 'color', 'template']"], {}), "('LogInfoDetails', ['icon', 'emoji', 'color', 'template'])\n", (1634, 1692), False, 'from collections import namedtuple\n'), ((2331, 2343), 'async_rediscache.RedisCache', 'RedisCache', ([], {}), '()\n', (2341, 2343), False, 'from async_rediscache import RedisCache\n'), ((4072, 4086), 'discord.ext.commands.Cog.listener', 'Cog.listener', ([], {}), '()\n', (4084, 4086), False, 'from discord.ext.commands import Cog, Context, group, has_any_role\n'), ((5652, 5718), 'discord.ext.commands.group', 'group', ([], {'name': '"""defcon"""', 'aliases': "('dc',)", 'invoke_without_command': '(True)'}), "(name='defcon', aliases=('dc',), invoke_without_command=True)\n", (5657, 5718), False, 'from discord.ext.commands import Cog, Context, group, has_any_role\n'), ((5724, 5755), 'discord.ext.commands.has_any_role', 'has_any_role', (['*MODERATION_ROLES'], {}), '(*MODERATION_ROLES)\n', (5736, 5755), False, 'from discord.ext.commands import Cog, Context, group, has_any_role\n'), ((5960, 5991), 'discord.ext.commands.has_any_role', 'has_any_role', (['*MODERATION_ROLES'], {}), '(*MODERATION_ROLES)\n', (5972, 5991), False, 'from discord.ext.commands import Cog, Context, group, has_any_role\n'), ((6626, 6657), 'discord.ext.commands.has_any_role', 'has_any_role', (['*MODERATION_ROLES'], {}), '(*MODERATION_ROLES)\n', (6638, 6657), False, 'from discord.ext.commands import Cog, Context, group, has_any_role\n'), ((7434, 7460), 'discord.ext.commands.has_any_role', 'has_any_role', (['Roles.admins'], {}), '(Roles.admins)\n', (7446, 7460), False, 'from discord.ext.commands import Cog, Context, group, has_any_role\n'), ((7951, 7977), 'discord.ext.commands.has_any_role', 'has_any_role', (['Roles.admins'], {}), '(Roles.admins)\n', (7963, 7977), 
False, 'from discord.ext.commands import Cog, Context, group, has_any_role\n'), ((12564, 12583), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'hours': '(1)'}), '(hours=1)\n', (12574, 12583), False, 'from discord.ext import tasks\n'), ((1985, 2001), 'discord.Colour.blurple', 'Colour.blurple', ([], {}), '()\n', (1999, 2001), False, 'from discord import Colour, Embed, Forbidden, Member, User\n'), ((2455, 2476), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(0)'}), '(days=0)\n', (2468, 2476), False, 'from dateutil.relativedelta import relativedelta\n'), ((2530, 2564), 'bot.utils.scheduling.Scheduler', 'Scheduler', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (2539, 2564), False, 'from bot.utils.scheduling import Scheduler\n'), ((4279, 4296), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4294, 4296), False, 'from datetime import datetime\n'), ((7285, 7314), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': 'threshold'}), '(days=threshold)\n', (7298, 7314), False, 'from dateutil.relativedelta import relativedelta\n'), ((9152, 9173), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(0)'}), '(days=0)\n', (9165, 9173), False, 'from dateutil.relativedelta import relativedelta\n'), ((3271, 3315), 'bot.utils.time.parse_duration_string', 'parse_duration_string', (["settings['threshold']"], {}), "(settings['threshold'])\n", (3292, 3315), False, 'from bot.utils.time import TimestampFormats, discord_timestamp, humanize_delta, parse_duration_string, relativedelta_to_timedelta\n'), ((3381, 3423), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["settings['expiry']"], {}), "(settings['expiry'])\n", (3403, 3423), False, 'from datetime import datetime\n'), ((4339, 4381), 'bot.utils.time.relativedelta_to_timedelta', 'relativedelta_to_timedelta', (['self.threshold'], {}), '(self.threshold)\n', (4365, 4381), False, 'from bot.utils.time import TimestampFormats, 
discord_timestamp, humanize_delta, parse_duration_string, relativedelta_to_timedelta\n'), ((6139, 6155), 'discord.Colour.blurple', 'Colour.blurple', ([], {}), '()\n', (6153, 6155), False, 'from discord import Colour, Embed, Forbidden, Member, User\n'), ((10865, 10886), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(0)'}), '(days=0)\n', (10878, 10886), False, 'from dateutil.relativedelta import relativedelta\n'), ((8615, 8645), 'bot.utils.time.humanize_delta', 'humanize_delta', (['self.threshold'], {}), '(self.threshold)\n', (8629, 8645), False, 'from bot.utils.time import TimestampFormats, discord_timestamp, humanize_delta, parse_duration_string, relativedelta_to_timedelta\n'), ((10302, 10332), 'bot.utils.time.humanize_delta', 'humanize_delta', (['self.threshold'], {}), '(self.threshold)\n', (10316, 10332), False, 'from bot.utils.time import TimestampFormats, discord_timestamp, humanize_delta, parse_duration_string, relativedelta_to_timedelta\n'), ((11424, 11461), 'bot.utils.time.relativedelta_to_timedelta', 'relativedelta_to_timedelta', (['threshold'], {}), '(threshold)\n', (11450, 11461), False, 'from bot.utils.time import TimestampFormats, discord_timestamp, humanize_delta, parse_duration_string, relativedelta_to_timedelta\n'), ((5179, 5198), 'bot.utils.messages.format_user', 'format_user', (['member'], {}), '(member)\n', (5190, 5198), False, 'from bot.utils.messages import format_user\n'), ((12756, 12786), 'bot.utils.time.humanize_delta', 'humanize_delta', (['self.threshold'], {}), '(self.threshold)\n', (12770, 12786), False, 'from bot.utils.time import TimestampFormats, discord_timestamp, humanize_delta, parse_duration_string, relativedelta_to_timedelta\n'), ((3967, 3997), 'bot.utils.time.humanize_delta', 'humanize_delta', (['self.threshold'], {}), '(self.threshold)\n', (3981, 3997), False, 'from bot.utils.time import TimestampFormats, discord_timestamp, humanize_delta, parse_duration_string, relativedelta_to_timedelta\n'), ((6266, 
6296), 'bot.utils.time.humanize_delta', 'humanize_delta', (['self.threshold'], {}), '(self.threshold)\n', (6280, 6296), False, 'from bot.utils.time import TimestampFormats, discord_timestamp, humanize_delta, parse_duration_string, relativedelta_to_timedelta\n'), ((6355, 6412), 'bot.utils.time.discord_timestamp', 'discord_timestamp', (['self.expiry', 'TimestampFormats.RELATIVE'], {}), '(self.expiry, TimestampFormats.RELATIVE)\n', (6372, 6412), False, 'from bot.utils.time import TimestampFormats, discord_timestamp, humanize_delta, parse_duration_string, relativedelta_to_timedelta\n'), ((10162, 10179), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (10177, 10179), False, 'from datetime import datetime\n'), ((11855, 11885), 'bot.utils.time.humanize_delta', 'humanize_delta', (['self.threshold'], {}), '(self.threshold)\n', (11869, 11885), False, 'from bot.utils.time import TimestampFormats, discord_timestamp, humanize_delta, parse_duration_string, relativedelta_to_timedelta\n'), ((3715, 3737), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3735, 3737), False, 'import traceback\n')]
|
from django.contrib import messages
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render, reverse
from django.utils.translation import ugettext_lazy as _
import simplejson
from rest_framework.decorators import api_view, throttle_classes
from rest_framework.response import Response
from .forms import EventCommentForm
from .models.events import Attendee, Event, EventComment, Place, PlaceSerializer
from .models.locale import (
SPR,
City,
CitySerializer,
Country,
CountrySerializer,
SPRSerializer,
)
from .models.profiles import (
Member,
Organization,
Sponsor,
SponsorSerializer,
Team,
TeamSerializer,
UserProfile,
)
from .models.search import Searchable, SearchableSerializer
from .utils import verify_csrf
# Create your views here.
def searchable_list(request, *args, **kwargs):
    """Return every Searchable that has a non-empty location name, as a JSON array."""
    located = Searchable.objects.exclude(location_name="")
    payload = SearchableSerializer(located, many=True).data
    # safe=False because the top-level JSON value is a list, not an object.
    return JsonResponse(payload, safe=False)
def events_list(request, *args, **kwargs):
    """Render the HTML page listing all events."""
    context = {"events_list": Event.objects.all()}
    return render(request, "events/event_list.html", context)
@api_view(["GET"])
def org_member_list(request, org_id):
org = get_object_or_404(Organization, id=org_id)
serializer = TeamSerializer(org.teams.all(), many=True)
return Response(serializer.data)
@api_view(["GET"])
def places_list(request, *args, **kwargs):
places = Place.objects.all()
if "q" in request.GET:
match = request.GET.get("q", "")
places = Place.objects.filter(name__icontains=match)
else:
places = Place.objects.all()
serializer = PlaceSerializer(places, many=True)
return Response(serializer.data)
@api_view(["GET"])
def country_list(request, *args, **kwargs):
if "q" in request.GET:
match = request.GET.get("q", "")
countries = Country.objects.filter(name__icontains=match)[:20]
else:
countries = Country.objects.all()[:20]
serializer = CountrySerializer(countries, many=True)
return Response(serializer.data)
@api_view(["GET"])
def spr_list(request, *args, **kwargs):
if "q" in request.GET:
match = request.GET.get("q", "")
sprs = SPR.objects.filter(name__icontains=match)[:20]
else:
sprs = SPR.objects.all()[:20]
if "country" in request.GET and request.GET.get("country") is not "":
sprs = sprs.filter(country=request.GET.get("country"))
serializer = SPRSerializer(sprs, many=True)
return Response(serializer.data)
@api_view(["GET"])
def city_list(request, *args, **kwargs):
if "q" in request.GET:
match = request.GET.get("q", "")
cities = City.objects.filter(name__icontains=match).order_by("-population")
else:
cities = City.objects.all()
if "spr" in request.GET and request.GET.get("spr") is not "":
cities = cities.filter(spr=request.GET.get("spr"))
serializer = CitySerializer(cities[:50], many=True)
return Response(serializer.data)
@api_view(["GET"])
def find_city(request):
cities = City.objects.all()
if "city" in request.GET:
cities = cities.filter(name=request.GET.get("city"))
if "spr" in request.GET:
cities = cities.filter(spr__name=request.GET.get("spr"))
if "country" in request.GET:
cities = cities.filter(spr__country__name=request.GET.get("country"))
try:
city = cities[0]
serializer = CitySerializer(city)
return Response(serializer.data)
except:
return Response({})
@api_view(["GET"])
def sponsor_list(request):
if "q" in request.GET:
match = request.GET.get("q", "")
sponsors = Sponsor.objects.filter(name__icontains=match)
else:
sponsors = Sponsor.objects.all()
serializer = SponsorSerializer(sponsors[:50], many=True)
return Response(serializer.data)
@verify_csrf(token_key="csrftoken")
def join_team(request, team_id):
if request.user.is_anonymous:
messages.add_message(
request,
messages.WARNING,
message=_("You must be logged in to join a team."),
)
return redirect("show-team", team_id=team_id)
team = Team.objects.get(id=team_id)
if request.user.profile in team.members.all():
messages.add_message(
request, messages.INFO, message=_("You are already a member of this team.")
)
return redirect("show-team", team_id=team_id)
new_member = Member.objects.create(
team=team, user=request.user.profile, role=Member.NORMAL
)
messages.add_message(request, messages.SUCCESS, message=_("Welcome to the team!"))
return redirect("show-team", team_id=team_id)
@verify_csrf(token_key="csrftoken")
def leave_team(request, team_id):
if request.user.is_anonymous:
messages.add_message(
request,
messages.WARNING,
message=_("You must be logged in to leave a team."),
)
return redirect("show-team", team_id=team_id)
team = Team.objects.get(id=team_id)
if request.user.profile not in team.members.all():
messages.add_message(
request, messages.INFO, message=_("You are not a member of this team.")
)
return redirect("show-team", team_id=team_id)
Member.objects.filter(team=team, user=request.user.profile).delete()
messages.add_message(
request, messages.SUCCESS, message=_("You are no longer on this team.")
)
return redirect("show-team", team_id=team_id)
|
[
"django.shortcuts.redirect",
"django.utils.translation.ugettext_lazy",
"django.http.JsonResponse",
"django.shortcuts.get_object_or_404",
"rest_framework.response.Response",
"django.shortcuts.render",
"rest_framework.decorators.api_view"
] |
[((1260, 1277), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (1268, 1277), False, 'from rest_framework.decorators import api_view, throttle_classes\n'), ((1469, 1486), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (1477, 1486), False, 'from rest_framework.decorators import api_view, throttle_classes\n'), ((1831, 1848), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (1839, 1848), False, 'from rest_framework.decorators import api_view, throttle_classes\n'), ((2186, 2203), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (2194, 2203), False, 'from rest_framework.decorators import api_view, throttle_classes\n'), ((2648, 2665), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (2656, 2665), False, 'from rest_framework.decorators import api_view, throttle_classes\n'), ((3128, 3145), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (3136, 3145), False, 'from rest_framework.decorators import api_view, throttle_classes\n'), ((3658, 3675), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (3666, 3675), False, 'from rest_framework.decorators import api_view, throttle_classes\n'), ((1037, 1078), 'django.http.JsonResponse', 'JsonResponse', (['serializer.data'], {'safe': '(False)'}), '(serializer.data, safe=False)\n', (1049, 1078), False, 'from django.http import HttpResponse, JsonResponse\n'), ((1206, 1256), 'django.shortcuts.render', 'render', (['request', '"""events/event_list.html"""', 'context'], {}), "(request, 'events/event_list.html', context)\n", (1212, 1256), False, 'from django.shortcuts import get_object_or_404, redirect, render, reverse\n'), ((1326, 1368), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Organization'], {'id': 'org_id'}), '(Organization, id=org_id)\n', (1343, 1368), False, 'from 
django.shortcuts import get_object_or_404, redirect, render, reverse\n'), ((1440, 1465), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1448, 1465), False, 'from rest_framework.response import Response\n'), ((1802, 1827), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1810, 1827), False, 'from rest_framework.response import Response\n'), ((2157, 2182), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (2165, 2182), False, 'from rest_framework.response import Response\n'), ((2619, 2644), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (2627, 2644), False, 'from rest_framework.response import Response\n'), ((3099, 3124), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (3107, 3124), False, 'from rest_framework.response import Response\n'), ((3960, 3985), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (3968, 3985), False, 'from rest_framework.response import Response\n'), ((4783, 4821), 'django.shortcuts.redirect', 'redirect', (['"""show-team"""'], {'team_id': 'team_id'}), "('show-team', team_id=team_id)\n", (4791, 4821), False, 'from django.shortcuts import get_object_or_404, redirect, render, reverse\n'), ((5607, 5645), 'django.shortcuts.redirect', 'redirect', (['"""show-team"""'], {'team_id': 'team_id'}), "('show-team', team_id=team_id)\n", (5615, 5645), False, 'from django.shortcuts import get_object_or_404, redirect, render, reverse\n'), ((3589, 3614), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (3597, 3614), False, 'from rest_framework.response import Response\n'), ((4262, 4300), 'django.shortcuts.redirect', 'redirect', (['"""show-team"""'], {'team_id': 'team_id'}), "('show-team', team_id=team_id)\n", (4270, 
4300), False, 'from django.shortcuts import get_object_or_404, redirect, render, reverse\n'), ((4535, 4573), 'django.shortcuts.redirect', 'redirect', (['"""show-team"""'], {'team_id': 'team_id'}), "('show-team', team_id=team_id)\n", (4543, 4573), False, 'from django.shortcuts import get_object_or_404, redirect, render, reverse\n'), ((5099, 5137), 'django.shortcuts.redirect', 'redirect', (['"""show-team"""'], {'team_id': 'team_id'}), "('show-team', team_id=team_id)\n", (5107, 5137), False, 'from django.shortcuts import get_object_or_404, redirect, render, reverse\n'), ((5372, 5410), 'django.shortcuts.redirect', 'redirect', (['"""show-team"""'], {'team_id': 'team_id'}), "('show-team', team_id=team_id)\n", (5380, 5410), False, 'from django.shortcuts import get_object_or_404, redirect, render, reverse\n'), ((3642, 3654), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (3650, 3654), False, 'from rest_framework.response import Response\n'), ((4745, 4770), 'django.utils.translation.ugettext_lazy', '_', (['"""Welcome to the team!"""'], {}), "('Welcome to the team!')\n", (4746, 4770), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5553, 5589), 'django.utils.translation.ugettext_lazy', '_', (['"""You are no longer on this team."""'], {}), "('You are no longer on this team.')\n", (5554, 5589), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4193, 4235), 'django.utils.translation.ugettext_lazy', '_', (['"""You must be logged in to join a team."""'], {}), "('You must be logged in to join a team.')\n", (4194, 4235), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4466, 4509), 'django.utils.translation.ugettext_lazy', '_', (['"""You are already a member of this team."""'], {}), "('You are already a member of this team.')\n", (4467, 4509), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5029, 5072), 'django.utils.translation.ugettext_lazy', '_', (['"""You must be logged 
in to leave a team."""'], {}), "('You must be logged in to leave a team.')\n", (5030, 5072), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5307, 5346), 'django.utils.translation.ugettext_lazy', '_', (['"""You are not a member of this team."""'], {}), "('You are not a member of this team.')\n", (5308, 5346), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
import numpy as np

from sklearn import svm
from sklearn.ensemble import ExtraTreesClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, matthews_corrcoef
from sklearn.model_selection import KFold

from feature import Feature
def random_data(file_name):
    """Load sequences from *file_name* via ``Feature()`` and build the 15
    candidate feature-fusion matrices plus the label vector.

    Each per-sequence descriptor is converted to a 2-D matrix of shape
    ``(n_samples, -1)`` before concatenation along axis 1.  Returns the 15
    fused matrices followed by the 1-D label array.
    """
    (seqs_blosum62, label, work2vec, train_seqs, seqs_dde, seqs_z, seqs_aac,
     seqs_dpc, seqs_ctdd, seqs_ctrial, seqs_ksctriad, seqs_gtpc,
     seqs_cksaagp, seqs_gaac, seqs_gdpc, seqs_ctdt, seqs_geary, seqs_cksaap,
     seqs_aaindex, seqs_paac) = Feature(file_name)

    def flat(raw):
        # Flatten one descriptor to (n_samples, feature_dim).
        arr = np.array(raw)
        return arr.reshape(arr.shape[0], -1)

    # train_seqs is kept unflattened, exactly as before.
    train_seqs = np.array(train_seqs)

    seqs_blosum62 = np.array(seqs_blosum62)
    print(seqs_blosum62.shape)  # shape *before* flattening, as in the original
    seqs_blosum62 = seqs_blosum62.reshape(seqs_blosum62.shape[0], -1)

    seqs_dde = flat(seqs_dde)
    seqs_z = flat(seqs_z)
    seqs_aac = flat(seqs_aac)
    seqs_dpc = flat(seqs_dpc)
    seqs_ctdd = flat(seqs_ctdd)
    seqs_ctrial = flat(seqs_ctrial)
    seqs_ksctriad = flat(seqs_ksctriad)
    seqs_gtpc = flat(seqs_gtpc)
    seqs_cksaagp = flat(seqs_cksaagp)
    seqs_gaac = flat(seqs_gaac)
    seqs_gdpc = flat(seqs_gdpc)
    seqs_ctdt = flat(seqs_ctdt)
    seqs_geary = flat(seqs_geary)
    seqs_cksaap = flat(seqs_cksaap)
    seqs_aaindex = np.array(seqs_aaindex, dtype=float)
    seqs_aaindex = seqs_aaindex.reshape(seqs_aaindex.shape[0], -1)
    seqs_paac = flat(seqs_paac)

    # Candidate feature fusions (column-wise concatenation).
    data_features1 = np.concatenate((seqs_aac, seqs_dde, seqs_paac, seqs_gaac), 1)
    data_features2 = np.concatenate((seqs_aac, seqs_dde, seqs_paac, seqs_gdpc), 1)
    data_features3 = np.concatenate((seqs_aac, seqs_dde, seqs_paac, seqs_ctdt), 1)
    data_features4 = np.concatenate((seqs_dde, seqs_paac, seqs_gaac, seqs_gdpc), 1)
    data_features5 = np.concatenate((seqs_dde, seqs_paac, seqs_ctdt, train_seqs), 1)
    data_features6 = np.concatenate((seqs_cksaap, seqs_aac, seqs_dde, seqs_gtpc, seqs_ctdt), 1)
    data_features7 = np.concatenate((seqs_cksaap, seqs_aac, seqs_dde, seqs_gdpc, seqs_aaindex), 1)
    data_features8 = np.concatenate((seqs_aac, seqs_dde, seqs_paac, seqs_gaac, train_seqs), 1)
    data_features9 = np.concatenate((seqs_aac, seqs_dde, seqs_gaac, seqs_geary, train_seqs), 1)
    data_features10 = np.concatenate((seqs_dde, seqs_paac, seqs_gtpc, seqs_gdpc, seqs_ctdt), 1)
    data_features11 = np.concatenate((seqs_cksaap, seqs_aac, seqs_dde, seqs_gtpc, seqs_gaac, seqs_aaindex, train_seqs), 1)
    data_features12 = np.concatenate((seqs_aac, seqs_dde, seqs_dpc, seqs_paac, seqs_gaac, seqs_aaindex), 1)
    data_features13 = np.concatenate((seqs_aac, seqs_dde, seqs_gaac, seqs_ctdt, seqs_aaindex, seqs_ksctriad), 1)
    data_features14 = np.concatenate((seqs_aac, seqs_dde, seqs_dpc, seqs_paac, seqs_gdpc, seqs_aaindex, seqs_ctrial, train_seqs), 1)
    data_features15 = np.concatenate((seqs_aac, seqs_dde, seqs_gtpc, seqs_ksctriad, seqs_cksaap), 1)

    label = np.array(label)
    label = label.reshape(label.shape[0], )

    return (data_features1, data_features2, data_features3, data_features4,
            data_features5, data_features6, data_features7, data_features8,
            data_features9, data_features10, data_features11, data_features12,
            data_features13, data_features14, data_features15, label)
# data_features1, data_features2, data_features3, data_features4, data_features5, data_features6, data_features7, data_features8, data_features9, data_features10, data_features11, data_features12, data_features13, data_features14, data_features15, label = random_data(
# "D:\E下载\\ACP20AltTrain (1).fasta")
#
# te_data_features1, te_data_features2, te_data_features3, te_data_features4, te_data_features5, te_data_features6, te_data_features7, te_data_features8, te_data_features9, te_data_features10, te_data_features11, te_data_features12, te_data_features13, te_data_features14, te_data_features15, te_label = random_data(
# "D:\E下载\\ACP20AltTest (1).fasta")
#ml_model = XGBClassifier(use_label_encoder=False)
# Ten independently-seeded Extra-Trees classifiers, one per candidate
# feature combination (only models 5-7 are exercised in the CV loop below;
# the others are kept for the commented-out experiments).
ml_model1 = ExtraTreesClassifier(random_state=0)
ml_model2 = ExtraTreesClassifier(random_state=0)
ml_model3 = ExtraTreesClassifier(random_state=0)
ml_model4 = ExtraTreesClassifier(random_state=0)
ml_model5 = ExtraTreesClassifier(random_state=0)
ml_model6 = ExtraTreesClassifier(random_state=0)
ml_model7 = ExtraTreesClassifier(random_state=0)
ml_model8 = ExtraTreesClassifier(random_state=0)
ml_model9 = ExtraTreesClassifier(random_state=0)
ml_model10 = ExtraTreesClassifier(random_state=0)

# Soft-voting meta-classifier that fuses the base models' predictions.
# Fix: VotingClassifier was previously used without being imported, which
# raised NameError at module load time (import added at top of file).
estimatorf = VotingClassifier(
    estimators=[
        ('log_clf', LogisticRegression()),
        ('svm_clf', svm.SVC(probability=True)),
    ],
    voting='soft',
)
# Load every candidate feature matrix plus the labels once, up front.
(data_features1x, data_features2x, data_features3x, data_features4x,
 data_features5x, data_features6x, data_features7x, data_features8x,
 data_features9x, data_features10x, data_features11x, data_features12x,
 data_features13x, data_features14x, data_features15x,
 labelx) = random_data("D:\E下载\\CPP.txt")

# Ten-fold shuffled cross-validation with a fixed seed for reproducibility.
skf = KFold(n_splits=10, shuffle=True, random_state=999)

# Per-model metric accumulators: accuracy, MCC, specificity, sensitivity.
k_re_list1, MCC1, SP1, SN1 = [], [], [], []
k_re_list2, MCC2, SP2, SN2 = [], [], [], []
k_re_list3, MCC3, SP3, SN3 = [], [], [], []
k_re_list4, MCC4, SP4, SN4 = [], [], [], []
k_re_list5, MCC5, SP5, SN5 = [], [], [], []
k_re_list6, MCC6, SP6, SN6 = [], [], [], []
k_re_list7, MCC7, SP7, SN7 = [], [], [], []
k_re_list8, MCC8, SP8, SN8 = [], [], [], []
k_re_list9, MCC9, SP9, SN9 = [], [], [], []
k_re_list10, MCC10, SP10, SN10 = [], [], [], []
# Accumulators for the fused (voting) model.
k_re_list1x, MCC1x, SP1x, SN1x = [], [], [], []
def _record_fold_metrics(y_pred, y_true, acc_list, mcc_list, sp_list, sn_list):
    """Append one fold's accuracy/MCC/SP/SN and print the text report.

    NOTE(review): SP and SN are scraped out of the classification report by
    whitespace-token position (tokens 6 and 11), which assumes a binary-label
    report layout; the argument order (prediction passed first) mirrors the
    original code and swaps sklearn's (y_true, y_pred) convention -- confirm
    this is intentional.
    """
    # Compute the report once instead of three times per model as before.
    report = classification_report(y_pred, y_true, digits=4)
    tokens = report.split()
    acc_list.append(accuracy_score(y_true, y_pred))
    mcc_list.append(matthews_corrcoef(y_true, y_pred))
    sp_list.append(float(tokens[6]))
    sn_list.append(float(tokens[11]))
    print(report)


for K, (train_idx, val_idx) in enumerate(skf.split(data_features1x, labelx)):
    print("{}K-fold\n".format(K))

    # Fold-specific train/validation splits for the three feature sets in use.
    data_features5 = data_features5x[train_idx]
    te_data_features5 = data_features5x[val_idx]
    data_features6 = data_features6x[train_idx]
    te_data_features6 = data_features6x[val_idx]
    data_features7 = data_features7x[train_idx]
    te_data_features7 = data_features7x[val_idx]
    label = labelx[train_idx]
    te_label = labelx[val_idx]

    # Fit the three base classifiers on this fold's training split.
    ml_model5.fit(data_features5, label)
    print("5")
    ml_model6.fit(data_features6, label)
    print("6")
    ml_model7.fit(data_features7, label)
    print("7")

    # Evaluate each base model on the held-out split.
    y_pred5 = ml_model5.predict(te_data_features5)
    _record_fold_metrics(y_pred5, te_label, k_re_list5, MCC5, SP5, SN5)
    print("=========5===========")
    y_pred6 = ml_model6.predict(te_data_features6)
    _record_fold_metrics(y_pred6, te_label, k_re_list6, MCC6, SP6, SN6)
    print("=========6===========")
    y_pred7 = ml_model7.predict(te_data_features7)
    _record_fold_metrics(y_pred7, te_label, k_re_list7, MCC7, SP7, SN7)
    print("=========7===========")

    # Stack the base models' hard-label predictions column-wise (order 6, 5, 7
    # as in the original) and train the soft-voting meta-classifier on the
    # training-split predictions.
    t_pred5 = ml_model5.predict(data_features5).reshape(-1, 1)
    t_pred6 = ml_model6.predict(data_features6).reshape(-1, 1)
    t_pred7 = ml_model7.predict(data_features7).reshape(-1, 1)
    val_stack = np.concatenate(
        (y_pred6.reshape(-1, 1), y_pred5.reshape(-1, 1), y_pred7.reshape(-1, 1)), 1)
    train_stack = np.concatenate((t_pred6, t_pred5, t_pred7), 1)
    estimatorf.fit(train_stack, label)
    temf_y_predxf = estimatorf.predict(val_stack)
    # Fix: the original also predicted on the training stack into
    # temf_t_predxf, but that result was never used; the dead computation
    # has been dropped.
    _record_fold_metrics(temf_y_predxf, te_label, k_re_list1x, MCC1x, SP1x, SN1x)
    print("=========f===========")
# Ten-fold CV summary: mean specificity, sensitivity, MCC and accuracy for
# each base model still in use (5-7) and for the fused voting model ("f").
# Output is byte-identical to the original hand-unrolled print blocks.
for tag, sp, sn, mcc, acc in (
        (5, SP5, SN5, MCC5, k_re_list5),
        (6, SP6, SN6, MCC6, k_re_list6),
        (7, SP7, SN7, MCC7, k_re_list7),
        ("f", SP1x, SN1x, MCC1x, k_re_list1x),
):
    print(tag)
    print("SP: ", sum(sp) / 10)
    print("SN: ", sum(sn) / 10)
    print("MCC: ", sum(mcc) / 10)
    print("ACC: ", sum(acc) / 10)
|
[
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.KFold",
"sklearn.metrics.classification_report",
"sklearn.ensemble.ExtraTreesClassifier",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.matthews_corrcoef",
"numpy.array",
"sklearn.svm.SVC",
"feature.Feature",
"numpy.concatenate"
] |
[((6598, 6634), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (6618, 6634), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((6649, 6685), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (6669, 6685), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((6700, 6736), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (6720, 6736), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((6751, 6787), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (6771, 6787), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((6802, 6838), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (6822, 6838), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((6853, 6889), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (6873, 6889), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((6904, 6940), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (6924, 6940), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((6955, 6991), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (6975, 6991), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((7006, 7042), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (7026, 7042), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((7058, 7094), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': 
'(0)'}), '(random_state=0)\n', (7078, 7094), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((7567, 7617), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': '(999)'}), '(n_splits=10, shuffle=True, random_state=999)\n', (7572, 7617), False, 'from sklearn.model_selection import KFold\n'), ((589, 607), 'feature.Feature', 'Feature', (['file_name'], {}), '(file_name)\n', (596, 607), False, 'from feature import Feature\n'), ((693, 711), 'numpy.array', 'np.array', (['seqs_dde'], {}), '(seqs_dde)\n', (701, 711), True, 'import numpy as np\n'), ((730, 750), 'numpy.array', 'np.array', (['train_seqs'], {}), '(train_seqs)\n', (738, 750), True, 'import numpy as np\n'), ((772, 795), 'numpy.array', 'np.array', (['seqs_ksctriad'], {}), '(seqs_ksctriad)\n', (780, 795), True, 'import numpy as np\n'), ((853, 876), 'numpy.array', 'np.array', (['seqs_blosum62'], {}), '(seqs_blosum62)\n', (861, 876), True, 'import numpy as np\n'), ((928, 949), 'numpy.array', 'np.array', (['seqs_ctrial'], {}), '(seqs_ctrial)\n', (936, 949), True, 'import numpy as np\n'), ((967, 986), 'numpy.array', 'np.array', (['seqs_gtpc'], {}), '(seqs_gtpc)\n', (975, 986), True, 'import numpy as np\n'), ((1007, 1029), 'numpy.array', 'np.array', (['seqs_cksaagp'], {}), '(seqs_cksaagp)\n', (1015, 1029), True, 'import numpy as np\n'), ((1047, 1066), 'numpy.array', 'np.array', (['seqs_gaac'], {}), '(seqs_gaac)\n', (1055, 1066), True, 'import numpy as np\n'), ((1086, 1107), 'numpy.array', 'np.array', (['seqs_cksaap'], {}), '(seqs_cksaap)\n', (1094, 1107), True, 'import numpy as np\n'), ((1128, 1163), 'numpy.array', 'np.array', (['seqs_aaindex'], {'dtype': 'float'}), '(seqs_aaindex, dtype=float)\n', (1136, 1163), True, 'import numpy as np\n'), ((1183, 1202), 'numpy.array', 'np.array', (['seqs_paac'], {}), '(seqs_paac)\n', (1191, 1202), True, 'import numpy as np\n'), ((1220, 1239), 'numpy.array', 'np.array', (['seqs_gdpc'], {}), '(seqs_gdpc)\n', (1228, 
1239), True, 'import numpy as np\n'), ((1287, 1306), 'numpy.array', 'np.array', (['seqs_ctdt'], {}), '(seqs_ctdt)\n', (1295, 1306), True, 'import numpy as np\n'), ((1383, 1402), 'numpy.array', 'np.array', (['seqs_ctdd'], {}), '(seqs_ctdd)\n', (1391, 1402), True, 'import numpy as np\n'), ((1478, 1496), 'numpy.array', 'np.array', (['seqs_dpc'], {}), '(seqs_dpc)\n', (1486, 1496), True, 'import numpy as np\n'), ((1569, 1587), 'numpy.array', 'np.array', (['seqs_aac'], {}), '(seqs_aac)\n', (1577, 1587), True, 'import numpy as np\n'), ((1658, 1674), 'numpy.array', 'np.array', (['seqs_z'], {}), '(seqs_z)\n', (1666, 1674), True, 'import numpy as np\n'), ((1745, 1765), 'numpy.array', 'np.array', (['seqs_geary'], {}), '(seqs_geary)\n', (1753, 1765), True, 'import numpy as np\n'), ((1844, 1862), 'numpy.array', 'np.array', (['seqs_dde'], {}), '(seqs_dde)\n', (1852, 1862), True, 'import numpy as np\n'), ((2810, 2871), 'numpy.concatenate', 'np.concatenate', (['(seqs_aac, seqs_dde, seqs_paac, seqs_gaac)', '(1)'], {}), '((seqs_aac, seqs_dde, seqs_paac, seqs_gaac), 1)\n', (2824, 2871), True, 'import numpy as np\n'), ((2891, 2952), 'numpy.concatenate', 'np.concatenate', (['(seqs_aac, seqs_dde, seqs_paac, seqs_gdpc)', '(1)'], {}), '((seqs_aac, seqs_dde, seqs_paac, seqs_gdpc), 1)\n', (2905, 2952), True, 'import numpy as np\n'), ((2985, 3046), 'numpy.concatenate', 'np.concatenate', (['(seqs_aac, seqs_dde, seqs_paac, seqs_ctdt)', '(1)'], {}), '((seqs_aac, seqs_dde, seqs_paac, seqs_ctdt), 1)\n', (2999, 3046), True, 'import numpy as np\n'), ((3066, 3128), 'numpy.concatenate', 'np.concatenate', (['(seqs_dde, seqs_paac, seqs_gaac, seqs_gdpc)', '(1)'], {}), '((seqs_dde, seqs_paac, seqs_gaac, seqs_gdpc), 1)\n', (3080, 3128), True, 'import numpy as np\n'), ((3148, 3211), 'numpy.concatenate', 'np.concatenate', (['(seqs_dde, seqs_paac, seqs_ctdt, train_seqs)', '(1)'], {}), '((seqs_dde, seqs_paac, seqs_ctdt, train_seqs), 1)\n', (3162, 3211), True, 'import numpy as np\n'), ((3231, 3305), 
'numpy.concatenate', 'np.concatenate', (['(seqs_cksaap, seqs_aac, seqs_dde, seqs_gtpc, seqs_ctdt)', '(1)'], {}), '((seqs_cksaap, seqs_aac, seqs_dde, seqs_gtpc, seqs_ctdt), 1)\n', (3245, 3305), True, 'import numpy as np\n'), ((3325, 3402), 'numpy.concatenate', 'np.concatenate', (['(seqs_cksaap, seqs_aac, seqs_dde, seqs_gdpc, seqs_aaindex)', '(1)'], {}), '((seqs_cksaap, seqs_aac, seqs_dde, seqs_gdpc, seqs_aaindex), 1)\n', (3339, 3402), True, 'import numpy as np\n'), ((3421, 3494), 'numpy.concatenate', 'np.concatenate', (['(seqs_aac, seqs_dde, seqs_paac, seqs_gaac, train_seqs)', '(1)'], {}), '((seqs_aac, seqs_dde, seqs_paac, seqs_gaac, train_seqs), 1)\n', (3435, 3494), True, 'import numpy as np\n'), ((3526, 3600), 'numpy.concatenate', 'np.concatenate', (['(seqs_aac, seqs_dde, seqs_gaac, seqs_geary, train_seqs)', '(1)'], {}), '((seqs_aac, seqs_dde, seqs_gaac, seqs_geary, train_seqs), 1)\n', (3540, 3600), True, 'import numpy as np\n'), ((3620, 3693), 'numpy.concatenate', 'np.concatenate', (['(seqs_dde, seqs_paac, seqs_gtpc, seqs_gdpc, seqs_ctdt)', '(1)'], {}), '((seqs_dde, seqs_paac, seqs_gtpc, seqs_gdpc, seqs_ctdt), 1)\n', (3634, 3693), True, 'import numpy as np\n'), ((3713, 3817), 'numpy.concatenate', 'np.concatenate', (['(seqs_cksaap, seqs_aac, seqs_dde, seqs_gtpc, seqs_gaac, seqs_aaindex,\n train_seqs)', '(1)'], {}), '((seqs_cksaap, seqs_aac, seqs_dde, seqs_gtpc, seqs_gaac,\n seqs_aaindex, train_seqs), 1)\n', (3727, 3817), True, 'import numpy as np\n'), ((3831, 3920), 'numpy.concatenate', 'np.concatenate', (['(seqs_aac, seqs_dde, seqs_dpc, seqs_paac, seqs_gaac, seqs_aaindex)', '(1)'], {}), '((seqs_aac, seqs_dde, seqs_dpc, seqs_paac, seqs_gaac,\n seqs_aaindex), 1)\n', (3845, 3920), True, 'import numpy as np\n'), ((3935, 4029), 'numpy.concatenate', 'np.concatenate', (['(seqs_aac, seqs_dde, seqs_gaac, seqs_ctdt, seqs_aaindex, seqs_ksctriad)', '(1)'], {}), '((seqs_aac, seqs_dde, seqs_gaac, seqs_ctdt, seqs_aaindex,\n seqs_ksctriad), 1)\n', (3949, 4029), True, 'import 
numpy as np\n'), ((4044, 4158), 'numpy.concatenate', 'np.concatenate', (['(seqs_aac, seqs_dde, seqs_dpc, seqs_paac, seqs_gdpc, seqs_aaindex,\n seqs_ctrial, train_seqs)', '(1)'], {}), '((seqs_aac, seqs_dde, seqs_dpc, seqs_paac, seqs_gdpc,\n seqs_aaindex, seqs_ctrial, train_seqs), 1)\n', (4058, 4158), True, 'import numpy as np\n'), ((4171, 4249), 'numpy.concatenate', 'np.concatenate', (['(seqs_aac, seqs_dde, seqs_gtpc, seqs_ksctriad, seqs_cksaap)', '(1)'], {}), '((seqs_aac, seqs_dde, seqs_gtpc, seqs_ksctriad, seqs_cksaap), 1)\n', (4185, 4249), True, 'import numpy as np\n'), ((5345, 5360), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (5353, 5360), True, 'import numpy as np\n'), ((16321, 16367), 'numpy.concatenate', 'np.concatenate', (['(y_pred6, y_pred5, y_pred7)', '(1)'], {}), '((y_pred6, y_pred5, y_pred7), 1)\n', (16335, 16367), True, 'import numpy as np\n'), ((16483, 16529), 'numpy.concatenate', 'np.concatenate', (['(t_pred6, t_pred5, t_pred7)', '(1)'], {}), '((t_pred6, t_pred5, t_pred7), 1)\n', (16497, 16529), True, 'import numpy as np\n'), ((11976, 12009), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['te_label', 'y_pred5'], {}), '(te_label, y_pred5)\n', (11990, 12009), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((12028, 12064), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['te_label', 'y_pred5'], {}), '(te_label, y_pred5)\n', (12045, 12064), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((12268, 12318), 'sklearn.metrics.classification_report', 'classification_report', (['y_pred5', 'te_label'], {'digits': '(4)'}), '(y_pred5, te_label, digits=4)\n', (12289, 12318), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((12433, 12466), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['te_label', 'y_pred6'], {}), '(te_label, y_pred6)\n', (12447, 12466), False, 'from 
sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((12485, 12521), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['te_label', 'y_pred6'], {}), '(te_label, y_pred6)\n', (12502, 12521), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((12725, 12775), 'sklearn.metrics.classification_report', 'classification_report', (['y_pred6', 'te_label'], {'digits': '(4)'}), '(y_pred6, te_label, digits=4)\n', (12746, 12775), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((12890, 12923), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['te_label', 'y_pred7'], {}), '(te_label, y_pred7)\n', (12904, 12923), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((12942, 12978), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['te_label', 'y_pred7'], {}), '(te_label, y_pred7)\n', (12959, 12978), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((13184, 13234), 'sklearn.metrics.classification_report', 'classification_report', (['y_pred7', 'te_label'], {'digits': '(4)'}), '(y_pred7, te_label, digits=4)\n', (13205, 13234), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((16809, 16848), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['te_label', 'temf_y_predxf'], {}), '(te_label, temf_y_predxf)\n', (16823, 16848), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((16868, 16910), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['te_label', 'temf_y_predxf'], {}), '(te_label, temf_y_predxf)\n', (16885, 16910), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((17128, 17184), 'sklearn.metrics.classification_report', 'classification_report', (['temf_y_predxf', 'te_label'], 
{'digits': '(4)'}), '(temf_y_predxf, te_label, digits=4)\n', (17149, 17184), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((7163, 7183), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (7181, 7183), False, 'from sklearn.linear_model import LogisticRegression\n'), ((7203, 7228), 'sklearn.svm.SVC', 'svm.SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (7210, 7228), False, 'from sklearn import svm\n'), ((12093, 12143), 'sklearn.metrics.classification_report', 'classification_report', (['y_pred5', 'te_label'], {'digits': '(4)'}), '(y_pred5, te_label, digits=4)\n', (12114, 12143), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((12188, 12238), 'sklearn.metrics.classification_report', 'classification_report', (['y_pred5', 'te_label'], {'digits': '(4)'}), '(y_pred5, te_label, digits=4)\n', (12209, 12238), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((12550, 12600), 'sklearn.metrics.classification_report', 'classification_report', (['y_pred6', 'te_label'], {'digits': '(4)'}), '(y_pred6, te_label, digits=4)\n', (12571, 12600), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((12645, 12695), 'sklearn.metrics.classification_report', 'classification_report', (['y_pred6', 'te_label'], {'digits': '(4)'}), '(y_pred6, te_label, digits=4)\n', (12666, 12695), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((13007, 13057), 'sklearn.metrics.classification_report', 'classification_report', (['y_pred7', 'te_label'], {'digits': '(4)'}), '(y_pred7, te_label, digits=4)\n', (13028, 13057), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((13102, 13152), 'sklearn.metrics.classification_report', 'classification_report', (['y_pred7', 
'te_label'], {'digits': '(4)'}), '(y_pred7, te_label, digits=4)\n', (13123, 13152), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((16940, 16996), 'sklearn.metrics.classification_report', 'classification_report', (['temf_y_predxf', 'te_label'], {'digits': '(4)'}), '(temf_y_predxf, te_label, digits=4)\n', (16961, 16996), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n'), ((17042, 17098), 'sklearn.metrics.classification_report', 'classification_report', (['temf_y_predxf', 'te_label'], {'digits': '(4)'}), '(temf_y_predxf, te_label, digits=4)\n', (17063, 17098), False, 'from sklearn.metrics import classification_report, accuracy_score, matthews_corrcoef\n')]
|
# Copyright 2019 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import re
import time
import unittest
import flask
from flask_ipban.ip_ban import IpBan
page_text = 'Hello, world. {}'
localhost = '127.0.0.1'
def hello_world(parameter=None):
return page_text.format(parameter)
class TestIpBan(unittest.TestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.ban_seconds = 2
self.ip_ban = IpBan(self.app, ban_seconds=self.ban_seconds, ban_count=5, secret_key='yo-yo-yo', ipc=False)
self.ip_ban.ip_whitelist_remove(localhost)
self.client = self.app.test_client()
self.app.route('/')(hello_world)
def test_cidr(self):
self.assertFalse(self.ip_ban.test_pattern_blocklist(ip='192.0.2.1'))
self.ip_ban.block_cidr('192.0.2.0/28')
self.assertTrue(self.ip_ban.test_pattern_blocklist(ip='192.0.2.1'))
self.assertFalse(self.ip_ban.test_pattern_blocklist(ip='172.16.17.32'))
def testAddRemoveIpWhitelist(self):
self.assertEqual(self.ip_ban.ip_whitelist_add(localhost), 1)
for x in range(self.ip_ban.ban_count * 2):
response = self.client.get('/doesnotexist')
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertTrue(self.ip_ban.ip_whitelist_remove(localhost))
for x in range(self.ip_ban.ban_count * 2):
response = self.client.get('/doesnotexist')
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
self.assertFalse(self.ip_ban.ip_whitelist_remove(localhost))
def testAddRemoveIpWhitelistByList(self):
self.assertEqual(self.ip_ban.ip_whitelist_add([localhost]), 1)
for x in range(self.ip_ban.ban_count * 2):
response = self.client.get('/doesnotexist')
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertTrue(self.ip_ban.ip_whitelist_remove([localhost]))
for x in range(self.ip_ban.ban_count * 2):
response = self.client.get('/doesnotexist')
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
self.assertFalse(self.ip_ban.ip_whitelist_remove(localhost))
def testAddRemoveUrlWhitelist(self):
test_pattern = '^/no_exist/[0-9]+$'
test_url = '/no_exist'
self.assertTrue(re.match(test_pattern, test_url + '/123'))
self.assertFalse(re.match(test_pattern, test_url))
existing_count = len(self.ip_ban._url_whitelist_patterns)
self.assertEqual(self.ip_ban.url_pattern_add(test_pattern), existing_count + 1)
for x in range(self.ip_ban.ban_count * 2):
self.client.get('{}/{}'.format(test_url, x))
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertTrue(self.ip_ban.url_pattern_remove(test_pattern))
for x in range(self.ip_ban.ban_count * 2):
self.client.get('{}/{}'.format(test_url, x))
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
self.assertFalse(self.ip_ban.url_pattern_remove(localhost))
def testUrlWhitelistString(self):
test_url = '/no_exist'
existing_count = len(self.ip_ban._url_whitelist_patterns)
self.assertEqual(self.ip_ban.url_pattern_add(test_url, 'string'), existing_count + 1)
for x in range(self.ip_ban.ban_count * 2):
response = self.client.get('{}?{}'.format(test_url, x))
self.assertEqual(response.status_code, 404)
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def testBlock(self):
self.assertEqual(self.ip_ban.block([localhost, '172.16.58.3']), 2)
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
def testTimeout(self):
test_url = '/doesnotexist'
for x in range(self.ip_ban.ban_count * 2):
self.client.get('{}/{}'.format(test_url, x))
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
time.sleep(self.ban_seconds + 1)
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def testManualBlockTimeout(self):
self.ip_ban.block([localhost])
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
time.sleep(self.ban_seconds + 1)
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def testBlockPermanent(self):
self.ip_ban.block([localhost], permanent=True)
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
time.sleep(self.ban_seconds + 2)
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
def testAdd(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.ip_ban.add(ip=localhost, url='/')
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
for x in range(self.ip_ban.ban_count + 1):
self.ip_ban.add(ip=localhost, url='/')
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
def testGetBlockList(self):
block_list = self.ip_ban.get_block_list()
self.assertFalse(block_list)
for x in range(self.ip_ban.ban_count + 1):
self.ip_ban.add(ip=localhost, url='/')
block_list = self.ip_ban.get_block_list()
self.assertIn(localhost, block_list)
def testKeepOnBlocking(self):
# block should not timeout if spamming continues
test_url = '/doesnotexist'
for x in range(self.ip_ban.ban_count * 2):
self.client.get('{}/{}'.format(test_url, x))
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
for x in range(self.ban_seconds * 2):
time.sleep(1)
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
def testAddRemoveUrlBlocklist(self):
test_pattern = '^/bad/[0-9]+$'
test_url = '/bad'
self.app.route('/good/<int:parameter>')(hello_world)
self.assertTrue(re.match(test_pattern, test_url + '/123'))
self.assertFalse(re.match(test_pattern, test_url))
# no block
response = self.client.get('/good/{}'.format(456))
self.assertEqual(response.status_code, 200)
# getting index page is blocked after block 404 url get
self.assertEqual(self.ip_ban.url_block_pattern_add(test_pattern), 1)
response = self.client.get('{}/{}'.format(test_url, 123))
self.assertEqual(response.status_code, 404)
# should now be banned
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
# ban removed after timeout
time.sleep(self.ban_seconds + 1)
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
response = self.client.get('{}/{}'.format(test_url, 123))
self.assertEqual(response.status_code, 404)
# ban remains even after pattern removed
# caused by previous 404
self.assertTrue(self.ip_ban.url_block_pattern_remove(test_pattern))
response = self.client.get('{}/{}'.format(test_url, 456))
self.assertEqual(response.status_code, 403)
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
# ban removed after timeout
time.sleep(self.ban_seconds + 1)
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
response = self.client.get('{}/{}'.format(test_url, 123))
self.assertEqual(response.status_code, 404)
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
# already removed
self.assertFalse(self.ip_ban.url_block_pattern_remove(localhost))
def testLoadNuisances(self):
self.app.route('/regextest/page.<parameter>')(hello_world)
# test is ok before nuisances loaded
response = self.client.get('/regextest/page.{e}?yolo={e}'.format(e='jsp'))
self.assertEqual(response.status_code, 200)
self.ip_ban.load_nuisances()
# test blocked extensions
for e in ['php', 'jsp', 'aspx', 'do', 'cgi']:
self.assertTrue(self.ip_ban.test_pattern_blocklist('/regextest/page.{}'.format(e)), e)
# and with parameters
self.assertTrue(self.ip_ban.test_pattern_blocklist('/regextest/page.{e}?extension={e}'.format(e=e)), e)
# test blocked url strings and patterns
for e in ['/admin/assets/js/views/login.js', '/vip163mx00.mxmail.netease.com:25', '/manager/html',
'/wp-login.php']:
self.assertTrue(self.ip_ban.test_pattern_blocklist(e), e)
# test blocked ip
for e in ['172.16.17.32']:
self.assertTrue(self.ip_ban.test_pattern_blocklist(e, ip=e), e)
self.assertFalse(self.ip_ban.test_pattern_blocklist(e, ip=localhost), e)
# test real blocking
e = 'jsp'
response = self.client.get('/regextest/page.{}'.format(e))
self.assertEqual(response.status_code, 200, e)
# goto 404
response = self.client.get('/doesnotexist/page.{}'.format(e))
self.assertEqual(response.status_code, 404, e)
# this ip is now blocked
response = self.client.get('/')
self.assertEqual(response.status_code, 403)
# allow-regex
response = self.client.get('/.well-known/flong')
self.assertEqual(response.status_code, 404)
def test_remove(self):
self.ip_ban.block(['100.200.300.400'])
self.assertFalse(self.ip_ban.remove('1.2.3.4'))
self.assertTrue(self.ip_ban.remove('100.200.300.400'))
self.ip_ban.block([localhost])
response = self.client.get('/')
self.assertEqual(403, response.status_code)
self.assertTrue(self.ip_ban.remove(localhost))
response = self.client.get('/')
self.assertEqual(200, response.status_code)
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run()
|
[
"unittest.TextTestRunner",
"flask.Flask",
"re.match",
"time.sleep",
"flask_ipban.ip_ban.IpBan"
] |
[((10897, 10935), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'failfast': '(True)'}), '(failfast=True)\n', (10920, 10935), False, 'import unittest\n'), ((911, 932), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (922, 932), False, 'import flask\n'), ((984, 1081), 'flask_ipban.ip_ban.IpBan', 'IpBan', (['self.app'], {'ban_seconds': 'self.ban_seconds', 'ban_count': '(5)', 'secret_key': '"""yo-yo-yo"""', 'ipc': '(False)'}), "(self.app, ban_seconds=self.ban_seconds, ban_count=5, secret_key=\n 'yo-yo-yo', ipc=False)\n", (989, 1081), False, 'from flask_ipban.ip_ban import IpBan\n'), ((4720, 4752), 'time.sleep', 'time.sleep', (['(self.ban_seconds + 1)'], {}), '(self.ban_seconds + 1)\n', (4730, 4752), False, 'import time\n'), ((5023, 5055), 'time.sleep', 'time.sleep', (['(self.ban_seconds + 1)'], {}), '(self.ban_seconds + 1)\n', (5033, 5055), False, 'import time\n'), ((5338, 5370), 'time.sleep', 'time.sleep', (['(self.ban_seconds + 2)'], {}), '(self.ban_seconds + 2)\n', (5348, 5370), False, 'import time\n'), ((7583, 7615), 'time.sleep', 'time.sleep', (['(self.ban_seconds + 1)'], {}), '(self.ban_seconds + 1)\n', (7593, 7615), False, 'import time\n'), ((8240, 8272), 'time.sleep', 'time.sleep', (['(self.ban_seconds + 1)'], {}), '(self.ban_seconds + 1)\n', (8250, 8272), False, 'import time\n'), ((2962, 3003), 're.match', 're.match', (['test_pattern', "(test_url + '/123')"], {}), "(test_pattern, test_url + '/123')\n", (2970, 3003), False, 'import re\n'), ((3030, 3062), 're.match', 're.match', (['test_pattern', 'test_url'], {}), '(test_pattern, test_url)\n', (3038, 3062), False, 'import re\n'), ((6614, 6627), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6624, 6627), False, 'import time\n'), ((6922, 6963), 're.match', 're.match', (['test_pattern', "(test_url + '/123')"], {}), "(test_pattern, test_url + '/123')\n", (6930, 6963), False, 'import re\n'), ((6990, 7022), 're.match', 're.match', (['test_pattern', 'test_url'], {}), 
'(test_pattern, test_url)\n', (6998, 7022), False, 'import re\n')]
|
"""
Tensorflow implementation of DeepFM [1]
Reference:
[1] DeepFM: A Factorization-Machine based Neural Network for CTR Prediction,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
"""
import numpy as np
import tensorflow as tf
import os
import example.config as config
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score
from time import time
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
# from yellowfin import YFOptimizer
class DeepFM(BaseEstimator, TransformerMixin):
def __init__(self, feature_size, field_size,
embedding_size=8, dropout_fm=[1.0, 1.0],
deep_layers=[32, 32], dropout_deep=[0.5, 0.5, 0.5],
deep_layers_activation=tf.nn.relu,
epoch=10, batch_size=256,
learning_rate=0.001, optimizer_type="adam",
batch_norm=0, batch_norm_decay=0.995,
verbose=False, random_seed=2016,
use_fm=True, use_deep=True,
loss_type="logloss", eval_metric=roc_auc_score,
l2_reg=0.0, greater_is_better=True, is_finetune=False):
assert (use_fm or use_deep)
assert loss_type in ["logloss", "mse"], \
"loss_type can be either 'logloss' for classification task or 'mse' for regression task"
self.feature_size = feature_size # denote as M, size of the feature dictionary
self.field_size = field_size # denote as F, size of the feature fields
self.embedding_size = embedding_size # denote as K, size of the feature embedding
self.dropout_fm = dropout_fm
self.deep_layers = deep_layers
self.dropout_deep = dropout_deep
self.deep_layers_activation = deep_layers_activation
self.use_fm = use_fm
self.use_deep = use_deep
self.l2_reg = l2_reg
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.optimizer_type = optimizer_type
self.batch_norm = batch_norm
self.batch_norm_decay = batch_norm_decay
self.verbose = verbose
self.random_seed = random_seed
self.loss_type = loss_type
self.eval_metric = eval_metric
self.greater_is_better = greater_is_better
self.train_result, self.valid_result = [], []
self.is_finetune = is_finetune
self._init_graph()
def _init_graph(self):
self.graph = tf.Graph()
with self.graph.as_default():
tf.set_random_seed(self.random_seed)
self.sess = self._init_session()
self.feat_index = tf.placeholder(tf.int32, shape=[None, None], name="feat_index") # None * F
self.feat_value = tf.placeholder(tf.float32, shape=[None, None], name="feat_value") # None * F
self.label = tf.placeholder(tf.float32, shape=[None, 1], name="label") # None * 1
self.dropout_keep_fm = tf.placeholder(tf.float32, shape=[None], name="dropout_keep_fm")
self.dropout_keep_deep = tf.placeholder(tf.float32, shape=[None], name="dropout_keep_deep")
self.train_phase = tf.placeholder(tf.bool, name="train_phase")
self.weights = self._initialize_weights()
# model
self.embeddings = tf.nn.embedding_lookup(self.weights["feature_embeddings"], self.feat_index) # None * F * K
feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1])
self.embeddings = tf.multiply(self.embeddings, feat_value)
# ---------- first order term ----------
self.y_first_order = tf.nn.embedding_lookup(self.weights["feature_bias"], self.feat_index) # None * F * 1
self.y_first_order = tf.reduce_sum(tf.multiply(self.y_first_order, feat_value), 2) # None * F
self.y_first_order = tf.nn.dropout(self.y_first_order, self.dropout_keep_fm[0]) # None * F
# ---------- second order term ---------------
# sum_square part
self.summed_features_emb = tf.reduce_sum(self.embeddings, 1) # None * K
self.summed_features_emb_square = tf.square(self.summed_features_emb) # None * K
# square_sum part
self.squared_features_emb = tf.square(self.embeddings)
self.squared_sum_features_emb = tf.reduce_sum(self.squared_features_emb, 1) # None * K
# second order
self.y_second_order = 0.5 * tf.subtract(self.summed_features_emb_square, self.squared_sum_features_emb) # None * K
self.y_second_order = tf.nn.dropout(self.y_second_order, self.dropout_keep_fm[1]) # None * K
# ---------- Deep component ----------
self.y_deep = tf.reshape(self.embeddings, shape=[-1, self.field_size * self.embedding_size]) # None * (F*K)
self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[0])
for i in range(0, len(self.deep_layers)):
self.y_deep = tf.add(tf.matmul(self.y_deep, self.weights["layer_%d" %i]), self.weights["bias_%d"%i]) # None * layer[i] * 1
if self.batch_norm:
self.y_deep = self.batch_norm_layer(self.y_deep, train_phase=self.train_phase, scope_bn="bn_%d" %i) # None * layer[i] * 1
self.y_deep = self.deep_layers_activation(self.y_deep)
self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[1+i]) # dropout at each Deep layer
# ---------- DeepFM ----------
if self.use_fm and self.use_deep:
concat_input = tf.concat([self.y_first_order, self.y_second_order, self.y_deep], axis=1)
elif self.use_fm:
concat_input = tf.concat([self.y_first_order, self.y_second_order], axis=1)
elif self.use_deep:
concat_input = self.y_deep
self.out = tf.add(tf.matmul(concat_input, self.weights["concat_projection"]), self.weights["concat_bias"])
# loss
if self.loss_type == "logloss":
self.out = tf.nn.sigmoid(self.out)
self.loss = tf.losses.log_loss(self.label, self.out)
elif self.loss_type == "mse":
self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))
# l2 regularization on weights
if self.l2_reg > 0:
self.loss += tf.contrib.layers.l2_regularizer(
self.l2_reg)(self.weights["concat_projection"])
if self.use_deep:
for i in range(len(self.deep_layers)):
self.loss += tf.contrib.layers.l2_regularizer(
self.l2_reg)(self.weights["layer_%d"%i])
# for nn in self.graph.as_graph_def().node:
# print(nn.name)
# optimizer
if self.optimizer_type == "adam":
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-8).minimize(self.loss)
elif self.optimizer_type == "adagrad":
self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate,
initial_accumulator_value=1e-8).minimize(self.loss)
elif self.optimizer_type == "gd":
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
elif self.optimizer_type == "momentum":
self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).minimize(
self.loss)
# elif self.optimizer_type == "yellowfin":
# self.optimizer = YFOptimizer(learning_rate=self.learning_rate, momentum=0.0).minimize(
# self.loss)
# init
self.saver = tf.train.Saver()
if self.is_finetune:
self._load_old_model()
# opt = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95)
# self.optimizer = opt.minimize(self.loss)
# self.sess.run(tf.variables_initializer(opt.variables()))
# pass
else:
init = tf.global_variables_initializer()
self.sess.run(init)
# number of params
total_parameters = 0
for variable in self.weights.values():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
if self.verbose > 0:
print("#params: %d" % total_parameters)
def _init_session(self):
config = tf.ConfigProto(device_count={"gpu": 0})
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def _load_old_model(self):
weights = dict()
#saver = tf.train.import_meta_graph(os.path.join(config.SUB_DIR, 'model/0', 'fm-series-%d.meta' % (self.epoch-1)))
self.saver.restore(self.sess, tf.train.latest_checkpoint(os.path.join(config.SUB_DIR, 'model/0')))
# graph = tf.get_default_graph()
# weights["feature_embeddings"] = graph.get_tensor_by_name("feature_embeddings:0")
# weights["feature_bias"] = graph.get_tensor_by_name("feature_bias:0")
# weights["layer_0"] = graph.get_tensor_by_name("layer_0:0")
# weights["bias_0"] = graph.get_tensor_by_name("bias_0:0")
# num_layer = len(self.deep_layers)
# for i in range(1, num_layer):
# weights["layer_%d" % i] = graph.get_tensor_by_name("layer_%d:0" % i)
# weights["bias_%d"% i] = graph.get_tensor_by_name("bias_%d:0" % i)
#
# weights["concat_projection"] = graph.get_tensor_by_name("concat_projection:0")
# weights["concat_bias"] = graph.get_tensor_by_name("concat_bias:0")
#
# self.weights = weights
def _initialize_weights(self):
weights = dict()
# embeddings
weights["feature_embeddings"] = tf.Variable(
tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.01),
name="feature_embeddings") # feature_size * K
weights["feature_bias"] = tf.Variable(
tf.random_uniform([self.feature_size, 1], 0.0, 1.0), name="feature_bias") # feature_size * 1
# deep layers
num_layer = len(self.deep_layers)
input_size = self.field_size * self.embedding_size
glorot = np.sqrt(2.0 / (input_size + self.deep_layers[0]))
weights["layer_0"] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(input_size, self.deep_layers[0])), dtype=np.float32, name="layer_0")
weights["bias_0"] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),
dtype=np.float32, name="bias_0") # 1 * layers[0]
for i in range(1, num_layer):
glorot = np.sqrt(2.0 / (self.deep_layers[i-1] + self.deep_layers[i]))
weights["layer_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[i-1], self.deep_layers[i])),
dtype=np.float32, name="layer_%d" % i) # layers[i-1] * layers[i]
weights["bias_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),
dtype=np.float32, name="bias_%d" % i ) # 1 * layer[i]
# final concat projection layer
if self.use_fm and self.use_deep:
input_size = self.field_size + self.embedding_size + self.deep_layers[-1]
elif self.use_fm:
input_size = self.field_size + self.embedding_size
elif self.use_deep:
input_size = self.deep_layers[-1]
glorot = np.sqrt(2.0 / (input_size + 1))
weights["concat_projection"] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
dtype=np.float32, name="concat_projection") # layers[i-1]*layers[i]
weights["concat_bias"] = tf.Variable(tf.constant(0.01), dtype=np.float32, name="concat_bias")
return weights
def batch_norm_layer(self, x, train_phase, scope_bn):
bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
is_training=True, reuse=None, trainable=True, scope=scope_bn)
bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
is_training=False, reuse=True, trainable=True, scope=scope_bn)
z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
return z
def get_batch(self, Xi, Xv, y, batch_size, index):
start = index * batch_size
end = (index+1) * batch_size
end = end if end < len(y) else len(y)
return Xi[start:end], Xv[start:end], [[y_] for y_ in y[start:end]]
# shuffle three lists simutaneously
def shuffle_in_unison_scary(self, a, b, c):
rng_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
np.random.set_state(rng_state)
np.random.shuffle(c)
def fit_on_batch(self, Xi, Xv, y):
feed_dict = {self.feat_index: Xi,
self.feat_value: Xv,
self.label: y,
self.dropout_keep_fm: self.dropout_fm,
self.dropout_keep_deep: self.dropout_deep,
self.train_phase: True}
loss, opt = self.sess.run((self.loss, self.optimizer), feed_dict=feed_dict)
return loss
def fit(self, Xi_train, Xv_train, y_train,
Xi_valid=None, Xv_valid=None, y_valid=None,
early_stopping=False, refit=False, fold_seq = 0):
"""
:param Xi_train: [[ind1_1, ind1_2, ...], [ind2_1, ind2_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]
indi_j is the feature index of feature field j of sample i in the training set
:param Xv_train: [[val1_1, val1_2, ...], [val2_1, val2_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]
vali_j is the feature value of feature field j of sample i in the training set
vali_j can be either binary (1/0, for binary/categorical features) or float (e.g., 10.24, for numerical features)
:param y_train: label of each sample in the training set
:param Xi_valid: list of list of feature indices of each sample in the validation set
:param Xv_valid: list of list of feature values of each sample in the validation set
:param y_valid: label of each sample in the validation set
:param early_stopping: perform early stopping or not
:param refit: refit the model on the train+valid dataset or not
:return: None
"""
has_valid = Xv_valid is not None
for epoch in range(self.epoch):
t1 = time()
self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i)
self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
self.saver.save(self.sess, os.path.join(config.SUB_DIR, 'model', str(fold_seq), 'fm-series'), global_step=epoch)
# evaluate training and validation datasets
train_result = self.evaluate(Xi_train, Xv_train, y_train)
self.train_result.append(train_result)
if has_valid:
valid_result = self.evaluate(Xi_valid, Xv_valid, y_valid)
self.valid_result.append(valid_result)
if self.verbose > 0 and epoch % self.verbose == 0:
if has_valid:
print("[%d] train-result=%.4f, valid-result=%.4f [%.1f s]"
% (epoch + 1, train_result, valid_result, time() - t1))
else:
print("[%d] train-result=%.4f [%.1f s]"
% (epoch + 1, train_result, time() - t1))
if has_valid and early_stopping and self.training_termination(self.valid_result):
break
# fit a few more epoch on train+valid until result reaches the best_train_score
if has_valid and refit:
if self.greater_is_better:
best_valid_score = max(self.valid_result)
else:
best_valid_score = min(self.valid_result)
best_epoch = self.valid_result.index(best_valid_score)
best_train_score = self.train_result[best_epoch]
Xi_train = Xi_train + Xi_valid
Xv_train = Xv_train + Xv_valid
y_train = y_train + y_valid
for epoch in range(100):
self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train,
self.batch_size, i)
self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
# check
train_result = self.evaluate(Xi_train, Xv_train, y_train)
if abs(train_result - best_train_score) < 0.001 or \
(self.greater_is_better and train_result > best_train_score) or \
((not self.greater_is_better) and train_result < best_train_score):
break
def training_termination(self, valid_result):
if len(valid_result) > 5:
if self.greater_is_better:
if valid_result[-1] < valid_result[-2] and \
valid_result[-2] < valid_result[-3] and \
valid_result[-3] < valid_result[-4] and \
valid_result[-4] < valid_result[-5]:
return True
else:
if valid_result[-1] > valid_result[-2] and \
valid_result[-2] > valid_result[-3] and \
valid_result[-3] > valid_result[-4] and \
valid_result[-4] > valid_result[-5]:
return True
return False
def predict(self, Xi, Xv):
"""
:param Xi: list of list of feature indices of each sample in the dataset
:param Xv: list of list of feature values of each sample in the dataset
:return: predicted probability of each sample
"""
# dummy y
dummy_y = [1] * len(Xi)
batch_index = 0
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
y_pred = None
while len(Xi_batch) > 0:
num_batch = len(y_batch)
feed_dict = {self.feat_index: Xi_batch,
self.feat_value: Xv_batch,
self.label: y_batch,
self.dropout_keep_fm: [1.0] * len(self.dropout_fm),
self.dropout_keep_deep: [1.0] * len(self.dropout_deep),
self.train_phase: False}
batch_out = self.sess.run(self.out, feed_dict=feed_dict)
if batch_index == 0:
y_pred = np.reshape(batch_out, (num_batch,))
else:
y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))
batch_index += 1
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
return y_pred
def evaluate(self, Xi, Xv, y):
"""
:param Xi: list of list of feature indices of each sample in the dataset
:param Xv: list of list of feature values of each sample in the dataset
:param y: label of each sample in the dataset
:return: metric of the evaluation
"""
y_pred = self.predict(Xi, Xv)
return self.eval_metric(y, y_pred)
|
[
"tensorflow.cond",
"tensorflow.reduce_sum",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.reshape",
"numpy.random.set_state",
"tensorflow.train.AdamOptimizer",
"tensorflow.ConfigProto",
"tensorflow.multiply",
"tensorflow.matmul",
"tensorflow.losses.log_loss",
"numpy.random.normal",
"os.path.join",
"tensorflow.subtract",
"tensorflow.concat",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"numpy.reshape",
"numpy.random.shuffle",
"tensorflow.train.Saver",
"tensorflow.nn.embedding_lookup",
"tensorflow.global_variables_initializer",
"tensorflow.train.AdagradOptimizer",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.random_normal",
"tensorflow.train.MomentumOptimizer",
"tensorflow.Graph",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.random_uniform",
"tensorflow.contrib.layers.python.layers.batch_norm",
"numpy.random.get_state",
"time.time",
"tensorflow.square",
"tensorflow.nn.sigmoid",
"tensorflow.nn.dropout",
"numpy.sqrt"
] |
[((2513, 2523), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2521, 2523), True, 'import tensorflow as tf\n'), ((8938, 8977), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'gpu': 0}"}), "(device_count={'gpu': 0})\n", (8952, 8977), True, 'import tensorflow as tf\n'), ((9040, 9065), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (9050, 9065), True, 'import tensorflow as tf\n'), ((10739, 10788), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (input_size + self.deep_layers[0]))'], {}), '(2.0 / (input_size + self.deep_layers[0]))\n', (10746, 10788), True, 'import numpy as np\n'), ((12087, 12118), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (input_size + 1))'], {}), '(2.0 / (input_size + 1))\n', (12094, 12118), True, 'import numpy as np\n'), ((12554, 12717), 'tensorflow.contrib.layers.python.layers.batch_norm', 'batch_norm', (['x'], {'decay': 'self.batch_norm_decay', 'center': '(True)', 'scale': '(True)', 'updates_collections': 'None', 'is_training': '(True)', 'reuse': 'None', 'trainable': '(True)', 'scope': 'scope_bn'}), '(x, decay=self.batch_norm_decay, center=True, scale=True,\n updates_collections=None, is_training=True, reuse=None, trainable=True,\n scope=scope_bn)\n', (12564, 12717), True, 'from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm\n'), ((12763, 12927), 'tensorflow.contrib.layers.python.layers.batch_norm', 'batch_norm', (['x'], {'decay': 'self.batch_norm_decay', 'center': '(True)', 'scale': '(True)', 'updates_collections': 'None', 'is_training': '(False)', 'reuse': '(True)', 'trainable': '(True)', 'scope': 'scope_bn'}), '(x, decay=self.batch_norm_decay, center=True, scale=True,\n updates_collections=None, is_training=False, reuse=True, trainable=True,\n scope=scope_bn)\n', (12773, 12927), True, 'from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm\n'), ((12966, 13028), 'tensorflow.cond', 'tf.cond', (['train_phase', '(lambda : bn_train)', '(lambda : 
bn_inference)'], {}), '(train_phase, lambda : bn_train, lambda : bn_inference)\n', (12973, 13028), True, 'import tensorflow as tf\n'), ((13404, 13425), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (13423, 13425), True, 'import numpy as np\n'), ((13434, 13454), 'numpy.random.shuffle', 'np.random.shuffle', (['a'], {}), '(a)\n', (13451, 13454), True, 'import numpy as np\n'), ((13463, 13493), 'numpy.random.set_state', 'np.random.set_state', (['rng_state'], {}), '(rng_state)\n', (13482, 13493), True, 'import numpy as np\n'), ((13502, 13522), 'numpy.random.shuffle', 'np.random.shuffle', (['b'], {}), '(b)\n', (13519, 13522), True, 'import numpy as np\n'), ((13531, 13561), 'numpy.random.set_state', 'np.random.set_state', (['rng_state'], {}), '(rng_state)\n', (13550, 13561), True, 'import numpy as np\n'), ((13570, 13590), 'numpy.random.shuffle', 'np.random.shuffle', (['c'], {}), '(c)\n', (13587, 13590), True, 'import numpy as np\n'), ((2575, 2611), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['self.random_seed'], {}), '(self.random_seed)\n', (2593, 2611), True, 'import tensorflow as tf\n'), ((2687, 2750), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""feat_index"""'}), "(tf.int32, shape=[None, None], name='feat_index')\n", (2701, 2750), True, 'import tensorflow as tf\n'), ((2793, 2858), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None]', 'name': '"""feat_value"""'}), "(tf.float32, shape=[None, None], name='feat_value')\n", (2807, 2858), True, 'import tensorflow as tf\n'), ((2896, 2953), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]', 'name': '"""label"""'}), "(tf.float32, shape=[None, 1], name='label')\n", (2910, 2953), True, 'import tensorflow as tf\n'), ((3001, 3065), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]', 'name': '"""dropout_keep_fm"""'}), "(tf.float32, shape=[None], 
name='dropout_keep_fm')\n", (3015, 3065), True, 'import tensorflow as tf\n'), ((3103, 3169), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]', 'name': '"""dropout_keep_deep"""'}), "(tf.float32, shape=[None], name='dropout_keep_deep')\n", (3117, 3169), True, 'import tensorflow as tf\n'), ((3201, 3244), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""train_phase"""'}), "(tf.bool, name='train_phase')\n", (3215, 3244), True, 'import tensorflow as tf\n'), ((3351, 3426), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (["self.weights['feature_embeddings']", 'self.feat_index'], {}), "(self.weights['feature_embeddings'], self.feat_index)\n", (3373, 3426), True, 'import tensorflow as tf\n'), ((3468, 3527), 'tensorflow.reshape', 'tf.reshape', (['self.feat_value'], {'shape': '[-1, self.field_size, 1]'}), '(self.feat_value, shape=[-1, self.field_size, 1])\n', (3478, 3527), True, 'import tensorflow as tf\n'), ((3558, 3598), 'tensorflow.multiply', 'tf.multiply', (['self.embeddings', 'feat_value'], {}), '(self.embeddings, feat_value)\n', (3569, 3598), True, 'import tensorflow as tf\n'), ((3686, 3755), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (["self.weights['feature_bias']", 'self.feat_index'], {}), "(self.weights['feature_bias'], self.feat_index)\n", (3708, 3755), True, 'import tensorflow as tf\n'), ((3911, 3969), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.y_first_order', 'self.dropout_keep_fm[0]'], {}), '(self.y_first_order, self.dropout_keep_fm[0])\n', (3924, 3969), True, 'import tensorflow as tf\n'), ((4110, 4143), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.embeddings', '(1)'], {}), '(self.embeddings, 1)\n', (4123, 4143), True, 'import tensorflow as tf\n'), ((4202, 4237), 'tensorflow.square', 'tf.square', (['self.summed_features_emb'], {}), '(self.summed_features_emb)\n', (4211, 4237), True, 'import tensorflow as tf\n'), ((4321, 4347), 'tensorflow.square', 'tf.square', 
(['self.embeddings'], {}), '(self.embeddings)\n', (4330, 4347), True, 'import tensorflow as tf\n'), ((4392, 4435), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.squared_features_emb', '(1)'], {}), '(self.squared_features_emb, 1)\n', (4405, 4435), True, 'import tensorflow as tf\n'), ((4638, 4697), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.y_second_order', 'self.dropout_keep_fm[1]'], {}), '(self.y_second_order, self.dropout_keep_fm[1])\n', (4651, 4697), True, 'import tensorflow as tf\n'), ((4788, 4866), 'tensorflow.reshape', 'tf.reshape', (['self.embeddings'], {'shape': '[-1, self.field_size * self.embedding_size]'}), '(self.embeddings, shape=[-1, self.field_size * self.embedding_size])\n', (4798, 4866), True, 'import tensorflow as tf\n'), ((4908, 4961), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.y_deep', 'self.dropout_keep_deep[0]'], {}), '(self.y_deep, self.dropout_keep_deep[0])\n', (4921, 4961), True, 'import tensorflow as tf\n'), ((7997, 8013), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (8011, 8013), True, 'import tensorflow as tf\n'), ((10315, 10384), 'tensorflow.random_normal', 'tf.random_normal', (['[self.feature_size, self.embedding_size]', '(0.0)', '(0.01)'], {}), '([self.feature_size, self.embedding_size], 0.0, 0.01)\n', (10331, 10384), True, 'import tensorflow as tf\n'), ((10504, 10555), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.feature_size, 1]', '(0.0)', '(1.0)'], {}), '([self.feature_size, 1], 0.0, 1.0)\n', (10521, 10555), True, 'import tensorflow as tf\n'), ((10843, 10920), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'glorot', 'size': '(input_size, self.deep_layers[0])'}), '(loc=0, scale=glorot, size=(input_size, self.deep_layers[0]))\n', (10859, 10920), True, 'import numpy as np\n'), ((10996, 11064), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'glorot', 'size': '(1, self.deep_layers[0])'}), '(loc=0, scale=glorot, size=(1, 
self.deep_layers[0]))\n', (11012, 11064), True, 'import numpy as np\n'), ((11231, 11293), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (self.deep_layers[i - 1] + self.deep_layers[i]))'], {}), '(2.0 / (self.deep_layers[i - 1] + self.deep_layers[i]))\n', (11238, 11293), True, 'import numpy as np\n'), ((12195, 12254), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'glorot', 'size': '(input_size, 1)'}), '(loc=0, scale=glorot, size=(input_size, 1))\n', (12211, 12254), True, 'import numpy as np\n'), ((12394, 12411), 'tensorflow.constant', 'tf.constant', (['(0.01)'], {}), '(0.01)\n', (12405, 12411), True, 'import tensorflow as tf\n'), ((15371, 15377), 'time.time', 'time', ([], {}), '()\n', (15375, 15377), False, 'from time import time\n'), ((3818, 3861), 'tensorflow.multiply', 'tf.multiply', (['self.y_first_order', 'feat_value'], {}), '(self.y_first_order, feat_value)\n', (3829, 3861), True, 'import tensorflow as tf\n'), ((4516, 4591), 'tensorflow.subtract', 'tf.subtract', (['self.summed_features_emb_square', 'self.squared_sum_features_emb'], {}), '(self.summed_features_emb_square, self.squared_sum_features_emb)\n', (4527, 4591), True, 'import tensorflow as tf\n'), ((5434, 5491), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.y_deep', 'self.dropout_keep_deep[1 + i]'], {}), '(self.y_deep, self.dropout_keep_deep[1 + i])\n', (5447, 5491), True, 'import tensorflow as tf\n'), ((5640, 5713), 'tensorflow.concat', 'tf.concat', (['[self.y_first_order, self.y_second_order, self.y_deep]'], {'axis': '(1)'}), '([self.y_first_order, self.y_second_order, self.y_deep], axis=1)\n', (5649, 5713), True, 'import tensorflow as tf\n'), ((5941, 5999), 'tensorflow.matmul', 'tf.matmul', (['concat_input', "self.weights['concat_projection']"], {}), "(concat_input, self.weights['concat_projection'])\n", (5950, 5999), True, 'import tensorflow as tf\n'), ((6121, 6144), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['self.out'], {}), '(self.out)\n', (6134, 6144), True, 'import tensorflow 
as tf\n'), ((6173, 6213), 'tensorflow.losses.log_loss', 'tf.losses.log_loss', (['self.label', 'self.out'], {}), '(self.label, self.out)\n', (6191, 6213), True, 'import tensorflow as tf\n'), ((8384, 8417), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8415, 8417), True, 'import tensorflow as tf\n'), ((9312, 9351), 'os.path.join', 'os.path.join', (['config.SUB_DIR', '"""model/0"""'], {}), "(config.SUB_DIR, 'model/0')\n", (9324, 9351), False, 'import os\n'), ((11359, 11454), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'glorot', 'size': '(self.deep_layers[i - 1], self.deep_layers[i])'}), '(loc=0, scale=glorot, size=(self.deep_layers[i - 1], self.\n deep_layers[i]))\n', (11375, 11454), True, 'import numpy as np\n'), ((11597, 11665), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'glorot', 'size': '(1, self.deep_layers[i])'}), '(loc=0, scale=glorot, size=(1, self.deep_layers[i]))\n', (11613, 11665), True, 'import numpy as np\n'), ((19815, 19850), 'numpy.reshape', 'np.reshape', (['batch_out', '(num_batch,)'], {}), '(batch_out, (num_batch,))\n', (19825, 19850), True, 'import numpy as np\n'), ((5053, 5105), 'tensorflow.matmul', 'tf.matmul', (['self.y_deep', "self.weights['layer_%d' % i]"], {}), "(self.y_deep, self.weights['layer_%d' % i])\n", (5062, 5105), True, 'import tensorflow as tf\n'), ((5775, 5835), 'tensorflow.concat', 'tf.concat', (['[self.y_first_order, self.y_second_order]'], {'axis': '(1)'}), '([self.y_first_order, self.y_second_order], axis=1)\n', (5784, 5835), True, 'import tensorflow as tf\n'), ((6437, 6482), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['self.l2_reg'], {}), '(self.l2_reg)\n', (6469, 6482), True, 'import tensorflow as tf\n'), ((6298, 6331), 'tensorflow.subtract', 'tf.subtract', (['self.label', 'self.out'], {}), '(self.label, self.out)\n', (6309, 6331), True, 'import tensorflow as tf\n'), ((6964, 7064), 
'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate', 'beta1': '(0.9)', 'beta2': '(0.999)', 'epsilon': '(1e-08)'}), '(learning_rate=self.learning_rate, beta1=0.9, beta2=\n 0.999, epsilon=1e-08)\n', (6986, 7064), True, 'import tensorflow as tf\n'), ((19918, 19953), 'numpy.reshape', 'np.reshape', (['batch_out', '(num_batch,)'], {}), '(batch_out, (num_batch,))\n', (19928, 19953), True, 'import numpy as np\n'), ((6669, 6714), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['self.l2_reg'], {}), '(self.l2_reg)\n', (6701, 6714), True, 'import tensorflow as tf\n'), ((7219, 7315), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', ([], {'learning_rate': 'self.learning_rate', 'initial_accumulator_value': '(1e-08)'}), '(learning_rate=self.learning_rate,\n initial_accumulator_value=1e-08)\n', (7244, 7315), True, 'import tensorflow as tf\n'), ((7469, 7536), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (7502, 7536), True, 'import tensorflow as tf\n'), ((7642, 7717), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': 'self.learning_rate', 'momentum': '(0.95)'}), '(learning_rate=self.learning_rate, momentum=0.95)\n', (7668, 7717), True, 'import tensorflow as tf\n'), ((16419, 16425), 'time.time', 'time', ([], {}), '()\n', (16423, 16425), False, 'from time import time\n'), ((16567, 16573), 'time.time', 'time', ([], {}), '()\n', (16571, 16573), False, 'from time import time\n')]
|
#!/usr/bin/env python3
from KorAPClient import KorAPClient, KorAPConnection
import plotly.express as px
QUERY = "Hello World"
YEARS = range(2010, 2019)
COUNTRIES = ["DE", "CH"]
kcon = KorAPConnection(verbose=True)
vcs = ["textType=/Zeit.*/ & pubPlaceKey=" + c + " & pubDate in " + str(y) for c in COUNTRIES for y in YEARS]
df = KorAPClient.ipm(kcon.frequencyQuery(QUERY, vcs))
print(df)
df['Year'] = [y for c in COUNTRIES for y in YEARS]
df['Country'] = [c for c in COUNTRIES for y in YEARS]
fig = px.line(df, title=QUERY, x="Year", y="ipm", color="Country",
error_y="conf.high", error_y_minus="conf.low")
fig.show()
|
[
"plotly.express.line",
"KorAPClient.KorAPConnection"
] |
[((186, 215), 'KorAPClient.KorAPConnection', 'KorAPConnection', ([], {'verbose': '(True)'}), '(verbose=True)\n', (201, 215), False, 'from KorAPClient import KorAPClient, KorAPConnection\n'), ((503, 615), 'plotly.express.line', 'px.line', (['df'], {'title': 'QUERY', 'x': '"""Year"""', 'y': '"""ipm"""', 'color': '"""Country"""', 'error_y': '"""conf.high"""', 'error_y_minus': '"""conf.low"""'}), "(df, title=QUERY, x='Year', y='ipm', color='Country', error_y=\n 'conf.high', error_y_minus='conf.low')\n", (510, 615), True, 'import plotly.express as px\n')]
|
import traceback
import mysql.connector
from ..database.database import *
from ..model.Book import *
class BorrowTools:
def BookData(self, idReader):
db = DatabaseTools()
conn = db.getConn()
result_set = None
ls = []
try :
sql = "select book.idBook,nameBook,price,book.kind,author,publisher from reader,borrow,book where book.idBook = borrow.idBook and reader.idReader = borrow.idReader and reader.idReader = %s "
answer = (str(idReader),)
mycursor = conn.cursor()
mycursor.execute(sql,answer)
result_set = mycursor.fetchall()
for row in result_set:
book = Book()
book.setIdBook(row[0])
book.setNameBook(row[1])
book.setPrice(row[2])
book.setType(row[3])
book.setAuthor(row[4])
book.setPublisher(row[5])
ls.append(book.list_return())
mycursor.close()
conn.close()
except Exception as e:
traceback.print_exc()
return ls
def BookData_Search_idBook(self, idBook):
db = DatabaseTools()
conn = db.getConn()
result_set = None
ls = []
try :
sql = "select book.idBook,nameBook,price,book.kind,author,publisher from book where book.idBook = %s"
answer = (str(idBook),)
mycursor = conn.cursor()
mycursor.execute(sql,answer)
result_set = mycursor.fetchall()
for row in result_set:
book = Book()
book.setIdBook(row[0])
book.setNameBook(row[1])
book.setPrice(row[2])
book.setType(row[3])
book.setAuthor(row[4])
book.setPublisher(row[5])
ls.append(book.list_return())
mycursor.close()
conn.close()
except Exception as e:
traceback.print_exc()
return ls
def whetherInStock(self, idBook):
db = DatabaseTools()
conn = db.getConn()
try :
sql = "select * from borrow"
mycursor = conn.cursor()
mycursor.execute(sql)
result_set = mycursor.fetchall()
for row in result_set :
if row[1] != None :
if row[1] == idBook :
return False
mycursor.close()
conn.close()
except Exception as e:
traceback.print_exc()
return True
def BorrowBook(self, idReader, idBook):
i = 0
db = DatabaseTools()
conn = db.getConn()
try :
sql = "insert into borrow (idReader,idbook,lendDate,dueDate,overtime) values (%s,%s,CURRENT_DATE(),DATE_ADD(CURRENT_DATE(),INTERVAL 2 MONTH),'否')"
answer = (str(idReader),str(idBook))
mycursor = conn.cursor()
mycursor.execute(sql,answer)
i = mycursor.rowcount
mycursor.close()
conn.commit()
conn.close()
except Exception as e:
traceback.print_exc()
return i
def ReturnBook(self, idBook):
i = 0
db = DatabaseTools()
conn = db.getConn()
try :
sql = "delete from Borrow where idBook= %s"
answer = (str(idBook),)
mycursor = conn.cursor()
mycursor.execute(sql,answer)
i = mycursor.rowcount
mycursor.close()
conn.commit()
conn.close()
except Exception as e:
traceback.print_exc()
return i
|
[
"traceback.print_exc"
] |
[((1081, 1102), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1100, 1102), False, 'import traceback\n'), ((2003, 2024), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2022, 2024), False, 'import traceback\n'), ((2563, 2584), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2582, 2584), False, 'import traceback\n'), ((3182, 3203), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3201, 3203), False, 'import traceback\n'), ((3672, 3693), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3691, 3693), False, 'import traceback\n')]
|
# coding: utf-8
"""
Idomoo API
OpenAPI spec version: 2.0
Contact: <EMAIL>
"""
import pprint
import six
class JPGOutput(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'height': 'int',
'time': 'float',
'suffix': 'str',
'overlay': 'str',
'overlay_alignment': 'list[str]',
'overlay_scale': 'str',
'label': 'str'
}
attribute_map = {
'height': 'height',
'time': 'time',
'suffix': 'suffix',
'overlay': 'overlay',
'overlay_alignment': 'overlay_alignment',
'overlay_scale': 'overlay_scale',
'label': 'label'
}
def __init__(self, height=None, time=None, suffix=None, overlay=None, overlay_alignment=None, overlay_scale='fit',
label=None):
"""JPGOutput - a model defined in Swagger"""
self._height = None
self._time = None
self._suffix = None
self._overlay = None
self._overlay_alignment = None
self._overlay_scale = None
self._label = None
self.discriminator = None
self.height = height
self.time = time
if suffix is not None:
self.suffix = suffix
if overlay is not None:
self.overlay = overlay
if overlay_alignment is not None:
self.overlay_alignment = overlay_alignment
if overlay_scale is not None:
self.overlay_scale = overlay_scale
if label is not None:
self.label = label
@property
def height(self):
"""Gets the height of this JPGOutput.
Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller
resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically
calculated to keep the aspect ratio.
:return: The height of this JPGOutput.
:rtype: int
"""
return self._height
@height.setter
def height(self, height):
"""Sets the height of this JPGOutput.
Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller
resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically
calculated to keep the aspect ratio.
:param height: The height of this JPGOutput.
:type: int
"""
if height is None:
raise ValueError("Invalid value for `height`, must not be `None`")
self._height = height
@property
def time(self):
"""Gets the time of this JPGOutput.
The frame of the video to render. Can also be negative number that will be calculated from the end of the video.
:return: The time of this JPGOutput.
:rtype: float
"""
return self._time
@time.setter
def time(self, time):
"""Sets the time of this JPGOutput.
The frame of the video to render. Can also be negative number that will be calculated from the end of the video.
:param time: The time of this JPGOutput.
:type: float
"""
if time is None:
raise ValueError("Invalid value for `time`, must not be `None`")
self._time = time
@property
def suffix(self):
"""Gets the suffix of this JPGOutput.
Unique ending of the file name so several outputs can be created then identified. Required if there is more
then 1 video output.
:return: The suffix of this JPGOutput.
:rtype: str
"""
return self._suffix
@suffix.setter
def suffix(self, suffix):
"""Sets the suffix of this JPGOutput.
Unique ending of the file name so several outputs can be created then identified. Required if there is more
then 1 video output.
:param suffix: The suffix of this JPGOutput.
:type: str
"""
self._suffix = suffix
@property
def overlay(self):
"""Gets the overlay of this JPGOutput.
Path to overlay image, such as: play button or watermark.
:return: The overlay of this JPGOutput.
:rtype: str
"""
return self._overlay
@overlay.setter
def overlay(self, overlay):
"""Sets the overlay of this JPGOutput.
Path to overlay image, such as: play button or watermark.
:param overlay: The overlay of this JPGOutput.
:type: str
"""
self._overlay = overlay
@property
def overlay_alignment(self):
"""Gets the overlay_alignment of this JPGOutput.
Alignment for overlay image in case the image doesn’t fit the video perfectly. The first item in the array is
X. The second is Y.
:return: The overlay_alignment of this JPGOutput.
:rtype: list[str]
"""
return self._overlay_alignment
@overlay_alignment.setter
def overlay_alignment(self, overlay_alignment):
"""Sets the overlay_alignment of this JPGOutput.
Alignment for overlay image in case the image doesn’t fit the video perfectly. The first item in the array is
X. The second is Y.
:param overlay_alignment: The overlay_alignment of this JPGOutput.
:type: list[str]
"""
allowed_values = ["left", "center", "right", "top", "middle", "bottom"]
if not set(overlay_alignment).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]"
.format(", ".join(map(str, set(overlay_alignment) - set(allowed_values))),
", ".join(map(str, allowed_values)))
)
self._overlay_alignment = overlay_alignment
@property
def overlay_scale(self):
"""Gets the overlay_scale of this JPGOutput.
Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's
completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the
image according to the alignment settings. * Fill: scale the image up or down so it completely fills the
video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't
resize the overlay image.
:return: The overlay_scale of this JPGOutput.
:rtype: str
"""
return self._overlay_scale
@overlay_scale.setter
def overlay_scale(self, overlay_scale):
"""Sets the overlay_scale of this JPGOutput.
Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's
completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the
image according to the alignment settings. * Fill: scale the image up or down so it completely fills the
video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't
resize the overlay image.
:param overlay_scale: The overlay_scale of this JPGOutput.
:type: str
"""
allowed_values = ["fit", "fill", "none"]
if overlay_scale not in allowed_values:
raise ValueError(
"Invalid value for `overlay_scale` ({0}), must be one of {1}"
.format(overlay_scale, allowed_values)
)
self._overlay_scale = overlay_scale
@property
def label(self):
"""Gets the label of this JPGOutput.
This label is another way to identify this specific output. The label is returned in the response,
but does not appear in the file name.
:return: The label of this JPGOutput.
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this JPGOutput.
This label is another way to identify this specific output. The label is returned in the response,
but does not appear in the file name.
:param label: The label of this JPGOutput.
:type: str
"""
self._label = label
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JPGOutput):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"six.iteritems"
] |
[((8785, 8818), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (8798, 8818), False, 'import six\n')]
|
import os
from os import listdir
from os.path import isfile
from ScriptComponent.ScriptConfig import ScriptConfig
class PythonInstaller(object):
script_config = ScriptConfig()
def install(self, eigenschap):
self.upgradePip()
self.installTarball()
self.installLocalLibrary(eigenschap)
self.getFilesFromDirectory(self.script_config.get_location())
def upgradePip(self):
os.system("python -m pip install --upgrade pip")
def installTarball(self):
os.system("pip install download-tarball")
def installLocalLibrary(self, eigenschap):
libraryPath = self.script_config.get_location() + os.path.splitext(eigenschap)[0] + "\\Libraries\\"
libraries = self.getFilesFromDirectory(libraryPath)
for library in libraries:
os.system("pip install \"" + libraryPath + library + "/\"")
def getFilesFromDirectory(self, scriptPath):
fileList = []
for file in listdir(scriptPath):
if isfile(scriptPath + file):
fileList.append(file)
return fileList
|
[
"os.system",
"os.path.isfile",
"os.path.splitext",
"ScriptComponent.ScriptConfig.ScriptConfig",
"os.listdir"
] |
[((168, 182), 'ScriptComponent.ScriptConfig.ScriptConfig', 'ScriptConfig', ([], {}), '()\n', (180, 182), False, 'from ScriptComponent.ScriptConfig import ScriptConfig\n'), ((425, 473), 'os.system', 'os.system', (['"""python -m pip install --upgrade pip"""'], {}), "('python -m pip install --upgrade pip')\n", (434, 473), False, 'import os\n'), ((513, 554), 'os.system', 'os.system', (['"""pip install download-tarball"""'], {}), "('pip install download-tarball')\n", (522, 554), False, 'import os\n'), ((970, 989), 'os.listdir', 'listdir', (['scriptPath'], {}), '(scriptPath)\n', (977, 989), False, 'from os import listdir\n'), ((818, 875), 'os.system', 'os.system', (['(\'pip install "\' + libraryPath + library + \'/"\')'], {}), '(\'pip install "\' + libraryPath + library + \'/"\')\n', (827, 875), False, 'import os\n'), ((1006, 1031), 'os.path.isfile', 'isfile', (['(scriptPath + file)'], {}), '(scriptPath + file)\n', (1012, 1031), False, 'from os.path import isfile\n'), ((662, 690), 'os.path.splitext', 'os.path.splitext', (['eigenschap'], {}), '(eigenschap)\n', (678, 690), False, 'import os\n')]
|
#!/usr/bin/python
from cobra.internal.codec.jsoncodec import (parseJSONError, fromJSONStr, toJSONStr)
from cobra.mit.access import MoDirectory
from cobra.mit.session import LoginSession
from cobra.mit.request import ClassQuery
import os
import re
import sys
import six
import json
import urllib3
import socket
import requests
import argparse
import shortuuid
import getpass
from configparser import ConfigParser
from .sync import apply_desired_state
BRAHMA_FQDN = os.getenv('BRAHMA_URL', 'brahma.cisco.com')
BRAHMA_PORT = os.getenv('BRAHMA_PORT', None)
BRAHMA_URL = 'http://' + BRAHMA_FQDN
if(BRAHMA_PORT != None):
BRAHMA_URL += ':' + BRAHMA_PORT
SERVER_URL = BRAHMA_URL + '/api/fabric'
IP_ADDR = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]).){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
HOST_FQDN = "^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9-]*[a-zA-Z0-9]).)*([A-Za-z]|[A-Za-z][A-Za-z0-9-]*[A-Za-z0-9])$"
# Original Regex Strings
# IP_ADDR = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
# HOST_FQDN = "^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$"
class FabricConnection():
# Fabric Connection
def __init__(self, sourcePort, destSwitch, destPort):
self.sourcePort = sourcePort
self.destSwitch = destSwitch
self.destPort = destPort
class FabricNode():
# Fabric Node
def __init__(self, switchName, switchRn, switchRole, switchSerial, switchModel, switchID):
self.name = switchName
self.rn = switchRn
self.role = switchRole
self.serial = switchSerial
self.model = switchModel
self.id = switchID
self.connections = []
def add_connection(self, sourcePort, destPort, destSwitch):
# Add Connection
new_connection = FabricConnection(sourcePort, destPort, destSwitch)
self.connections.append(new_connection)
class FabricTopology():
# Fabric Topology
def __init__(self):
self.token = shortuuid.uuid()
self.nodes = []
self.state = {}
def add_node(self, switchName, switchRn, switchRole, switchSerial, switchModel, switchID):
new_node = FabricNode(switchName, switchRn, switchRole, switchSerial, switchModel, switchID)
self.nodes.append(new_node)
def get_nodes(self):
node_list = []
for node in self.nodes:
node_list.append(node)
return node_list
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def main():
urllib3.disable_warnings()
args = parseArgs()
if(args.new):
newFabric()
if(args.apply):
applyConfig()
def parseArgs():
arg_parser = argparse.ArgumentParser(prog='brahma', description='Brahma CLI Utility')
arg_parser.version = '1.0'
arg_parser.add_argument('-n', dest='new', action='store_true', help='New Fabric', required=False)
arg_parser.add_argument('-a', dest='apply', action='store_true', help='Apply Config', required=False)
arg_parser.add_argument('-v', action='version', help='Show Version')
args = arg_parser.parse_args()
return args
def aciLogin():
print('')
apic_addr = six.moves.input("APIC IP Address/FQDN: ")
is_ip = re.match(IP_ADDR, apic_addr)
is_host = re.match(HOST_FQDN, apic_addr)
if (is_ip):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.5)
try:
s.connect((apic_addr,443))
except:
print("APIC IP Address unreachable!")
sys.exit()
if (is_host):
try:
socket.gethostbyname(apic_addr)
except socket.gaierror:
print("APIC FQDN unreachable!")
sys.exit()
if not is_ip and not is_host:
print("Not a valid IP Address or FQDN!")
sys.exit()
apic_user = six.moves.input("APIC Username: ")
apic_pass = getpass.getpass(prompt='APIC Password: ', stream=None)
apic_url = "https://" + apic_addr
session = LoginSession(apic_url, apic_user, apic_pass)
moDir = MoDirectory(session)
try:
moDir.login()
except requests.exceptions.RequestException:
print("Cannot connect to APIC!")
sys.exit()
return moDir
def applyConfig():
moDir = aciLogin()
fabric_token = six.moves.input("Fabric Token: ")
apply_url = SERVER_URL + '/' + fabric_token
print(apply_url)
req = requests.get(url = apply_url)
req_json = json.loads(req.content)
state = req_json['state']
# req_json = req.json()
# parsed_json = (json.load(req_json['state']))
# print(req)
apply_desired_state(moDir, state)
def newFabric():
moDir = aciLogin()
cq = ClassQuery('fabricNode')
nodes = moDir.query(cq)
fNodes = FabricTopology()
for node in nodes:
if(node.role == 'spine' or node.role=='leaf' or node.role=='controller'):
node_name = str(node.name)
node_real = str(node.rn)
node_role = str(node.role)
node_serial = str(node.serial)
node_model = str(node.model)
node_id = str(node.id)
fNodes.add_node(node_name, node_real, node_role, node_serial, node_model, node_id)
# print('')
for fNode in fNodes.get_nodes():
# print('{}({}) -> Role: {}'.format(fNode.name, fNode.serial, fNode.role))
# print('-----')
nodeDn = 'topology/pod-1/' + fNode.rn + '/sys'
intfs = moDir.lookupByClass("l1PhysIf", parentDn=nodeDn)
for intf in intfs:
if(intf.usage == 'fabric') and (intf.switchingSt == 'enabled'):
int_id = str(intf.id)
lldpDn = nodeDn + '/lldp/inst/if-[' + int_id + ']'
lldpns = moDir.lookupByClass("lldpAdjEp", parentDn=lldpDn)
for lldpn in lldpns:
dest_name = lldpn.sysName
port_desc = lldpn.portDesc
dest_length = len(port_desc) - 1
left_bracket = port_desc.find('[') - dest_length
dest_port = port_desc[left_bracket:-1]
fNode.add_connection(int_id, dest_name, dest_port)
# print('{} -> {}({})'.format(int_id, dest_name, dest_port))
# print('')
# print('--------------------------')
# print('')
node_json = fNodes.toJSON()
# print(node_json)
json_data = json.loads(node_json)
req = requests.post(url = SERVER_URL, json = json_data)
# print(req.status_code)
resp_json = json.loads(req.text)
# print(json.dumps(resp_json, indent=4, sort_keys=False))
print('')
print('Fabric Uploaded!')
print('')
print('Token: {}'.format(resp_json['token']))
print('')
print('Please navigate to {} and use the above token.'.format(BRAHMA_URL))
print('')
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"json.loads",
"shortuuid.uuid",
"getpass.getpass",
"socket.socket",
"cobra.mit.access.MoDirectory",
"re.match",
"json.dumps",
"urllib3.disable_warnings",
"socket.gethostbyname",
"cobra.mit.request.ClassQuery",
"requests.get",
"cobra.mit.session.LoginSession",
"requests.post",
"six.moves.input",
"os.getenv",
"sys.exit"
] |
[((492, 535), 'os.getenv', 'os.getenv', (['"""BRAHMA_URL"""', '"""brahma.cisco.com"""'], {}), "('BRAHMA_URL', 'brahma.cisco.com')\n", (501, 535), False, 'import os\n'), ((551, 581), 'os.getenv', 'os.getenv', (['"""BRAHMA_PORT"""', 'None'], {}), "('BRAHMA_PORT', None)\n", (560, 581), False, 'import os\n'), ((2609, 2635), 'urllib3.disable_warnings', 'urllib3.disable_warnings', ([], {}), '()\n', (2633, 2635), False, 'import urllib3\n'), ((2772, 2844), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""brahma"""', 'description': '"""Brahma CLI Utility"""'}), "(prog='brahma', description='Brahma CLI Utility')\n", (2795, 2844), False, 'import argparse\n'), ((3253, 3294), 'six.moves.input', 'six.moves.input', (['"""APIC IP Address/FQDN: """'], {}), "('APIC IP Address/FQDN: ')\n", (3268, 3294), False, 'import six\n'), ((3308, 3336), 're.match', 're.match', (['IP_ADDR', 'apic_addr'], {}), '(IP_ADDR, apic_addr)\n', (3316, 3336), False, 'import re\n'), ((3350, 3380), 're.match', 're.match', (['HOST_FQDN', 'apic_addr'], {}), '(HOST_FQDN, apic_addr)\n', (3358, 3380), False, 'import re\n'), ((3871, 3905), 'six.moves.input', 'six.moves.input', (['"""APIC Username: """'], {}), "('APIC Username: ')\n", (3886, 3905), False, 'import six\n'), ((3921, 3975), 'getpass.getpass', 'getpass.getpass', ([], {'prompt': '"""APIC Password: """', 'stream': 'None'}), "(prompt='APIC Password: ', stream=None)\n", (3936, 3975), False, 'import getpass\n'), ((4031, 4075), 'cobra.mit.session.LoginSession', 'LoginSession', (['apic_url', 'apic_user', 'apic_pass'], {}), '(apic_url, apic_user, apic_pass)\n', (4043, 4075), False, 'from cobra.mit.session import LoginSession\n'), ((4087, 4107), 'cobra.mit.access.MoDirectory', 'MoDirectory', (['session'], {}), '(session)\n', (4098, 4107), False, 'from cobra.mit.access import MoDirectory\n'), ((4321, 4354), 'six.moves.input', 'six.moves.input', (['"""Fabric Token: """'], {}), "('Fabric Token: ')\n", (4336, 4354), False, 'import six\n'), 
((4437, 4464), 'requests.get', 'requests.get', ([], {'url': 'apply_url'}), '(url=apply_url)\n', (4449, 4464), False, 'import requests\n'), ((4481, 4504), 'json.loads', 'json.loads', (['req.content'], {}), '(req.content)\n', (4491, 4504), False, 'import json\n'), ((4720, 4744), 'cobra.mit.request.ClassQuery', 'ClassQuery', (['"""fabricNode"""'], {}), "('fabricNode')\n", (4730, 4744), False, 'from cobra.mit.request import ClassQuery\n'), ((6275, 6296), 'json.loads', 'json.loads', (['node_json'], {}), '(node_json)\n', (6285, 6296), False, 'import json\n'), ((6308, 6353), 'requests.post', 'requests.post', ([], {'url': 'SERVER_URL', 'json': 'json_data'}), '(url=SERVER_URL, json=json_data)\n', (6321, 6353), False, 'import requests\n'), ((6403, 6423), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (6413, 6423), False, 'import json\n'), ((2066, 2082), 'shortuuid.uuid', 'shortuuid.uuid', ([], {}), '()\n', (2080, 2082), False, 'import shortuuid\n'), ((2518, 2590), 'json.dumps', 'json.dumps', (['self'], {'default': '(lambda o: o.__dict__)', 'sort_keys': '(True)', 'indent': '(4)'}), '(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)\n', (2528, 2590), False, 'import json\n'), ((3407, 3456), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (3420, 3456), False, 'import socket\n'), ((3843, 3853), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3851, 3853), False, 'import sys\n'), ((3639, 3670), 'socket.gethostbyname', 'socket.gethostbyname', (['apic_addr'], {}), '(apic_addr)\n', (3659, 3670), False, 'import socket\n'), ((4228, 4238), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4236, 4238), False, 'import sys\n'), ((3592, 3602), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3600, 3602), False, 'import sys\n'), ((3746, 3756), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3754, 3756), False, 'import sys\n')]
|
'''
DIO using a serial port + microcontroller instead of the NIDAQ card
'''
import serial
from collections import defaultdict
import struct
from numpy import binary_repr
from .dio.parse import MSG_TYPE_ROWBYTE, MSG_TYPE_REGISTER
import time
import threading
import pyfirmata
def construct_word(aux, msg_type, data, n_bits_data=8, n_bits_msg_type=3):
    """Pack (aux, msg_type, data) fields into a single integer word.

    Bit layout, most-significant first: ``aux | msg_type | data``. The data
    field occupies the lowest ``n_bits_data`` bits and the message type the
    ``n_bits_msg_type`` bits directly above it.
    """
    shifted_aux = aux << (n_bits_data + n_bits_msg_type)
    shifted_msg_type = msg_type << n_bits_data
    return shifted_aux | shifted_msg_type | data
def parse_word(word, n_bits_data=8, n_bits_msg_type=3):
    """Split a packed word back into its (aux, msg_type, data) fields.

    Inverse of ``construct_word`` for the same bit widths.
    """
    data_mask = (1 << n_bits_data) - 1
    msg_type_mask = (1 << n_bits_msg_type) - 1
    data = word & data_mask
    msg_type = (word >> n_bits_data) & msg_type_mask
    aux = word >> (n_bits_data + n_bits_msg_type)
    return aux, msg_type, data
baudrate = 115200
class SendRowByte(object):
    '''
    Interface for sending task-generated sync data to the recording system
    through an Arduino on a serial port (instead of an NIDAQ card).

    Only an 8-bit data word corresponding to the 8 lower bits of the
    current row number of the HDF table is sent for each data packet.
    '''
    def __init__(self, device=None):
        '''
        Constructor for SendRowByte

        Parameters
        ----------
        device : string, optional
            Linux name of the serial port for the Arduino board, defined by
            setserial. Currently unused; kept for interface compatibility.

        Returns
        -------
        SendRowByte instance
        '''
        # Map from system name -> registration index (1-based, in
        # registration order).
        self.systems = dict()
        self.port = serial.Serial('/dev/arduino_neurosync', baudrate=baudrate)
        self.n_systems = 0
        # Per-system count of rows sent so far.
        self.rowcount = defaultdict(int)

    def close(self):
        '''
        Stop recording and release access to the Arduino serial port.
        '''
        # 'p' is the firmware's stop-recording command. pyserial under
        # Python 3 requires bytes, not str (bug fix: was 'p').
        self.port.write(b'p')
        self.port.close()

    def register(self, system, dtype):
        '''
        Send information about the registration system (name and datatype) in string form, one byte at a time.

        Parameters
        ----------
        system : string
            Name of the system being registered
        dtype : np.dtype instance
            Datatype of incoming data, for later decoding of the binary data
            during analysis. Currently unused; only the name is transmitted.

        Returns
        -------
        None
        '''
        # Save the index of the system being registered (arbitrary number
        # corresponding to the order in which systems were registered).
        self.n_systems += 1
        self.systems[system] = self.n_systems
        print("Arduino register %s" % system, self.systems[system])

        # Transmit the system name one character at a time, followed by a
        # null terminator so the receiver knows the name is complete.
        for sys_name_chr in system:
            reg_word = construct_word(self.systems[system], MSG_TYPE_REGISTER, ord(sys_name_chr))
            self._send_data_word_to_serial_port(reg_word)

        null_term_word = construct_word(self.systems[system], MSG_TYPE_REGISTER, 0)  # data payload is 0 for null terminator
        self._send_data_word_to_serial_port(null_term_word)

    def sendMsg(self, msg):
        '''
        Do nothing. Messages are stored with row numbers in the HDF table, so no need to also send the message over to the recording system.

        Parameters
        ----------
        msg : string
            Message to send

        Returns
        -------
        None
        '''
        # There's no point in sending a message, since every message is
        # stored in the HDF table anyway with a row number, and every row
        # number is automatically synced.
        pass

    def send(self, system, data):
        '''
        Send the row number for a data word to the neural system

        Parameters
        ----------
        system : string
            Name of system
        data : object
            This is unused. Only used in the parent's version where the actual data, and not just the HDF row number, is sent.

        Returns
        -------
        None
        '''
        if not (system in self.systems):
            # if the system is not registered, do nothing
            return

        current_sys_rowcount = self.rowcount[system]
        self.rowcount[system] += 1

        # Construct the data packet; only the low byte of the row count
        # is transmitted.
        word = construct_word(self.systems[system], MSG_TYPE_ROWBYTE, current_sys_rowcount % 256)
        self._send_data_word_to_serial_port(word)

    def _send_data_word_to_serial_port(self, word, verbose=False):
        '''
        Write one 16-bit word to the serial port, prefixed with the 'd'
        (data) command byte and packed little-endian.
        '''
        if verbose:
            print(binary_repr(word, 16))
        # Bug fix: under Python 3, concatenating str 'd' with the bytes
        # returned by struct.pack raised TypeError; use a bytes literal.
        self.port.write(b'd' + struct.pack('<H', word))
|
[
"serial.Serial",
"collections.defaultdict",
"numpy.binary_repr",
"struct.pack"
] |
[((1347, 1405), 'serial.Serial', 'serial.Serial', (['"""/dev/arduino_neurosync"""'], {'baudrate': 'baudrate'}), "('/dev/arduino_neurosync', baudrate=baudrate)\n", (1360, 1405), False, 'import serial\n'), ((1457, 1473), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1468, 1473), False, 'from collections import defaultdict\n'), ((4815, 4838), 'struct.pack', 'struct.pack', (['"""<H"""', 'word'], {}), "('<H', word)\n", (4826, 4838), False, 'import struct\n'), ((4767, 4788), 'numpy.binary_repr', 'binary_repr', (['word', '(16)'], {}), '(word, 16)\n', (4778, 4788), False, 'from numpy import binary_repr\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# import argparse
# import itertools
import json
import os
import subprocess
# import sys
# import time
import unittest
import uuid
import torch
# import datasets
# import hparams_registry
# import algorithms
# import networks
# from parameterized import parameterized
# import test.helpers
class TestTrain(unittest.TestCase):
    def test_end_to_end(self):
        """Test that train.py successfully completes one run of 500 steps.

        Launches the training script as a subprocess (requires the
        DATA_DIR environment variable to point at the dataset root),
        then sanity-checks the final results line and the log output.
        """
        # Fresh, unique output directory per test run.
        output_dir = os.path.join('/tmp', str(uuid.uuid4()))
        os.makedirs(output_dir, exist_ok=True)
        # NOTE(review): shell=True with an interpolated string; fine for a
        # test with trusted inputs, but a list + shell=False would be safer.
        subprocess.run(f'python -m domainbed.scripts.train --dataset RotatedMNIST '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--steps=501', shell=True)
        with open(os.path.join(output_dir, 'results.jsonl')) as f:
            lines = [l[:-1] for l in f]
        last_epoch = json.loads(lines[-1])
        self.assertEqual(last_epoch['step'], 500)
        # Conservative values; anything lower and something's likely wrong.
        self.assertGreater(last_epoch['env0_in_acc'], 0.80)
        self.assertGreater(last_epoch['env1_in_acc'], 0.95)
        self.assertGreater(last_epoch['env2_in_acc'], 0.95)
        # Bug fix: env3_in_acc was asserted twice (copy-paste duplicate);
        # the redundant second assertion has been removed.
        self.assertGreater(last_epoch['env3_in_acc'], 0.95)
        with open(os.path.join(output_dir, 'out.txt')) as f:
            text = f.read()
        self.assertTrue('500' in text)
|
[
"subprocess.run",
"uuid.uuid4",
"os.makedirs",
"json.loads",
"os.path.join"
] |
[((569, 607), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (580, 607), False, 'import os\n'), ((617, 788), 'subprocess.run', 'subprocess.run', (['f"""python -m domainbed.scripts.train --dataset RotatedMNIST --data_dir={os.environ[\'DATA_DIR\']} --output_dir={output_dir} --steps=501"""'], {'shell': '(True)'}), '(\n f"python -m domainbed.scripts.train --dataset RotatedMNIST --data_dir={os.environ[\'DATA_DIR\']} --output_dir={output_dir} --steps=501"\n , shell=True)\n', (631, 788), False, 'import subprocess\n'), ((944, 965), 'json.loads', 'json.loads', (['lines[-1]'], {}), '(lines[-1])\n', (954, 965), False, 'import json\n'), ((546, 558), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (556, 558), False, 'import uuid\n'), ((830, 871), 'os.path.join', 'os.path.join', (['output_dir', '"""results.jsonl"""'], {}), "(output_dir, 'results.jsonl')\n", (842, 871), False, 'import os\n'), ((1439, 1474), 'os.path.join', 'os.path.join', (['output_dir', '"""out.txt"""'], {}), "(output_dir, 'out.txt')\n", (1451, 1474), False, 'import os\n')]
|
import torch
import torch.nn as nn
import torch.nn.init as nn_init
from .misc import conv, deconv, predict_flow
'Parameter count : 38,676,504 '
class FlowNetS(nn.Module):
    """FlowNetS optical-flow network (encoder/decoder with skip links).

    Parameter count: 38,676,504.

    Args:
        input_channels (int): channel count of the stacked input images.
        with_bn (bool): whether the conv blocks include batch norm.
    """

    def __init__(self, input_channels=12, with_bn=True):
        super(FlowNetS, self).__init__()

        self.with_bn = with_bn
        # Encoder: stride-2 convs progressively downsample while the
        # channel count grows.
        self.conv1 = conv(
            input_channels, 64, kernel_size=7, stride=2, with_bn=with_bn)
        self.conv2 = conv(64, 128, kernel_size=5, stride=2, with_bn=with_bn)
        self.conv3 = conv(128, 256, kernel_size=5, stride=2, with_bn=with_bn)
        self.conv3_1 = conv(256, 256, with_bn=with_bn)
        self.conv4 = conv(256, 512, stride=2, with_bn=with_bn)
        self.conv4_1 = conv(512, 512, with_bn=with_bn)
        self.conv5 = conv(512, 512, stride=2, with_bn=with_bn)
        self.conv5_1 = conv(512, 512, with_bn=with_bn)
        self.conv6 = conv(512, 1024, stride=2, with_bn=with_bn)
        self.conv6_1 = conv(1024, 1024, with_bn=with_bn)

        # Decoder: input channel counts include the concatenated skip
        # features plus the 2-channel upsampled flow (e.g. 1026 = 512+512+2).
        self.deconv5 = deconv(1024, 512)
        self.deconv4 = deconv(1026, 256)
        self.deconv3 = deconv(770, 128)
        self.deconv2 = deconv(386, 64)

        # Per-scale flow prediction heads (2 output channels).
        self.predict_flow6 = predict_flow(1024)
        self.predict_flow5 = predict_flow(1026)
        self.predict_flow4 = predict_flow(770)
        self.predict_flow3 = predict_flow(386)
        self.predict_flow2 = predict_flow(194)

        # Learned 2x upsamplers for the flow maps between scales.
        self.upsampled_flow6_to_5 = nn.ConvTranspose2d(
            2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow5_to_4 = nn.ConvTranspose2d(
            2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow4_to_3 = nn.ConvTranspose2d(
            2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow3_to_2 = nn.ConvTranspose2d(
            2, 2, 4, 2, 1, bias=False)
        self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')

        # Weight initialization. Bug fix: the non-underscore nn.init
        # functions (uniform / xavier_uniform) are deprecated aliases;
        # use the in-place underscore variants instead.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                if m.bias is not None:
                    nn_init.uniform_(m.bias)
                nn_init.xavier_uniform_(m.weight)

    def forward(self, x):
        """Compute multi-scale flow predictions.

        Returns:
            In training mode: (flow2, flow3, flow4, flow5, flow6),
            finest to coarsest. In eval mode: the 1-tuple (flow2,).
        """
        out_conv1 = self.conv1(x)
        out_conv2 = self.conv2(out_conv1)
        out_conv3 = self.conv3_1(self.conv3(out_conv2))
        out_conv4 = self.conv4_1(self.conv4(out_conv3))
        out_conv5 = self.conv5_1(self.conv5(out_conv4))
        out_conv6 = self.conv6_1(self.conv6(out_conv5))

        # Coarse-to-fine refinement: at each scale predict a flow map,
        # upsample it, and concatenate it with the decoder output and
        # the matching encoder features.
        flow6 = self.predict_flow6(out_conv6)
        flow6_up = self.upsampled_flow6_to_5(flow6)
        out_deconv5 = self.deconv5(out_conv6)
        concat5 = torch.cat((out_conv5, out_deconv5, flow6_up), 1)
        flow5 = self.predict_flow5(concat5)
        flow5_up = self.upsampled_flow5_to_4(flow5)
        out_deconv4 = self.deconv4(concat5)
        concat4 = torch.cat((out_conv4, out_deconv4, flow5_up), 1)
        flow4 = self.predict_flow4(concat4)
        flow4_up = self.upsampled_flow4_to_3(flow4)
        out_deconv3 = self.deconv3(concat4)
        concat3 = torch.cat((out_conv3, out_deconv3, flow4_up), 1)
        flow3 = self.predict_flow3(concat3)
        flow3_up = self.upsampled_flow3_to_2(flow3)
        out_deconv2 = self.deconv2(concat3)
        concat2 = torch.cat((out_conv2, out_deconv2, flow3_up), 1)
        flow2 = self.predict_flow2(concat2)

        if self.training:
            return flow2, flow3, flow4, flow5, flow6
        else:
            return flow2,
|
[
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.init.xavier_uniform",
"torch.nn.Upsample",
"torch.nn.init.uniform"
] |
[((1412, 1457), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(2)', '(2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(2, 2, 4, 2, 1, bias=False)\n', (1430, 1457), True, 'import torch.nn as nn\n'), ((1507, 1552), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(2)', '(2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(2, 2, 4, 2, 1, bias=False)\n', (1525, 1552), True, 'import torch.nn as nn\n'), ((1602, 1647), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(2)', '(2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(2, 2, 4, 2, 1, bias=False)\n', (1620, 1647), True, 'import torch.nn as nn\n'), ((1697, 1742), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(2)', '(2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(2, 2, 4, 2, 1, bias=False)\n', (1715, 1742), True, 'import torch.nn as nn\n'), ((1782, 1826), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(4)', 'mode': '"""bilinear"""'}), "(scale_factor=4, mode='bilinear')\n", (1793, 1826), True, 'import torch.nn as nn\n'), ((2709, 2757), 'torch.cat', 'torch.cat', (['(out_conv5, out_deconv5, flow6_up)', '(1)'], {}), '((out_conv5, out_deconv5, flow6_up), 1)\n', (2718, 2757), False, 'import torch\n'), ((2917, 2965), 'torch.cat', 'torch.cat', (['(out_conv4, out_deconv4, flow5_up)', '(1)'], {}), '((out_conv4, out_deconv4, flow5_up), 1)\n', (2926, 2965), False, 'import torch\n'), ((3125, 3173), 'torch.cat', 'torch.cat', (['(out_conv3, out_deconv3, flow4_up)', '(1)'], {}), '((out_conv3, out_deconv3, flow4_up), 1)\n', (3134, 3173), False, 'import torch\n'), ((3333, 3381), 'torch.cat', 'torch.cat', (['(out_conv2, out_deconv2, flow3_up)', '(1)'], {}), '((out_conv2, out_deconv2, flow3_up), 1)\n', (3342, 3381), False, 'import torch\n'), ((2001, 2033), 'torch.nn.init.xavier_uniform', 'nn_init.xavier_uniform', (['m.weight'], {}), '(m.weight)\n', (2023, 2033), True, 'import torch.nn.init as nn_init\n'), ((2184, 2216), 'torch.nn.init.xavier_uniform', 'nn_init.xavier_uniform', (['m.weight'], {}), 
'(m.weight)\n', (2206, 2216), True, 'import torch.nn.init as nn_init\n'), ((1961, 1984), 'torch.nn.init.uniform', 'nn_init.uniform', (['m.bias'], {}), '(m.bias)\n', (1976, 1984), True, 'import torch.nn.init as nn_init\n'), ((2144, 2167), 'torch.nn.init.uniform', 'nn_init.uniform', (['m.bias'], {}), '(m.bias)\n', (2159, 2167), True, 'import torch.nn.init as nn_init\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright 2012 <NAME> (http://jamesthornton.com)
# BSD License (see LICENSE for details)
#
"""
An interface for interacting with indices on Rexster.
"""
from bulbs.utils import initialize_element, initialize_elements, get_one_result
class IndexProxy(object):
    """Abstract base class for the index proxies.

    :param index_class: Index class used to instantiate indices,
        e.g. ManualIndex.
    :param client: The Client object for the database.
    """

    def __init__(self, index_class, client):
        # Index class for this proxy.
        self.index_class = index_class
        # Low-level Client object used to talk to the database.
        self.client = client
class VertexIndexProxy(IndexProxy):
    """
    Manage vertex indices on Rexster.

    :param index_class: The index class for this proxy, e.g. ManualIndex.
    :type index_class: Index

    :param client: The Client object for the database.
    :type client: bulbs.rexster.client.RexsterClient

    :ivar index_class: Index class.
    :ivar client: RexsterClient object.
    """

    def create(self, index_name):
        """
        Create a Vertex index and return it. Not supported by this backend.

        :param index_name: Index name.
        :type index_name: str

        :rtype: bulbs.rexster.index.Index
        """
        raise NotImplementedError

    def get(self, index_name="vertex"):
        """
        Return an Index object bound to *index_name* (always succeeds;
        the index is faked locally and registered with the client).

        :param index_name: Index name.
        :type index_name: str

        :rtype: bulbs.rexster.index.Index
        """
        vertex_index = self.index_class(self.client, None)
        vertex_index.base_type = "vertex"
        vertex_index._index_name = index_name
        self.client.registry.add_index(index_name, vertex_index)
        return vertex_index

    def get_or_create(self, index_name="vertex", index_params=None):
        """
        Get a Vertex Index or create it if it doesn't exist.

        :param index_name: Index name.
        :type index_name: str

        :rtype: bulbs.rexster.index.Index
        """
        # get() never fails here, so no separate creation step is needed.
        return self.get(index_name)

    def delete(self, index_name):
        """
        Delete an index and return the Response. Not supported here.

        :param index_name: Index name.
        :type index_name: str

        :rtype: bulbs.rexster.client.RexsterResponse
        """
        raise NotImplementedError
class EdgeIndexProxy(IndexProxy):
    """
    Manage edge indices on Rexster.

    :param index_class: The index class for this proxy, e.g. ManualIndex.
    :type index_class: Index

    :param client: The Client object for the database.
    :type client: bulbs.rexster.client.RexsterClient

    :ivar index_class: Index class.
    :ivar client: RexsterClient object.
    """

    def create(self, index_name, *args, **kwds):
        """
        Add an index to the database and return it. Not supported here.

        index_keys must be a string in this format: '[k1,k2]'
        Don't pass actual list b/c keys get double quoted.

        :param index_name: The name of the index to create.

        :param index_class: The class of the elements stored in the index.
                            Either vertex or edge.
        """
        raise NotImplementedError

    def get(self, index_name="edge"):
        """
        Return an Index object bound to *index_name* (always succeeds;
        the index is faked locally and registered with the client).

        :param index_name: Index name.
        :type index_name: str

        :rtype: bulbs.rexster.index.Index
        """
        edge_index = self.index_class(self.client, None)
        edge_index.base_type = "edge"
        edge_index._index_name = index_name
        self.client.registry.add_index(index_name, edge_index)
        return edge_index

    def get_or_create(self, index_name="edge", index_params=None):
        """
        Get an Edge Index or create it if it doesn't exist.

        :param index_name: Index name.
        :type index_name: str

        :rtype: bulbs.rexster.index.Index
        """
        # get() never fails here, so no separate creation step is needed.
        return self.get(index_name)

    def delete(self, index_name):
        """
        Delete an index and return the Response. Not supported here.

        :param index_name: Index name.
        :type index_name: str

        :rtype: bulbs.rexster.client.RexsterResponse
        """
        raise NotImplementedError
#
# Index Containers (Titan only supports KeyIndex so far)
#
class Index(object):
    """Abstract base class for an index."""

    def __init__(self, client, result):
        self.client = client
        self.result = result
        # Both attributes are filled in later by Factory.get_index.
        self.base_type = None
        self._index_name = None
        # The index_name is actually ignored with Titan,
        # but setting it like normal to make tests pass.

    @classmethod
    def get_proxy_class(cls, base_type):
        """
        Return the IndexProxy class for *base_type*.

        :param base_type: Index base type, either vertex or edge.
        :type base_type: str

        :rtype: class
        """
        return dict(vertex=VertexIndexProxy, edge=EdgeIndexProxy)[base_type]

    @property
    def index_name(self):
        """
        Return the index name.

        :rtype: str
        """
        # Faked: simply echoes back the name stored by the proxy.
        return self._index_name

    @property
    def index_class(self):
        """
        Return the index class, either vertex or edge.

        :rtype: class
        """
        return self.base_type

    @property
    def index_type(self):
        """
        Return the index type; always "automatic" for this backend.

        :rtype: str
        """
        return "automatic"

    def count(self, key=None, value=None, **pair):
        """
        Return a count of all elements with 'key' equal to 'value' in the
        index. Not supported by this backend.

        :param key: The index key. This is optional because you can instead
                    supply a key/value pair such as name="James".

        :param value: The index key's value. This is optional because you can
                      instead supply a key/value pair such as name="James".

        :param pair: Optional keyword param. Instead of supplying key=name
                     and value='James', you can supply a key/value pair in
                     the form of name='James'.
        """
        raise NotImplementedError

    def _get_key_value(self, key, value, pair):
        """Return the key and value, regardless of how they were entered."""
        if pair:
            key, value = pair.popitem()
        return key, value

    def _get_method(self, **method_map):
        """Look up the client method named in *method_map* for this
        index class (vertex or edge)."""
        return getattr(self.client, method_map[self.index_class])

    def lookup(self, key=None, value=None, **pair):
        """
        Return a generator containing all the elements with key property
        equal to value in the index.

        :param key: The index key. This is optional because you can instead
                    supply a key/value pair such as name="James".

        :param value: The index key's value. This is optional because you can
                      instead supply a key/value pair such as name="James".

        :param pair: Optional keyword param. Instead of supplying key=name
                     and value='James', you can supply a key/value pair in
                     the form of name='James'.
        """
        key, value = self._get_key_value(key, value, pair)
        resp = self.client.lookup_vertex(self.index_name, key, value)
        return initialize_elements(self.client, resp)

    def get_unique(self, key=None, value=None, **pair):
        """
        Return a max of 1 elements matching the key/value pair in the index,
        or None when there is no match.

        :param key: The index key. This is optional because you can instead
                    supply a key/value pair such as name="James".

        :param value: The index key's value. This is optional because you can
                      instead supply a key/value pair such as name="James".

        :param pair: Optional keyword param. Instead of supplying key=name
                     and value='James', you can supply a key/value pair in
                     the form of name='James'.
        """
        key, value = self._get_key_value(key, value, pair)
        resp = self.client.lookup_vertex(self.index_name, key, value)
        if resp.total_size > 0:
            return initialize_element(self.client, get_one_result(resp))
class KeyIndex(Index):
    """Titan key index (Titan only supports key indices)."""

    def keys(self):
        """Return the index's keys."""
        # Titan does not support edge indices, so only vertex keys exist.
        resp = self.client.get_vertex_keys()
        return [result.raw for result in resp.results]

    def create_key(self, key):
        """Create an index key for this index's base type.

        TODO: You can't create a key if the property already exists -
        workaround?
        """
        # Bug fix: the original used `self.base_type is "edge"`, an
        # identity comparison with a string literal that only worked via
        # CPython string interning (and raises SyntaxWarning on 3.8+).
        if self.base_type == "edge":
            return self.create_edge_key(key)
        return self.create_vertex_key(key)

    def create_vertex_key(self, key):
        """Create a vertex key index via the client."""
        return self.client.create_vertex_key_index(key)

    def create_edge_key(self, key):
        # NOTE(review): this also calls create_vertex_key_index — since
        # Titan does not support edge indices, edge keys appear to be
        # created as vertex keys; confirm this is intentional.
        return self.client.create_vertex_key_index(key)

    def rebuild(self):
        raise NotImplementedError  # (for now)
        # Unreachable legacy implementation kept below for reference.
        # need class_map b/c the Blueprints need capitalized class names,
        # but Rexster returns lower-case class names for index_class
        method_map = dict(vertex=self.client.rebuild_vertex_index,
                          edge=self.client.rebuild_edge_index)
        rebuild_method = method_map.get(self.index_class)
        resp = rebuild_method(self.index_name)
        return list(resp.results)
|
[
"bulbs.utils.initialize_element",
"bulbs.utils.initialize_elements",
"bulbs.utils.get_one_result"
] |
[((7595, 7633), 'bulbs.utils.initialize_elements', 'initialize_elements', (['self.client', 'resp'], {}), '(self.client, resp)\n', (7614, 7633), False, 'from bulbs.utils import initialize_element, initialize_elements, get_one_result\n'), ((8471, 8491), 'bulbs.utils.get_one_result', 'get_one_result', (['resp'], {}), '(resp)\n', (8485, 8491), False, 'from bulbs.utils import initialize_element, initialize_elements, get_one_result\n'), ((8511, 8550), 'bulbs.utils.initialize_element', 'initialize_element', (['self.client', 'result'], {}), '(self.client, result)\n', (8529, 8550), False, 'from bulbs.utils import initialize_element, initialize_elements, get_one_result\n')]
|
# -*- coding: utf-8 -*-
from os import getcwd, path, environ, makedirs
from yaml import safe_load
# Absolute path of the package directory, used to resolve bundled files.
basedir = path.abspath(path.dirname(__file__))

# Load the operational settings once at import time. Bug fix: the
# original passed a bare open() result to safe_load and never closed
# the file handle; a context manager releases it deterministically.
with open(path.join(basedir, 'server', 'ops_settings.yml')) as _settings_file:
    SETTINGS = safe_load(_settings_file)

TILES_DIR = path.join(getcwd(), 'tiles')

if "TESTING" in environ:
    # Use a separate table and tile directory so tests never touch
    # production data.
    SETTINGS['provider_parameters']['table_name'] = SETTINGS['provider_parameters']['table_name'] + '_test'
    TILES_DIR = path.join(basedir, 'tests', 'tile')

if "CI" in environ:
    # CI database listens on a non-default port.
    SETTINGS['provider_parameters']['port'] = 5433

# Bug fix: exist_ok=True replaces the racy exists()/makedirs() pair
# (another process could create the directory between the two calls).
makedirs(TILES_DIR, exist_ok=True)

__version__ = "0.1"
|
[
"os.makedirs",
"os.getcwd",
"os.path.dirname",
"os.path.exists",
"os.path.join"
] |
[((123, 145), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (135, 145), False, 'from os import getcwd, path, environ, makedirs\n'), ((247, 255), 'os.getcwd', 'getcwd', ([], {}), '()\n', (253, 255), False, 'from os import getcwd, path, environ, makedirs\n'), ((416, 451), 'os.path.join', 'path.join', (['basedir', '"""tests"""', '"""tile"""'], {}), "(basedir, 'tests', 'tile')\n", (425, 451), False, 'from os import getcwd, path, environ, makedirs\n'), ((539, 561), 'os.path.exists', 'path.exists', (['TILES_DIR'], {}), '(TILES_DIR)\n', (550, 561), False, 'from os import getcwd, path, environ, makedirs\n'), ((567, 586), 'os.makedirs', 'makedirs', (['TILES_DIR'], {}), '(TILES_DIR)\n', (575, 586), False, 'from os import getcwd, path, environ, makedirs\n'), ((173, 221), 'os.path.join', 'path.join', (['basedir', '"""server"""', '"""ops_settings.yml"""'], {}), "(basedir, 'server', 'ops_settings.yml')\n", (182, 221), False, 'from os import getcwd, path, environ, makedirs\n')]
|
import sonnet as snt
import tensorflow as tf
from luminoth.utils.bbox_transform_tf import decode, clip_boxes, change_order
class RPNProposal(snt.AbstractModule):
    """Transforms anchors and RPN predictions into object proposals.

    Using the fixed anchors and the RPN predictions for both classification
    and regression (adjusting the bounding box), we return a list of objects
    sorted by relevance.

    Besides applying the transformations (or adjustments) from the prediction,
    it tries to get rid of duplicate proposals by using non maximum supression
    (NMS).
    """

    def __init__(self, num_anchors, config, debug=False, name="proposal_layer"):
        super(RPNProposal, self).__init__(name=name)
        # Number of anchors per spatial location (stored; not used by _build).
        self._num_anchors = num_anchors

        # Filtering config
        # Before applying NMS we filter the top N anchors.
        self._pre_nms_top_n = config.pre_nms_top_n
        # Whether to run NMS at all; when False the top-k proposals are
        # returned as-is.
        self._apply_nms = config.apply_nms
        # After applying NMS we filter the top M anchors.
        # It's important to understand that because of NMS, it is not certain
        # we will have this many output proposals. This is just the upper
        # bound.
        self._post_nms_top_n = config.post_nms_top_n
        # Threshold to use for NMS.
        self._nms_threshold = float(config.nms_threshold)
        # Currently we do not filter out proposals by size.
        self._min_size = config.min_size
        # Whether to drop anchors that extend beyond the image borders
        # before decoding.
        self._filter_outside_anchors = config.filter_outside_anchors
        # If True, clip boxes to the image after NMS instead of before.
        self._clip_after_nms = config.clip_after_nms
        # Proposals scoring below this foreground probability are dropped.
        self._min_prob_threshold = float(config.min_prob_threshold)
        # When True, intermediate tensors are added to the output dict.
        self._debug = debug

    def _build(self, rpn_cls_prob, rpn_bbox_pred, all_anchors, im_shape):
        """
        Args:
            rpn_cls_prob: A Tensor with the softmax output for each anchor.
                Its shape should be (total_anchors, 2), with the probability of
                being background and the probability of being foreground for
                each anchor.
            rpn_bbox_pred: A Tensor with the regression output for each anchor.
                Its shape should be (total_anchors, 4).
            all_anchors: A Tensor with the anchors bounding boxes of shape
                (total_anchors, 4), having (x_min, y_min, x_max, y_max) for
                each anchor.
            im_shape: A Tensor with the image shape in format (height, width).

        Returns:
            prediction_dict with the following keys:
                proposals: A Tensor with the final selected proposed
                    bounding boxes. Its shape should be
                    (total_proposals, 4).
                scores: A Tensor with the probability of being an
                    object for that proposal. Its shape should be
                    (total_proposals, 1)
        """
        # Scores are extracted from the second scalar of the cls probability.
        # cls_probability is a softmax of (background, foreground).
        all_scores = rpn_cls_prob[:, 1]
        # Force flatten the scores (it should be already be flatten).
        all_scores = tf.reshape(all_scores, [-1])

        if self._filter_outside_anchors:
            with tf.name_scope("filter_outside_anchors"):
                (x_min_anchor, y_min_anchor, x_max_anchor, y_max_anchor) = tf.unstack(
                    all_anchors, axis=1
                )

                # Keep only anchors fully inside the image:
                # min corner >= 0 and max corner strictly inside (h, w).
                anchor_filter = tf.logical_and(
                    tf.logical_and(
                        tf.greater_equal(x_min_anchor, 0),
                        tf.greater_equal(y_min_anchor, 0),
                    ),
                    tf.logical_and(
                        tf.less(x_max_anchor, im_shape[1]),
                        tf.less(y_max_anchor, im_shape[0]),
                    ),
                )
                anchor_filter = tf.reshape(anchor_filter, [-1])
                # Apply the same mask to anchors, predictions and scores so
                # they stay aligned element-wise.
                all_anchors = tf.boolean_mask(
                    all_anchors, anchor_filter, name="filter_anchors"
                )
                rpn_bbox_pred = tf.boolean_mask(rpn_bbox_pred, anchor_filter)
                all_scores = tf.boolean_mask(all_scores, anchor_filter)

        # Decode boxes: apply the regression deltas to the anchors.
        all_proposals = decode(all_anchors, rpn_bbox_pred)

        # Filter proposals with less than threshold probability.
        min_prob_filter = tf.greater_equal(all_scores, self._min_prob_threshold)

        # Filter proposals with negative or zero area.
        (x_min, y_min, x_max, y_max) = tf.unstack(all_proposals, axis=1)
        # tf.maximum clamps negative extents to 0 so the product is only
        # positive for boxes with strictly positive width AND height.
        zero_area_filter = tf.greater(
            tf.maximum(x_max - x_min, 0.0) * tf.maximum(y_max - y_min, 0.0), 0.0
        )
        proposal_filter = tf.logical_and(zero_area_filter, min_prob_filter)

        # Filter proposals and scores.
        all_proposals_total = tf.shape(all_scores)[0]
        unsorted_scores = tf.boolean_mask(
            all_scores, proposal_filter, name="filtered_scores"
        )
        unsorted_proposals = tf.boolean_mask(
            all_proposals, proposal_filter, name="filtered_proposals"
        )
        if self._debug:
            # Snapshot the proposals before clipping for debug output.
            proposals_unclipped = tf.identity(unsorted_proposals)

        if not self._clip_after_nms:
            # Clip proposals to the image.
            unsorted_proposals = clip_boxes(unsorted_proposals, im_shape)

        filtered_proposals_total = tf.shape(unsorted_scores)[0]
        # Summaries reporting how many proposals survived the filters
        # (the list argument is the summary collection).
        tf.summary.scalar(
            "valid_proposals_ratio",
            (
                tf.cast(filtered_proposals_total, tf.float32)
                / tf.cast(all_proposals_total, tf.float32)
            ),
            ["rpn"],
        )
        tf.summary.scalar(
            "invalid_proposals", all_proposals_total - filtered_proposals_total, ["rpn"]
        )

        # Get top `pre_nms_top_n` indices by sorting the proposals by score.
        # k is capped at the number of surviving proposals to avoid a
        # top_k failure when fewer than pre_nms_top_n remain.
        k = tf.minimum(self._pre_nms_top_n, tf.shape(unsorted_scores)[0])
        top_k = tf.nn.top_k(unsorted_scores, k=k)

        sorted_top_proposals = tf.gather(unsorted_proposals, top_k.indices)
        sorted_top_scores = top_k.values

        if self._apply_nms:
            with tf.name_scope("nms"):
                # We reorder the proposals into TensorFlows bounding box order
                # for `tf.image.non_max_supression` compatibility.
                proposals_tf_order = change_order(sorted_top_proposals)
                # We cut the pre_nms filter in pure TF version and go straight
                # into NMS.
                selected_indices = tf.image.non_max_suppression(
                    proposals_tf_order,
                    tf.reshape(sorted_top_scores, [-1]),
                    self._post_nms_top_n,
                    iou_threshold=self._nms_threshold,
                )

                # Selected_indices is a smaller tensor, we need to extract the
                # proposals and scores using it.
                nms_proposals_tf_order = tf.gather(
                    proposals_tf_order, selected_indices, name="gather_nms_proposals"
                )

                # We switch back again to the regular bbox encoding.
                proposals = change_order(nms_proposals_tf_order)
                scores = tf.gather(
                    sorted_top_scores,
                    selected_indices,
                    name="gather_nms_proposals_scores",
                )
        else:
            proposals = sorted_top_proposals
            scores = sorted_top_scores

        if self._clip_after_nms:
            # Clip proposals to the image after NMS.
            proposals = clip_boxes(proposals, im_shape)

        pred = {
            "proposals": proposals,
            "scores": scores,
        }

        if self._debug:
            # Expose intermediate tensors for inspection/visualization.
            pred.update(
                {
                    "sorted_top_scores": sorted_top_scores,
                    "sorted_top_proposals": sorted_top_proposals,
                    "unsorted_proposals": unsorted_proposals,
                    "unsorted_scores": unsorted_scores,
                    "all_proposals": all_proposals,
                    "all_scores": all_scores,
                    # proposals_unclipped has the unsorted_scores scores
                    "proposals_unclipped": proposals_unclipped,
                }
            )

        return pred
|
[
"luminoth.utils.bbox_transform_tf.change_order",
"tensorflow.logical_and",
"tensorflow.summary.scalar",
"tensorflow.gather",
"tensorflow.boolean_mask",
"tensorflow.nn.top_k",
"tensorflow.reshape",
"tensorflow.identity",
"tensorflow.maximum",
"tensorflow.less",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.greater_equal",
"luminoth.utils.bbox_transform_tf.decode",
"tensorflow.name_scope",
"luminoth.utils.bbox_transform_tf.clip_boxes",
"tensorflow.unstack"
] |
[((3099, 3127), 'tensorflow.reshape', 'tf.reshape', (['all_scores', '[-1]'], {}), '(all_scores, [-1])\n', (3109, 3127), True, 'import tensorflow as tf\n'), ((4193, 4227), 'luminoth.utils.bbox_transform_tf.decode', 'decode', (['all_anchors', 'rpn_bbox_pred'], {}), '(all_anchors, rpn_bbox_pred)\n', (4199, 4227), False, 'from luminoth.utils.bbox_transform_tf import decode, clip_boxes, change_order\n'), ((4320, 4374), 'tensorflow.greater_equal', 'tf.greater_equal', (['all_scores', 'self._min_prob_threshold'], {}), '(all_scores, self._min_prob_threshold)\n', (4336, 4374), True, 'import tensorflow as tf\n'), ((4470, 4503), 'tensorflow.unstack', 'tf.unstack', (['all_proposals'], {'axis': '(1)'}), '(all_proposals, axis=1)\n', (4480, 4503), True, 'import tensorflow as tf\n'), ((4660, 4709), 'tensorflow.logical_and', 'tf.logical_and', (['zero_area_filter', 'min_prob_filter'], {}), '(zero_area_filter, min_prob_filter)\n', (4674, 4709), True, 'import tensorflow as tf\n'), ((4830, 4898), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['all_scores', 'proposal_filter'], {'name': '"""filtered_scores"""'}), "(all_scores, proposal_filter, name='filtered_scores')\n", (4845, 4898), True, 'import tensorflow as tf\n'), ((4950, 5024), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['all_proposals', 'proposal_filter'], {'name': '"""filtered_proposals"""'}), "(all_proposals, proposal_filter, name='filtered_proposals')\n", (4965, 5024), True, 'import tensorflow as tf\n'), ((5612, 5711), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""invalid_proposals"""', '(all_proposals_total - filtered_proposals_total)', "['rpn']"], {}), "('invalid_proposals', all_proposals_total -\n filtered_proposals_total, ['rpn'])\n", (5629, 5711), True, 'import tensorflow as tf\n'), ((5898, 5931), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['unsorted_scores'], {'k': 'k'}), '(unsorted_scores, k=k)\n', (5909, 5931), True, 'import tensorflow as tf\n'), ((5964, 6008), 'tensorflow.gather', 'tf.gather', 
(['unsorted_proposals', 'top_k.indices'], {}), '(unsorted_proposals, top_k.indices)\n', (5973, 6008), True, 'import tensorflow as tf\n'), ((4780, 4800), 'tensorflow.shape', 'tf.shape', (['all_scores'], {}), '(all_scores)\n', (4788, 4800), True, 'import tensorflow as tf\n'), ((5105, 5136), 'tensorflow.identity', 'tf.identity', (['unsorted_proposals'], {}), '(unsorted_proposals)\n', (5116, 5136), True, 'import tensorflow as tf\n'), ((5251, 5291), 'luminoth.utils.bbox_transform_tf.clip_boxes', 'clip_boxes', (['unsorted_proposals', 'im_shape'], {}), '(unsorted_proposals, im_shape)\n', (5261, 5291), False, 'from luminoth.utils.bbox_transform_tf import decode, clip_boxes, change_order\n'), ((5328, 5353), 'tensorflow.shape', 'tf.shape', (['unsorted_scores'], {}), '(unsorted_scores)\n', (5336, 5353), True, 'import tensorflow as tf\n'), ((7536, 7567), 'luminoth.utils.bbox_transform_tf.clip_boxes', 'clip_boxes', (['proposals', 'im_shape'], {}), '(proposals, im_shape)\n', (7546, 7567), False, 'from luminoth.utils.bbox_transform_tf import decode, clip_boxes, change_order\n'), ((3187, 3226), 'tensorflow.name_scope', 'tf.name_scope', (['"""filter_outside_anchors"""'], {}), "('filter_outside_anchors')\n", (3200, 3226), True, 'import tensorflow as tf\n'), ((3303, 3334), 'tensorflow.unstack', 'tf.unstack', (['all_anchors'], {'axis': '(1)'}), '(all_anchors, axis=1)\n', (3313, 3334), True, 'import tensorflow as tf\n'), ((3828, 3859), 'tensorflow.reshape', 'tf.reshape', (['anchor_filter', '[-1]'], {}), '(anchor_filter, [-1])\n', (3838, 3859), True, 'import tensorflow as tf\n'), ((3890, 3956), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['all_anchors', 'anchor_filter'], {'name': '"""filter_anchors"""'}), "(all_anchors, anchor_filter, name='filter_anchors')\n", (3905, 3956), True, 'import tensorflow as tf\n'), ((4027, 4072), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['rpn_bbox_pred', 'anchor_filter'], {}), '(rpn_bbox_pred, anchor_filter)\n', (4042, 4072), True, 'import 
tensorflow as tf\n'), ((4102, 4144), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['all_scores', 'anchor_filter'], {}), '(all_scores, anchor_filter)\n', (4117, 4144), True, 'import tensorflow as tf\n'), ((4555, 4585), 'tensorflow.maximum', 'tf.maximum', (['(x_max - x_min)', '(0.0)'], {}), '(x_max - x_min, 0.0)\n', (4565, 4585), True, 'import tensorflow as tf\n'), ((4588, 4618), 'tensorflow.maximum', 'tf.maximum', (['(y_max - y_min)', '(0.0)'], {}), '(y_max - y_min, 0.0)\n', (4598, 4618), True, 'import tensorflow as tf\n'), ((5452, 5497), 'tensorflow.cast', 'tf.cast', (['filtered_proposals_total', 'tf.float32'], {}), '(filtered_proposals_total, tf.float32)\n', (5459, 5497), True, 'import tensorflow as tf\n'), ((5516, 5556), 'tensorflow.cast', 'tf.cast', (['all_proposals_total', 'tf.float32'], {}), '(all_proposals_total, tf.float32)\n', (5523, 5556), True, 'import tensorflow as tf\n'), ((5852, 5877), 'tensorflow.shape', 'tf.shape', (['unsorted_scores'], {}), '(unsorted_scores)\n', (5860, 5877), True, 'import tensorflow as tf\n'), ((6096, 6116), 'tensorflow.name_scope', 'tf.name_scope', (['"""nms"""'], {}), "('nms')\n", (6109, 6116), True, 'import tensorflow as tf\n'), ((6301, 6335), 'luminoth.utils.bbox_transform_tf.change_order', 'change_order', (['sorted_top_proposals'], {}), '(sorted_top_proposals)\n', (6313, 6335), False, 'from luminoth.utils.bbox_transform_tf import decode, clip_boxes, change_order\n'), ((6890, 6966), 'tensorflow.gather', 'tf.gather', (['proposals_tf_order', 'selected_indices'], {'name': '"""gather_nms_proposals"""'}), "(proposals_tf_order, selected_indices, name='gather_nms_proposals')\n", (6899, 6966), True, 'import tensorflow as tf\n'), ((7103, 7139), 'luminoth.utils.bbox_transform_tf.change_order', 'change_order', (['nms_proposals_tf_order'], {}), '(nms_proposals_tf_order)\n', (7115, 7139), False, 'from luminoth.utils.bbox_transform_tf import decode, clip_boxes, change_order\n'), ((7165, 7252), 'tensorflow.gather', 'tf.gather', 
(['sorted_top_scores', 'selected_indices'], {'name': '"""gather_nms_proposals_scores"""'}), "(sorted_top_scores, selected_indices, name=\n 'gather_nms_proposals_scores')\n", (7174, 7252), True, 'import tensorflow as tf\n'), ((6568, 6603), 'tensorflow.reshape', 'tf.reshape', (['sorted_top_scores', '[-1]'], {}), '(sorted_top_scores, [-1])\n', (6578, 6603), True, 'import tensorflow as tf\n'), ((3482, 3515), 'tensorflow.greater_equal', 'tf.greater_equal', (['x_min_anchor', '(0)'], {}), '(x_min_anchor, 0)\n', (3498, 3515), True, 'import tensorflow as tf\n'), ((3541, 3574), 'tensorflow.greater_equal', 'tf.greater_equal', (['y_min_anchor', '(0)'], {}), '(y_min_anchor, 0)\n', (3557, 3574), True, 'import tensorflow as tf\n'), ((3659, 3693), 'tensorflow.less', 'tf.less', (['x_max_anchor', 'im_shape[1]'], {}), '(x_max_anchor, im_shape[1])\n', (3666, 3693), True, 'import tensorflow as tf\n'), ((3719, 3753), 'tensorflow.less', 'tf.less', (['y_max_anchor', 'im_shape[0]'], {}), '(y_max_anchor, im_shape[0])\n', (3726, 3753), True, 'import tensorflow as tf\n')]
|
import json
from django.core.urlresolvers import reverse
from rest_framework import serializers
from olympia import amo
from olympia.amo.helpers import absolutify
from olympia.files.models import FileUpload
class FileUploadSerializer(serializers.ModelSerializer):
guid = serializers.CharField(source='addon.guid')
active = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
files = serializers.SerializerMethodField()
passed_review = serializers.SerializerMethodField()
# For backwards-compatibility reasons, we return the uuid as "pk".
pk = serializers.UUIDField(source='uuid', format='hex')
processed = serializers.BooleanField()
reviewed = serializers.SerializerMethodField()
valid = serializers.BooleanField(source='passed_all_validations')
validation_results = serializers.SerializerMethodField()
validation_url = serializers.SerializerMethodField()
class Meta:
model = FileUpload
fields = [
'guid',
'active',
'automated_signing',
'url',
'files',
'passed_review',
'pk',
'processed',
'reviewed',
'valid',
'validation_results',
'validation_url',
'version',
]
def __init__(self, *args, **kwargs):
self.version = kwargs.pop('version', None)
super(FileUploadSerializer, self).__init__(*args, **kwargs)
def get_url(self, instance):
return absolutify(reverse('signing.version', args=[instance.addon.guid,
instance.version,
instance.uuid.hex]))
def get_validation_url(self, instance):
return absolutify(reverse('devhub.upload_detail',
args=[instance.uuid.hex]))
def get_files(self, instance):
if self.version is not None:
return [{'download_url': f.get_signed_url('api'),
'hash': f.hash,
'signed': f.is_signed}
for f in self.version.files.all()]
else:
return []
def get_validation_results(self, instance):
if instance.validation:
return json.loads(instance.validation)
else:
return None
def get_reviewed(self, instance):
if self.version is not None:
return all(file_.reviewed for file_ in self.version.all_files)
else:
return False
def get_active(self, instance):
if self.version is not None:
return all(file_.status in amo.REVIEWED_STATUSES
for file_ in self.version.all_files)
else:
return False
def get_passed_review(self, instance):
return self.get_reviewed(instance) and self.get_active(instance)
|
[
"rest_framework.serializers.UUIDField",
"django.core.urlresolvers.reverse",
"rest_framework.serializers.SerializerMethodField",
"json.loads",
"rest_framework.serializers.CharField",
"rest_framework.serializers.BooleanField"
] |
[((280, 322), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""addon.guid"""'}), "(source='addon.guid')\n", (301, 322), False, 'from rest_framework import serializers\n'), ((336, 371), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (369, 371), False, 'from rest_framework import serializers\n'), ((382, 417), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (415, 417), False, 'from rest_framework import serializers\n'), ((430, 465), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (463, 465), False, 'from rest_framework import serializers\n'), ((486, 521), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (519, 521), False, 'from rest_framework import serializers\n'), ((603, 653), 'rest_framework.serializers.UUIDField', 'serializers.UUIDField', ([], {'source': '"""uuid"""', 'format': '"""hex"""'}), "(source='uuid', format='hex')\n", (624, 653), False, 'from rest_framework import serializers\n'), ((670, 696), 'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {}), '()\n', (694, 696), False, 'from rest_framework import serializers\n'), ((712, 747), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (745, 747), False, 'from rest_framework import serializers\n'), ((760, 817), 'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {'source': '"""passed_all_validations"""'}), "(source='passed_all_validations')\n", (784, 817), False, 'from rest_framework import serializers\n'), ((843, 878), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (876, 878), False, 'from rest_framework import serializers\n'), ((900, 935), 
'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (933, 935), False, 'from rest_framework import serializers\n'), ((1549, 1644), 'django.core.urlresolvers.reverse', 'reverse', (['"""signing.version"""'], {'args': '[instance.addon.guid, instance.version, instance.uuid.hex]'}), "('signing.version', args=[instance.addon.guid, instance.version,\n instance.uuid.hex])\n", (1556, 1644), False, 'from django.core.urlresolvers import reverse\n'), ((1831, 1888), 'django.core.urlresolvers.reverse', 'reverse', (['"""devhub.upload_detail"""'], {'args': '[instance.uuid.hex]'}), "('devhub.upload_detail', args=[instance.uuid.hex])\n", (1838, 1888), False, 'from django.core.urlresolvers import reverse\n'), ((2331, 2362), 'json.loads', 'json.loads', (['instance.validation'], {}), '(instance.validation)\n', (2341, 2362), False, 'import json\n')]
|
from runner.run_description import RunDescription, Experiment, ParamGrid
from runner.runs.quad_multi_mix_baseline import QUAD_BASELINE_CLI
_params = ParamGrid([
('quads_local_obs', [2, 4]),
('seed', [0000, 1111, 2222, 3333]),
])
_experiment = Experiment(
'quad_mix_baseline-8_mixed_local_obs',
QUAD_BASELINE_CLI,
_params.generate_params(randomize=False),
)
RUN_DESCRIPTION = RunDescription('paper_quads_multi_mix_baseline_8a_local_obs_v115', experiments=[_experiment])
# On Brain server, when you use num_workers = 72, if the system reports: Resource temporarily unavailable,
# then, try to use two commands below
# export OMP_NUM_THREADS=1
# export USE_SIMPLE_THREADED_LEVEL3=1
# Command to use this script on server:
# xvfb-run python -m runner.run --run=quad_multi_mix_baseline --runner=processes --max_parallel=3 --pause_between=1 --experiments_per_gpu=1 --num_gpus=3
# Command to use this script on local machine:
# Please change num_workers to the physical cores of your local machine
# python -m runner.run --run=quad_multi_mix_baseline --runner=processes --max_parallel=3 --pause_between=1 --experiments_per_gpu=1 --num_gpus=3
|
[
"runner.run_description.ParamGrid",
"runner.run_description.RunDescription"
] |
[((150, 223), 'runner.run_description.ParamGrid', 'ParamGrid', (["[('quads_local_obs', [2, 4]), ('seed', [0, 1111, 2222, 3333])]"], {}), "([('quads_local_obs', [2, 4]), ('seed', [0, 1111, 2222, 3333])])\n", (159, 223), False, 'from runner.run_description import RunDescription, Experiment, ParamGrid\n'), ((398, 495), 'runner.run_description.RunDescription', 'RunDescription', (['"""paper_quads_multi_mix_baseline_8a_local_obs_v115"""'], {'experiments': '[_experiment]'}), "('paper_quads_multi_mix_baseline_8a_local_obs_v115',\n experiments=[_experiment])\n", (412, 495), False, 'from runner.run_description import RunDescription, Experiment, ParamGrid\n')]
|
#!/usr/bin/env python3
from .cluster import cluster_down, cluster_stop, cluster_up, cluster_check
from .commands import vlan_ifname
from .db import DB, Address
from .listener import Listener
from .veth import veth_up
from .vlan import vlan_link_bridge, vlan_link_up
from redis import Redis
from signal import signal, SIGTERM, SIGINT
import functools
import logging
import os
import re
import sys
logger = logging.getLogger(__name__)
def get_env():
env = {}
env['REDIS_HOSTNAME'] = os.getenv('REDIS_HOSTNAME', 'redis')
env['REDIS_DB'] = int(os.getenv('REDIS_DB', '0'))
env['REDIS_PORT'] = os.getenv('REDIS_PORT', '6379')
env['REDIS_PORT'] = int(env['REDIS_PORT'])
env['REDIS_PASSWORD'] = os.getenv('REDIS_PASSWORD')
env['LOG_LEVEL'] = os.getenv('LOG_LEVEL', 'INFO').upper()
env['LOG_FILE'] = os.getenv('LOG_FILE', None)
env['CLUSTER_TIMEOUT'] = float(os.getenv('CLUSTER_TIMEOUT', 15*60))
return env
def connection_from_channel(channel):
addr = Address.deserialize(re.search(r'Connection:(?P<addr>\S+):', channel.decode()).group('addr'))
connection = DB.Connection(addr)
if not connection.exists():
raise ValueError(f"Connection {connection.id} unexpectedly deleted")
return connection
def main():
env = get_env()
logging.basicConfig(
format="[{asctime:s}] {levelname:s}: {message:s}",
level=env['LOG_LEVEL'],
filename=env['LOG_FILE'],
datefmt="%m/%d/%y %H:%M:%S",
style='{'
)
# Connect to DB
DB.redis = Redis(host=env['REDIS_HOSTNAME'], db=env['REDIS_DB'], port=env['REDIS_PORT'], password=env['REDIS_PASSWORD'])
listener = Listener()
@functools.partial(signal, SIGTERM)
def stop_handler(signum, frame):
logger.info("Shutting down...")
listener.stop()
sys.exit(0)
@listener.on(b'__keyspace@*__:Connection:*:alive', event=b'set')
def connection_set(channel, _):
connection = connection_from_channel(channel)
user = connection.user
vpn = connection.vpn
cluster = DB.Cluster(user, vpn.chal)
logger.info("New connection %s to cluster %s", connection.id, cluster.id)
if connection.alive:
if len(cluster.connections) <= 0:
logger.warning("Connect %s set as alive despite 0 connections registered on %s", connection.id, cluster.id)
veth_up(vpn)
cluster_check(user, vpn, cluster)
cluster_up(user, vpn, cluster)
vlan_link_up(vpn, user)
vlan_link_bridge(vpn, user, cluster)
else:
connection.delete('alive')
@listener.on(b'__keyspace@*__:Connection:*:alive', event=b'expired')
@listener.on(b'__keyspace@*__:Connection:*:alive', event=b'del')
def connection_deleted(channel, event):
connection = connection_from_channel(channel)
user = connection.user
vpn = connection.vpn
cluster = DB.Cluster(user, vpn.chal)
action = "Expired" if event == "expired" else "Deleted"
logger.info("%s connection %s to %s", action, connection.id, cluster.id)
cluster.connections.remove(connection)
if len(cluster.connections) > 0:
logging.debug("Cluster %s has active connections", cluster.id)
else:
logger.info("No connections to cluster %s; Setting timeout for %d seconds", cluster.id, env['CLUSTER_TIMEOUT'])
cluster.status = DB.Cluster.EXPIRING
cluster.expire(status=env['CLUSTER_TIMEOUT'])
connection.delete()
@listener.on(b'__keyspace@*__:Cluster:*:status', event=b'expired')
def cluster_expired(channel, _):
m = re.search(r'Cluster:(?P<user>\S+)@(?P<chal>\S+):status', channel.decode())
user, chal = DB.User(m.group('user')), DB.Challenge(m.group('chal'))
cluster = DB.Cluster(user, chal)
vpn = cluster.vpn
logger.info("Expired cluster %s", cluster.id)
cluster_down(user, vpn, cluster)
cluster.delete()
@listener.on(b'__keyspace@*__:Vpn:*:veth', event=b'set')
def veth_set(channel, _):
vpn = DB.Vpn(re.search(r'Vpn:(?P<id>\S+):veth', channel.decode()).group('id'))
logger.info("New vpn %s has come online", vpn.id)
veth_up(vpn)
try:
listener.run()
except (SystemExit, KeyboardInterrupt):
pass
if __name__ == "__main__":
main()
|
[
"redis.Redis",
"functools.partial",
"logging.debug",
"logging.basicConfig",
"sys.exit",
"os.getenv",
"logging.getLogger"
] |
[((407, 434), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (424, 434), False, 'import logging\n'), ((492, 528), 'os.getenv', 'os.getenv', (['"""REDIS_HOSTNAME"""', '"""redis"""'], {}), "('REDIS_HOSTNAME', 'redis')\n", (501, 528), False, 'import os\n'), ((607, 638), 'os.getenv', 'os.getenv', (['"""REDIS_PORT"""', '"""6379"""'], {}), "('REDIS_PORT', '6379')\n", (616, 638), False, 'import os\n'), ((714, 741), 'os.getenv', 'os.getenv', (['"""REDIS_PASSWORD"""'], {}), "('REDIS_PASSWORD')\n", (723, 741), False, 'import os\n'), ((826, 853), 'os.getenv', 'os.getenv', (['"""LOG_FILE"""', 'None'], {}), "('LOG_FILE', None)\n", (835, 853), False, 'import os\n'), ((1291, 1460), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[{asctime:s}] {levelname:s}: {message:s}"""', 'level': "env['LOG_LEVEL']", 'filename': "env['LOG_FILE']", 'datefmt': '"""%m/%d/%y %H:%M:%S"""', 'style': '"""{"""'}), "(format='[{asctime:s}] {levelname:s}: {message:s}',\n level=env['LOG_LEVEL'], filename=env['LOG_FILE'], datefmt=\n '%m/%d/%y %H:%M:%S', style='{')\n", (1310, 1460), False, 'import logging\n'), ((1534, 1648), 'redis.Redis', 'Redis', ([], {'host': "env['REDIS_HOSTNAME']", 'db': "env['REDIS_DB']", 'port': "env['REDIS_PORT']", 'password': "env['REDIS_PASSWORD']"}), "(host=env['REDIS_HOSTNAME'], db=env['REDIS_DB'], port=env['REDIS_PORT'\n ], password=env['REDIS_PASSWORD'])\n", (1539, 1648), False, 'from redis import Redis\n'), ((1677, 1711), 'functools.partial', 'functools.partial', (['signal', 'SIGTERM'], {}), '(signal, SIGTERM)\n', (1694, 1711), False, 'import functools\n'), ((555, 581), 'os.getenv', 'os.getenv', (['"""REDIS_DB"""', '"""0"""'], {}), "('REDIS_DB', '0')\n", (564, 581), False, 'import os\n'), ((889, 926), 'os.getenv', 'os.getenv', (['"""CLUSTER_TIMEOUT"""', '(15 * 60)'], {}), "('CLUSTER_TIMEOUT', 15 * 60)\n", (898, 926), False, 'import os\n'), ((1822, 1833), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1830, 1833), False, 
'import sys\n'), ((765, 795), 'os.getenv', 'os.getenv', (['"""LOG_LEVEL"""', '"""INFO"""'], {}), "('LOG_LEVEL', 'INFO')\n", (774, 795), False, 'import os\n'), ((3228, 3290), 'logging.debug', 'logging.debug', (['"""Cluster %s has active connections"""', 'cluster.id'], {}), "('Cluster %s has active connections', cluster.id)\n", (3241, 3290), False, 'import logging\n')]
|
import sys
from PyQt5.QtWidgets import QMainWindow, QApplication
from PyQt5.QtGui import QIcon
class FirstMainWin(QMainWindow):
def __init__(self, parent=None):
super(FirstMainWin, self).__init__(parent)
self.setWindowTitle('1')
self.resize(400, 300)
self.status = self.statusBar()
self.status.showMessage('only 5s', 5000)
if __name__ == '__main__':
app = QApplication(sys.argv)
main = FirstMainWin()
main.show()
sys.exit(app.exec_())
|
[
"PyQt5.QtWidgets.QApplication"
] |
[((411, 433), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (423, 433), False, 'from PyQt5.QtWidgets import QMainWindow, QApplication\n')]
|
import RPi.GPIO as GPIO
import time
import threading
import json
import pins
import commands
import settings
def main(action):
try:
servo = _setup()
if action == commands.LOCK:
_lock(servo)
elif action == commands.UNLOCK:
_unlock(servo)
elif action == commands.BUZZ:
# buzz_thread = threading.Thread(target=_buzz)
# buzz_thread.start()
_buzz()
elif action == commands.BUZZ_AND_UNLOCK:
_buzzAndUnlock(servo)
elif action == commands.TOGGLE:
_toggleLock(servo)
elif action == commands.DELAY_LOCK:
_delayLock(servo)
else:
raise NotImplementedError
finally:
_tearDown(servo)
def _setup():
# servo
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pins.SERVO_PIN, GPIO.OUT)
servo = GPIO.PWM(pins.SERVO_PIN, 50) # 50Hz
# lock status LED
GPIO.setup(pins.LOCK_STATUS_LED_PIN, GPIO.OUT)
# buzzer pin
GPIO.setup(pins.BUZZER_PIN, GPIO.OUT)
GPIO.output(pins.BUZZER_PIN,GPIO.LOW)
return servo
def _tearDown(servo):
servo.stop()
# GPIO.cleanup() # disbled in order to keep LED lit
def _lock(servo):
servo.start(settings.SERVO_LOCKED_POSITION)
GPIO.output(pins.LOCK_STATUS_LED_PIN,GPIO.HIGH)
time.sleep(settings.SERVO_ROTATION_DURATION)
_setStateValue(settings.LOCKED_STATE_KEY, True)
def _unlock(servo):
servo.start(settings.SERVO_UNLOCKED_POSITION)
GPIO.output(pins.LOCK_STATUS_LED_PIN,GPIO.LOW)
time.sleep(settings.SERVO_ROTATION_DURATION)
_setStateValue(settings.LOCKED_STATE_KEY, False)
def _buzz():
try:
GPIO.output(pins.BUZZER_PIN,GPIO.HIGH) # start buzzing
time.sleep(settings.BUZZ_DURATION)
finally:
GPIO.output(pins.BUZZER_PIN,GPIO.LOW) # end buzzing
def _buzzAndUnlock(servo):
# buzz_thread = threading.Thread(target=_buzz)
# buzz_thread.start()
_buzz()
_delayLock(servo)
def _toggleLock(servo):
if _isCurrentlyLocked():
_unlock(servo)
else:
_lock(servo)
def _isCurrentlyLocked():
currently_locked = False
try:
currently_locked = _getStateValue(settings.LOCKED_STATE_KEY)
except KeyError:
pass # leave currently_locked as default
return currently_locked
def _getStateValue(key):
persistent_state = _getPersistantState()
value = persistent_state[key]
return value
def _setStateValue(key, value):
persistent_state = None
# load exisiting state
persistent_state = _getPersistantState()
# set new state
persistent_state[key] = value
# save new state
with open(settings.PERSISTENT_STATE_FILE, 'w') as f:
raw_json = json.dumps(persistent_state)
f.write(raw_json)
def _getPersistantState():
persistent_state = None
try:
with open(settings.PERSISTENT_STATE_FILE, 'r') as f:
raw_json = f.read()
persistent_state = json.loads(raw_json)
except ValueError:
persistent_state = {}
return persistent_state
def _delayLock(servo):
_unlock(servo)
_blinkingSleep(settings.DELAYED_LOCK_DELAY)
_lock(servo)
def _blinkingSleep(totalDuration):
for i in range(totalDuration):
GPIO.output(pins.LOCK_STATUS_LED_PIN,GPIO.LOW)
time.sleep(0.5)
GPIO.output(pins.LOCK_STATUS_LED_PIN,GPIO.HIGH)
time.sleep(0.5)
|
[
"RPi.GPIO.setmode",
"json.loads",
"RPi.GPIO.setup",
"json.dumps",
"time.sleep",
"RPi.GPIO.PWM",
"RPi.GPIO.output",
"RPi.GPIO.setwarnings"
] |
[((644, 667), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (660, 667), True, 'import RPi.GPIO as GPIO\n'), ((669, 693), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (681, 693), True, 'import RPi.GPIO as GPIO\n'), ((695, 731), 'RPi.GPIO.setup', 'GPIO.setup', (['pins.SERVO_PIN', 'GPIO.OUT'], {}), '(pins.SERVO_PIN, GPIO.OUT)\n', (705, 731), True, 'import RPi.GPIO as GPIO\n'), ((741, 769), 'RPi.GPIO.PWM', 'GPIO.PWM', (['pins.SERVO_PIN', '(50)'], {}), '(pins.SERVO_PIN, 50)\n', (749, 769), True, 'import RPi.GPIO as GPIO\n'), ((797, 843), 'RPi.GPIO.setup', 'GPIO.setup', (['pins.LOCK_STATUS_LED_PIN', 'GPIO.OUT'], {}), '(pins.LOCK_STATUS_LED_PIN, GPIO.OUT)\n', (807, 843), True, 'import RPi.GPIO as GPIO\n'), ((859, 896), 'RPi.GPIO.setup', 'GPIO.setup', (['pins.BUZZER_PIN', 'GPIO.OUT'], {}), '(pins.BUZZER_PIN, GPIO.OUT)\n', (869, 896), True, 'import RPi.GPIO as GPIO\n'), ((898, 936), 'RPi.GPIO.output', 'GPIO.output', (['pins.BUZZER_PIN', 'GPIO.LOW'], {}), '(pins.BUZZER_PIN, GPIO.LOW)\n', (909, 936), True, 'import RPi.GPIO as GPIO\n'), ((1105, 1153), 'RPi.GPIO.output', 'GPIO.output', (['pins.LOCK_STATUS_LED_PIN', 'GPIO.HIGH'], {}), '(pins.LOCK_STATUS_LED_PIN, GPIO.HIGH)\n', (1116, 1153), True, 'import RPi.GPIO as GPIO\n'), ((1154, 1198), 'time.sleep', 'time.sleep', (['settings.SERVO_ROTATION_DURATION'], {}), '(settings.SERVO_ROTATION_DURATION)\n', (1164, 1198), False, 'import time\n'), ((1317, 1364), 'RPi.GPIO.output', 'GPIO.output', (['pins.LOCK_STATUS_LED_PIN', 'GPIO.LOW'], {}), '(pins.LOCK_STATUS_LED_PIN, GPIO.LOW)\n', (1328, 1364), True, 'import RPi.GPIO as GPIO\n'), ((1365, 1409), 'time.sleep', 'time.sleep', (['settings.SERVO_ROTATION_DURATION'], {}), '(settings.SERVO_ROTATION_DURATION)\n', (1375, 1409), False, 'import time\n'), ((1482, 1521), 'RPi.GPIO.output', 'GPIO.output', (['pins.BUZZER_PIN', 'GPIO.HIGH'], {}), '(pins.BUZZER_PIN, GPIO.HIGH)\n', (1493, 1521), True, 'import RPi.GPIO as GPIO\n'), ((1539, 
1573), 'time.sleep', 'time.sleep', (['settings.BUZZ_DURATION'], {}), '(settings.BUZZ_DURATION)\n', (1549, 1573), False, 'import time\n'), ((1586, 1624), 'RPi.GPIO.output', 'GPIO.output', (['pins.BUZZER_PIN', 'GPIO.LOW'], {}), '(pins.BUZZER_PIN, GPIO.LOW)\n', (1597, 1624), True, 'import RPi.GPIO as GPIO\n'), ((2433, 2461), 'json.dumps', 'json.dumps', (['persistent_state'], {}), '(persistent_state)\n', (2443, 2461), False, 'import json\n'), ((2900, 2947), 'RPi.GPIO.output', 'GPIO.output', (['pins.LOCK_STATUS_LED_PIN', 'GPIO.LOW'], {}), '(pins.LOCK_STATUS_LED_PIN, GPIO.LOW)\n', (2911, 2947), True, 'import RPi.GPIO as GPIO\n'), ((2949, 2964), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (2959, 2964), False, 'import time\n'), ((2967, 3015), 'RPi.GPIO.output', 'GPIO.output', (['pins.LOCK_STATUS_LED_PIN', 'GPIO.HIGH'], {}), '(pins.LOCK_STATUS_LED_PIN, GPIO.HIGH)\n', (2978, 3015), True, 'import RPi.GPIO as GPIO\n'), ((3017, 3032), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3027, 3032), False, 'import time\n'), ((2641, 2661), 'json.loads', 'json.loads', (['raw_json'], {}), '(raw_json)\n', (2651, 2661), False, 'import json\n')]
|
import os
import signal
import time
import unittest
import stomp
from stomp import exception
from stomp.backward import monotonic
from stomp.listener import TestListener
from stomp.test.testutils import *
from concurrent.futures import ThreadPoolExecutor
executor = ThreadPoolExecutor()
def create_thread(fc):
f = executor.submit(fc)
print('Created future %s on executor %s' % (f, executor))
return f
class ReconnectListener(TestListener):
def __init__(self, conn):
TestListener.__init__(self, '123')
self.conn = conn
def on_receiver_loop_ended(self, *args):
if self.conn:
c = self.conn
self.conn = None
c.connect(get_default_user(), get_default_password(), wait=True)
c.disconnect()
class TestThreadingOverride(unittest.TestCase):
def setUp(self):
conn = stomp.Connection(get_default_host())
# check thread override here
conn.transport.override_threading(create_thread)
listener = ReconnectListener(conn)
conn.set_listener('', listener)
conn.connect(get_default_user(), get_default_password(), wait=True)
self.conn = conn
self.listener = listener
self.timestamp = time.strftime('%Y%m%d%H%M%S')
def test_basic(self):
queuename = '/queue/test1-%s' % self.timestamp
self.conn.subscribe(destination=queuename, id=1, ack='auto')
self.conn.send(body='this is a test', destination=queuename, receipt='123')
self.listener.wait_for_message()
self.assertTrue(self.listener.connections == 1, 'should have received 1 connection acknowledgement')
self.assertTrue(self.listener.messages == 1, 'should have received 1 message')
self.assertTrue(self.listener.errors == 0, 'should not have received any errors')
self.conn.disconnect(receipt=None)
self.conn.connect(get_default_user(), get_default_password(), wait=True)
self.conn.disconnect()
|
[
"concurrent.futures.ThreadPoolExecutor",
"stomp.listener.TestListener.__init__",
"time.strftime"
] |
[((269, 289), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '()\n', (287, 289), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((497, 531), 'stomp.listener.TestListener.__init__', 'TestListener.__init__', (['self', '"""123"""'], {}), "(self, '123')\n", (518, 531), False, 'from stomp.listener import TestListener\n'), ((1244, 1273), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""'], {}), "('%Y%m%d%H%M%S')\n", (1257, 1273), False, 'import time\n')]
|
import copy
import os
import pickle
import numpy as np
from numpy.linalg import inv
from numpy.matlib import repmat
import pylot.utils
class DepthFrame(object):
"""Class that stores depth frames.
Args:
frame: A numpy array storing the depth frame.
camera_setup (:py:class:`~pylot.drivers.sensor_setup.DepthCameraSetup`): # noqa: E501
The camera setup used by the sensor that generated this frame.
original_frame: A numpy array storing the RGB encoded depth image.
Attributes:
frame: A numpy array storing the depth frame.
camera_setup (:py:class:`~pylot.drivers.sensor_setup.DepthCameraSetup`):
The camera setup used by the sensor that generated this frame.
original_frame: A numpy array storing the RGB encoded depth image.
"""
def __init__(self, frame, camera_setup, original_frame=None):
self.frame = frame
self.camera_setup = camera_setup
self.original_frame = original_frame
# Attribute used to cache the depth frame as a point cloud. We're doing
# this because it is computationally expensive to transform a depth
# frame to a point cloud.
self._cached_point_cloud = None
@classmethod
def from_simulator_frame(cls,
frame,
camera_setup,
save_original_frame=False):
"""Creates a pylot depth frame from a simulator depth frame.
Args:
frame: An image instance containing the depth image.
camera_setup: The setup of the depth camera.
save_original_frame: True if the original RGB image needs to be
saved.
Returns:
:py:class:`.DepthFrame`: A depth frame.
"""
original_frame = None
# Convert an image containing simulator encoded depth-map to a 2D
# array containing the depth value of each pixel normalized
# between [0.0, 1.0]
_frame = np.frombuffer(frame.raw_data, dtype=np.dtype("uint8"))
_frame = np.reshape(_frame, (frame.height, frame.width, 4))
frame = _frame.astype(np.float32)
if save_original_frame:
original_frame = copy.deepcopy(frame[:, :, :3])
# Apply (R + G * 256 + B * 256 * 256) / (256 * 256 * 256 - 1).
frame = np.dot(frame[:, :, :3], [65536.0, 256.0, 1.0])
frame /= 16777215.0 # (256.0 * 256.0 * 256.0 - 1.0)
return cls(frame, camera_setup, original_frame)
def as_numpy_array(self):
"""Returns the depth frame as a numpy array."""
return self.frame
def as_point_cloud(self):
"""Converts the depth frame to a 1D array containing the 3D
position of each pixel in world coordinates.
See :py:class:`~pylot.drivers.sensor_setup.CameraSetup` for
coordinate axis orientations.
"""
far = 1000.0 # max depth in meters.
intrinsic_mat = self.camera_setup.get_intrinsic_matrix()
width, height = self.camera_setup.width, self.camera_setup.height
# 2d pixel coordinates
pixel_length = width * height
u_coord = repmat(np.r_[0:width:1], height, 1).reshape(pixel_length)
v_coord = repmat(np.c_[0:height:1], 1, width).reshape(pixel_length)
normalized_depth = np.reshape(self.frame, pixel_length)
# p2d = [u,v,1]
p2d = np.array([u_coord, v_coord, np.ones_like(u_coord)])
# P = [X,Y,Z]
p3d = np.dot(inv(intrinsic_mat), p2d)
p3d *= normalized_depth * far
# [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]
locations = np.asarray(np.transpose(p3d))
# Transform the points in 3D world coordinates.
to_world_transform = self.camera_setup.get_unreal_transform()
point_cloud = to_world_transform.transform_points(locations)
return point_cloud
def get_pixel_locations(self, pixels):
""" Gets the 3D world locations from pixel coordinates.
Args:
pixels: List of pylot.utils.Vector2D pixel coordinates.
Returns:
List of pylot.utils.Locations
"""
if self._cached_point_cloud is None:
self._cached_point_cloud = self.as_point_cloud()
pixel_locations = [
self._cached_point_cloud[pixel.y * self.camera_setup.width +
pixel.x] for pixel in pixels
]
return [
pylot.utils.Location(loc[0], loc[1], loc[2])
for loc in pixel_locations
]
def pixel_has_same_depth(self, x, y, z, threshold):
"""Checks if the depth of pixel (y,x) is within threshold of z."""
return abs(self.frame[int(y)][int(x)] * 1000 - z) < threshold
def resize(self, width, height):
"""Resizes the frame."""
import cv2
self.camera_setup.set_resolution(width, height)
self.frame = cv2.resize(self.frame,
dsize=(width, height),
interpolation=cv2.INTER_NEAREST)
def visualize(self, pygame_display, timestamp=None):
"""Visualizes the frame on a pygame display."""
if self.original_frame is not None:
import pygame
image_np = self.original_frame
image_np = image_np[:, :, ::-1]
image_np = np.transpose(image_np, (1, 0, 2))
pygame.surfarray.blit_array(pygame_display, image_np)
pygame.display.flip()
def save(self, timestamp, data_path, file_base):
"""Saves the depth frame to a file.
Args:
timestamp (:obj:`int`): Timestamp associated with the depth frame.
data_path (:obj:`str`): Path where to save the depth frame.
file_base (:obj:`str`): Base name of the file.
"""
file_name = os.path.join(data_path,
'{}-{}.pkl'.format(file_base, timestamp))
pickle.dump(self.as_numpy_array(),
open(file_name, 'wb'),
protocol=pickle.HIGHEST_PROTOCOL)
def __repr__(self):
return 'DepthFrame(camera_setup: {}, frame: {})'.format(
self.camera_setup, self.frame)
def __str__(self):
return 'DepthFrame(camera_setup: {})'.format(self.camera_setup)
|
[
"copy.deepcopy",
"numpy.ones_like",
"numpy.dtype",
"numpy.transpose",
"pygame.display.flip",
"pygame.surfarray.blit_array",
"numpy.linalg.inv",
"numpy.reshape",
"numpy.dot",
"numpy.matlib.repmat",
"cv2.resize"
] |
[((2098, 2148), 'numpy.reshape', 'np.reshape', (['_frame', '(frame.height, frame.width, 4)'], {}), '(_frame, (frame.height, frame.width, 4))\n', (2108, 2148), True, 'import numpy as np\n'), ((2370, 2416), 'numpy.dot', 'np.dot', (['frame[:, :, :3]', '[65536.0, 256.0, 1.0]'], {}), '(frame[:, :, :3], [65536.0, 256.0, 1.0])\n', (2376, 2416), True, 'import numpy as np\n'), ((3350, 3386), 'numpy.reshape', 'np.reshape', (['self.frame', 'pixel_length'], {}), '(self.frame, pixel_length)\n', (3360, 3386), True, 'import numpy as np\n'), ((4945, 5023), 'cv2.resize', 'cv2.resize', (['self.frame'], {'dsize': '(width, height)', 'interpolation': 'cv2.INTER_NEAREST'}), '(self.frame, dsize=(width, height), interpolation=cv2.INTER_NEAREST)\n', (4955, 5023), False, 'import cv2\n'), ((2252, 2282), 'copy.deepcopy', 'copy.deepcopy', (['frame[:, :, :3]'], {}), '(frame[:, :, :3])\n', (2265, 2282), False, 'import copy\n'), ((3522, 3540), 'numpy.linalg.inv', 'inv', (['intrinsic_mat'], {}), '(intrinsic_mat)\n', (3525, 3540), False, 'from numpy.linalg import inv\n'), ((3667, 3684), 'numpy.transpose', 'np.transpose', (['p3d'], {}), '(p3d)\n', (3679, 3684), True, 'import numpy as np\n'), ((5382, 5415), 'numpy.transpose', 'np.transpose', (['image_np', '(1, 0, 2)'], {}), '(image_np, (1, 0, 2))\n', (5394, 5415), True, 'import numpy as np\n'), ((5428, 5481), 'pygame.surfarray.blit_array', 'pygame.surfarray.blit_array', (['pygame_display', 'image_np'], {}), '(pygame_display, image_np)\n', (5455, 5481), False, 'import pygame\n'), ((5494, 5515), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (5513, 5515), False, 'import pygame\n'), ((2062, 2079), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (2070, 2079), True, 'import numpy as np\n'), ((3189, 3224), 'numpy.matlib.repmat', 'repmat', (['np.r_[0:width:1]', 'height', '(1)'], {}), '(np.r_[0:width:1], height, 1)\n', (3195, 3224), False, 'from numpy.matlib import repmat\n'), ((3265, 3300), 'numpy.matlib.repmat', 'repmat', 
(['np.c_[0:height:1]', '(1)', 'width'], {}), '(np.c_[0:height:1], 1, width)\n', (3271, 3300), False, 'from numpy.matlib import repmat\n'), ((3454, 3475), 'numpy.ones_like', 'np.ones_like', (['u_coord'], {}), '(u_coord)\n', (3466, 3475), True, 'import numpy as np\n')]
|
"""
Coordinate Transformation Functions
This module contains the functions for converting one
`sunpy.coordinates.frames` object to another.
.. warning::
The functions in this submodule should never be called directly, transforming
between coordinate frames should be done using the ``.transform_to`` methods
on `~astropy.coordinates.BaseCoordinateFrame` or
`~astropy.coordinates.SkyCoord` instances.
"""
import logging
from copy import deepcopy
from functools import wraps
from contextlib import contextmanager
import numpy as np
import astropy.units as u
from astropy.constants import c as speed_of_light
from astropy.coordinates import (
HCRS,
ICRS,
BaseCoordinateFrame,
ConvertError,
HeliocentricMeanEcliptic,
get_body_barycentric,
)
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.builtin_frames import make_transform_graph_docs
from astropy.coordinates.builtin_frames.utils import get_jd12
from astropy.coordinates.matrix_utilities import matrix_product, matrix_transpose, rotation_matrix
from astropy.coordinates.representation import (
CartesianRepresentation,
SphericalRepresentation,
UnitSphericalRepresentation,
)
# Import erfa via astropy to make sure we are using the same ERFA library as Astropy
from astropy.coordinates.sky_coordinate import erfa
from astropy.coordinates.transformations import FunctionTransform, FunctionTransformWithFiniteDifference
from astropy.time import Time
from sunpy import log
from sunpy.sun import constants
from .frames import (
_J2000,
GeocentricEarthEquatorial,
GeocentricSolarEcliptic,
Heliocentric,
HeliocentricEarthEcliptic,
HeliocentricInertial,
HeliographicCarrington,
HeliographicStonyhurst,
Helioprojective,
)
# Solar radius as a physical constant, converted to meters
RSUN_METERS = constants.get('radius').si.to(u.m)
# Public API: the two context managers plus every registered transformation function
__all__ = ['transform_with_sun_center',
           'propagate_with_solar_surface',
           'hgs_to_hgc', 'hgc_to_hgs', 'hcc_to_hpc',
           'hpc_to_hcc', 'hcc_to_hgs', 'hgs_to_hcc',
           'hpc_to_hpc',
           'hcrs_to_hgs', 'hgs_to_hcrs',
           'hgs_to_hgs', 'hgc_to_hgc', 'hcc_to_hcc',
           'hme_to_hee', 'hee_to_hme', 'hee_to_hee',
           'hee_to_gse', 'gse_to_hee', 'gse_to_gse',
           'hgs_to_hci', 'hci_to_hgs', 'hci_to_hci',
           'hme_to_gei', 'gei_to_hme', 'gei_to_gei']
# Boolean flag for whether to ignore the motion of the center of the Sun in inertial space
# (toggled by the transform_with_sun_center() context manager)
_ignore_sun_motion = False
# If not None, the name of the differential-rotation model to use for any obstime change
# (set by the propagate_with_solar_surface() context manager)
_autoapply_diffrot = None
@contextmanager
def transform_with_sun_center():
    """
    Context manager for coordinate transformations to ignore the motion of the center of the Sun.
    Normally, coordinates refer to a point in inertial space (relative to the barycenter of the
    solar system). Transforming to a different observation time does not move the point at all,
    but rather only updates the coordinate representation as needed for the origin and axis
    orientations at the new observation time. However, the center of the Sun moves over time.
    Thus, for example, a coordinate that lies on the surface of the Sun at one observation time
    will not continue to lie on the surface of the Sun at other observation times.
    Under this context manager, transformations will instead move the coordinate over time to
    "follow" the translational motion of the center of Sun, thus maintaining the position of the
    coordinate relative to the center of the Sun.
    Notes
    -----
    This context manager accounts only for the motion of the center of the Sun, i.e.,
    translational motion. The motion of solar features due to any rotation of the Sun about its
    rotational axis is not accounted for.
    Due to the implementation approach, this context manager modifies transformations between only
    these five coordinate frames:
    `~sunpy.coordinates.frames.HeliographicStonyhurst`,
    `~sunpy.coordinates.frames.HeliographicCarrington`,
    `~sunpy.coordinates.frames.HeliocentricInertial`,
    `~sunpy.coordinates.frames.Heliocentric`, and
    `~sunpy.coordinates.frames.Helioprojective`.
    Examples
    --------
    >>> from astropy.coordinates import SkyCoord
    >>> from sunpy.coordinates import HeliographicStonyhurst, transform_with_sun_center
    >>> import astropy.units as u
    >>> start_frame = HeliographicStonyhurst(obstime="2001-01-01")
    >>> end_frame = HeliographicStonyhurst(obstime="2001-02-01")
    >>> sun_center = SkyCoord(0*u.deg, 0*u.deg, 0*u.AU, frame=start_frame)
    >>> sun_center
    <SkyCoord (HeliographicStonyhurst: obstime=2001-01-01T00:00:00.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, AU)
        (0., 0., 0.)>
    >>> sun_center.transform_to(end_frame)  # transformations do not normally follow Sun center
    <SkyCoord (HeliographicStonyhurst: obstime=2001-02-01T00:00:00.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, AU)
        (23.33174233, -5.96399877, 0.00027959)>
    >>> with transform_with_sun_center():
    ...     sun_center.transform_to(end_frame)  # now following Sun center
    <SkyCoord (HeliographicStonyhurst: obstime=2001-02-01T00:00:00.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, AU)
        (0., 0., 0.)>
    """
    global _ignore_sun_motion
    # Remember the flag's state so nested uses of this context manager restore it correctly
    saved_flag = _ignore_sun_motion  # nominally False
    try:
        if not saved_flag:
            log.debug("Ignoring the motion of the center of the Sun for transformations")
        _ignore_sun_motion = True
        yield
    finally:
        # Only log when this is the outermost use, mirroring the message on entry
        if not saved_flag:
            log.debug("Stop ignoring the motion of the center of the Sun for transformations")
        _ignore_sun_motion = saved_flag
@contextmanager
def propagate_with_solar_surface(rotation_model='howard'):
    """
    Context manager for coordinate transformations to automatically apply solar
    differential rotation for any change in observation time.
    Normally, coordinates refer to a point in inertial space (relative to the
    barycenter of the solar system). Transforming to a different observation time
    does not move the point at all, but rather only updates the coordinate
    representation as needed for the origin and axis orientations at the new
    observation time.
    Under this context manager, transformations will instead treat the coordinate
    as if it were referring to a point on the solar surface instead of a point in
    inertial space. If a transformation has a change in observation time, the
    heliographic longitude of the point will be updated according to the specified
    rotation model.
    Parameters
    ----------
    rotation_model : `str`
        Accepted model names are ``'howard'`` (default), ``'snodgrass'``,
        ``'allen'``, and ``'rigid'``. See the documentation for
        :func:`~sunpy.physics.differential_rotation.diff_rot` for the differences
        between these models.
    Notes
    -----
    This context manager also ignores the motion of the center of the Sun (see
    :func:`~sunpy.coordinates.transformations.transform_with_sun_center`).
    Due to the implementation approach, this context manager modifies
    transformations between only these five coordinate frames:
    `~sunpy.coordinates.frames.HeliographicStonyhurst`,
    `~sunpy.coordinates.frames.HeliographicCarrington`,
    `~sunpy.coordinates.frames.HeliocentricInertial`,
    `~sunpy.coordinates.frames.Heliocentric`, and
    `~sunpy.coordinates.frames.Helioprojective`.
    Examples
    --------
    .. minigallery:: sunpy.coordinates.propagate_with_solar_surface
    >>> import astropy.units as u
    >>> from astropy.coordinates import SkyCoord
    >>> from sunpy.coordinates import HeliocentricInertial, propagate_with_solar_surface
    >>> meridian = SkyCoord(0*u.deg, [-60, -30, 0, 30, 60]*u.deg, 1*u.AU,
    ...                     frame=HeliocentricInertial, obstime='2021-09-15')
    >>> out_frame = HeliocentricInertial(obstime='2021-09-21')
    >>> with propagate_with_solar_surface():
    ...     print(meridian.transform_to(out_frame))
    <SkyCoord (HeliocentricInertial: obstime=2021-09-21T00:00:00.000): (lon, lat, distance) in (deg, deg, AU)
        [(70.24182965, -60., 1.),
         (82.09298036, -30., 1.),
         (85.9579703 ,   0., 1.),
         (82.09298036,  30., 1.),
         (70.24182965,  60., 1.)]>
    >>> with propagate_with_solar_surface(rotation_model='rigid'):
    ...     print(meridian.transform_to(out_frame))
    <SkyCoord (HeliocentricInertial: obstime=2021-09-21T00:00:00.000): (lon, lat, distance) in (deg, deg, AU)
        [(85.1064, -60., 1.), (85.1064, -30., 1.),
         (85.1064,   0., 1.), (85.1064,  30., 1.),
         (85.1064,  60., 1.)]>
    """
    # Differential rotation is meaningful only relative to the Sun's center, so
    # also suppress the Sun's translational motion for the duration
    with transform_with_sun_center():
        try:
            global _autoapply_diffrot
            old_autoapply_diffrot = _autoapply_diffrot  # nominally None (i.e., disabled)
            log.debug("Enabling automatic solar differential rotation "
                      f"('{rotation_model}') for any changes in obstime")
            _autoapply_diffrot = rotation_model
            yield
        finally:
            # Only log the disable message when this is the outermost use
            if not old_autoapply_diffrot:
                log.debug("Disabling automatic solar differential rotation "
                          "for any changes in obstime")
            _autoapply_diffrot = old_autoapply_diffrot
# Global counter to keep track of the layer of transformation
# (incremented/decremented by _transformation_debug to indent nested transformations)
_layer_level = 0
def _transformation_debug(description):
    """
    Decorator to produce debugging output for a transformation function: its description, inputs,
    and output. Unicode box-drawing characters are used.
    """
    def decorator(func):
        @wraps(func)
        def wrapped_func(*args, **kwargs):
            global _layer_level
            # Check if the logging level is at least DEBUG (for performance reasons)
            debug_output = log.getEffectiveLevel() <= logging.DEBUG
            if debug_output:
                # Indention for transformation layer
                indention = u"\u2502 " * _layer_level
                # For the input arguments, add indention to any lines after the first line
                # (args[0] is the source coordinate, args[1] is the destination frame)
                from_str = str(args[0]).replace("\n", f"\n {indention}\u2502 ")
                to_str = str(args[1]).replace("\n", f"\n {indention}\u2502 ")
                # Log the description and the input arguments
                log.debug(f"{indention}{description}")
                log.debug(f"{indention}\u251c\u2500From: {from_str}")
                log.debug(f"{indention}\u251c\u2500To : {to_str}")
                # Increment the layer level to increase the indention for nested transformations
                _layer_level += 1
            result = func(*args, **kwargs)
            if debug_output:
                # Decrement the layer level
                _layer_level -= 1
                # For the output, add intention to any lines after the first line
                out_str = str(result).replace("\n", f"\n {indention} ")
                # Log the output
                log.debug(f"{indention}\u2514\u2500Out : {out_str}")
            return result
        return wrapped_func
    return decorator
def _observers_are_equal(obs_1, obs_2):
# Note that this also lets pass the situation where both observers are None
if obs_1 is obs_2:
return True
# obs_1 != obs_2
if obs_1 is None:
raise ConvertError("The source observer is set to None, but the transformation requires "
"the source observer to be specified, as the destination observer "
f"is set to {obs_2}.")
if obs_2 is None:
raise ConvertError("The destination observer is set to None, but the transformation "
"requires the destination observer to be specified, as the "
f"source observer is set to {obs_1}.")
if isinstance(obs_1, str):
if obs_1 == "self":
return False
raise ConvertError("The source observer needs to have `obstime` set because the "
"destination observer is different.")
if isinstance(obs_2, str):
if obs_2 == "self":
return False
raise ConvertError("The destination observer needs to have `obstime` set because the "
"source observer is different.")
return np.atleast_1d((u.allclose(obs_1.lat, obs_2.lat) and
u.allclose(obs_1.lon, obs_2.lon) and
u.allclose(obs_1.radius, obs_2.radius) and
_times_are_equal(obs_1.obstime, obs_2.obstime))).all()
def _check_observer_defined(frame):
if frame.observer is None:
raise ConvertError("This transformation cannot be performed because the "
f"{frame.__class__.__name__} frame has observer=None.")
elif isinstance(frame.observer, str):
if frame.observer != "self":
raise ConvertError("This transformation cannot be performed because the "
f"{frame.__class__.__name__} frame needs a specified obstime "
f"to fully resolve observer='{frame.observer}'.")
elif not isinstance(frame, HeliographicCarrington):
raise ConvertError(f"The {frame.__class__.__name__} frame has observer='self' "
"but this is valid for only HeliographicCarrington frames.")
def _times_are_equal(time_1, time_2):
    """Return whether two times are equal; two ``None`` values also count as equal."""
    if time_1 is None or time_2 is None:
        # Only equal when both times are absent
        return time_1 is None and time_2 is None
    if isinstance(time_1, Time) and isinstance(time_2, Time):
        # We explicitly perform the check in TAI to avoid possible numerical precision differences
        # between a time in UTC and the same time after a UTC->TAI->UTC conversion
        return np.all(time_1.tai == time_2.tai)
    return False
# =============================================================================
# ------------------------- Transformation Framework --------------------------
# =============================================================================
def _transform_obstime(frame, obstime):
    """
    Transform a frame to a new obstime using the appropriate loopback transformation.
    If the new obstime is None, no transformation is performed.
    If the frame's obstime is None, the frame is copied with the new obstime.
    """
    # Nothing to do when no target obstime is given or the obstime already matches
    if obstime is None or _times_are_equal(frame.obstime, obstime):
        return frame
    target_frame = frame.replicate(obstime=obstime)
    # A frame without its own obstime cannot be loopback-transformed, so it
    # simply adopts the new obstime
    return target_frame if frame.obstime is None else frame.transform_to(target_frame)
def _rotation_matrix_hgs_to_hgc(obstime, observer_distance_from_sun):
    """
    Return the rotation matrix from HGS to HGC at the same observation time.
    """
    if obstime is None:
        raise ConvertError("To perform this transformation, the coordinate"
                           " frame needs a specified `obstime`.")
    # Imported here to avoid a circular import
    from .sun import L0, earth_distance
    # An observer at a different distance from the Sun than the Earth sees the
    # rotation with a different light travel time
    light_travel_delta = (observer_distance_from_sun - earth_distance(obstime)) / speed_of_light
    # Convert that time difference into a difference in apparent longitude
    apparent_lon_delta = light_travel_delta * constants.sidereal_rotation_rate
    # The HGS->HGC difference is purely a rotation in longitude, i.e., about the Z axis
    return rotation_matrix(-(L0(obstime) + apparent_lon_delta), 'z')
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliographicStonyhurst, HeliographicCarrington)
@_transformation_debug("HGS->HGC")
def hgs_to_hgc(hgscoord, hgcframe):
    """
    Convert from Heliographic Stonyhurst to Heliographic Carrington.
    """
    _check_observer_defined(hgcframe)
    # For observer='self', the coordinate itself supplies the observer distance
    if isinstance(hgcframe.observer, str) and hgcframe.observer == "self":
        observer_radius = hgscoord.radius
    else:
        observer_radius = hgcframe.observer.radius
    # Match the obstime of the destination frame before rotating
    hgs_at_hgc_obstime = _transform_obstime(hgscoord, hgcframe.obstime)
    rot_matrix = _rotation_matrix_hgs_to_hgc(hgs_at_hgc_obstime.obstime, observer_radius)
    return hgcframe._replicate(hgs_at_hgc_obstime.cartesian.transform(rot_matrix),
                               obstime=hgs_at_hgc_obstime.obstime)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliographicCarrington, HeliographicStonyhurst)
@_transformation_debug("HGC->HGS")
def hgc_to_hgs(hgccoord, hgsframe):
    """
    Convert from Heliographic Carrington to Heliographic Stonyhurst.
    """
    _check_observer_defined(hgccoord)
    hgccoord = hgccoord.make_3d()
    # For observer='self', the coordinate itself supplies the observer distance
    observer_radius = (hgccoord.radius
                       if isinstance(hgccoord.observer, str) and hgccoord.observer == "self"
                       else hgccoord.observer.radius)
    # Match the obstime of the destination frame before rotating
    hgc_at_hgs_obstime = _transform_obstime(hgccoord, hgsframe.obstime)
    # Invert the HGS->HGC rotation to go from HGC to HGS
    rot_matrix = matrix_transpose(_rotation_matrix_hgs_to_hgc(hgc_at_hgs_obstime.obstime,
                                                              observer_radius))
    return hgsframe._replicate(hgc_at_hgs_obstime.cartesian.transform(rot_matrix),
                               obstime=hgc_at_hgs_obstime.obstime)
def _matrix_hcc_to_hpc():
# Returns the transformation matrix that permutes/swaps axes from HCC to HPC
# HPC spherical coordinates are a left-handed frame with these equivalent Cartesian axes:
# HPC_X = -HCC_Z
# HPC_Y = HCC_X
# HPC_Z = HCC_Y
# (HPC_X and HPC_Y are not to be confused with HPC_Tx and HPC_Ty)
return np.array([[0, 0, -1],
[1, 0, 0],
[0, 1, 0]])
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 Heliocentric, Helioprojective)
@_transformation_debug("HCC->HPC")
def hcc_to_hpc(helioccoord, heliopframe):
    """
    Convert from Heliocentric Cartesian to Helioprojective Cartesian.
    """
    _check_observer_defined(helioccoord)
    _check_observer_defined(heliopframe)
    # Transform the HPC observer (in HGS) to the HPC obstime in case it's different
    observer = _transform_obstime(heliopframe.observer, heliopframe.obstime)
    # Loopback transform the HCC coord to the obstime and observer of the HPC frame
    loopback_frame = Heliocentric(obstime=observer.obstime, observer=observer)
    hcc_coord = helioccoord.transform_to(loopback_frame)
    # Shift the origin from the Sun to the observer
    sun_to_observer = CartesianRepresentation(0*u.m, 0*u.m, hcc_coord.observer.radius)
    shifted = hcc_coord.cartesian - sun_to_observer
    # Permute/swap axes from HCC to the HPC equivalent Cartesian axes
    hpc_repr = shifted.transform(_matrix_hcc_to_hpc())
    # Explicitly represent as spherical because external code (e.g., wcsaxes) expects it
    return heliopframe.realize_frame(hpc_repr.represent_as(SphericalRepresentation))
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 Helioprojective, Heliocentric)
@_transformation_debug("HPC->HCC")
def hpc_to_hcc(heliopcoord, heliocframe):
    """
    Convert from Helioprojective Cartesian to Heliocentric Cartesian.
    """
    _check_observer_defined(heliopcoord)
    _check_observer_defined(heliocframe)
    heliopcoord = heliopcoord.make_3d()
    # Permute/swap axes from the HPC equivalent Cartesian axes back to HCC
    hcc_repr = heliopcoord.cartesian.transform(matrix_transpose(_matrix_hcc_to_hpc()))
    # Transform the HPC observer (in HGS) to the HPC obstime in case it's different
    observer = _transform_obstime(heliopcoord.observer, heliopcoord.obstime)
    # Shift the origin from the observer back to the Sun
    hcc_repr += CartesianRepresentation(0*u.m, 0*u.m, observer.radius)
    # Complete the conversion of HPC to HCC at the obstime and observer of the HPC coord
    hcc_coord = Heliocentric(hcc_repr, obstime=observer.obstime, observer=observer)
    # Loopback transform HCC as needed
    return hcc_coord.transform_to(heliocframe)
def _rotation_matrix_hcc_to_hgs(longitude, latitude):
    # Returns the rotation matrix from HCC to HGS based on the observer longitude and latitude
    # First permute the axes of HCC to match the HGS Cartesian equivalent:
    #   HGS_X = HCC_Z
    #   HGS_Y = HCC_X
    #   HGS_Z = HCC_Y
    axes_matrix = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
    # Then rotate in latitude and longitude (the sign differs because the
    # rotation directions are opposite)
    return rotation_matrix(-longitude, 'z') @ rotation_matrix(latitude, 'y') @ axes_matrix
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 Heliocentric, HeliographicStonyhurst)
@_transformation_debug("HCC->HGS")
def hcc_to_hgs(helioccoord, heliogframe):
    """
    Convert from Heliocentric Cartesian to Heliographic Stonyhurst.
    """
    _check_observer_defined(helioccoord)
    # Transform the HCC observer (in HGS) to the HCC obstime in case it's different
    observer = _transform_obstime(helioccoord.observer, helioccoord.obstime)
    # Rotate from HCC to HGS at the HCC obstime
    rot_matrix = _rotation_matrix_hcc_to_hgs(observer.lon, observer.lat)
    hgs_coord = HeliographicStonyhurst(helioccoord.cartesian.transform(rot_matrix),
                                       obstime=observer.obstime)
    # For historical reasons, we support HCC with no obstime transforming to HGS with an obstime
    if hgs_coord.obstime is None and heliogframe.obstime is not None:
        hgs_coord = hgs_coord.replicate(obstime=heliogframe.obstime)
    # Loopback transform HGS as needed
    return hgs_coord.transform_to(heliogframe)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliographicStonyhurst, Heliocentric)
@_transformation_debug("HGS->HCC")
def hgs_to_hcc(heliogcoord, heliocframe):
    """
    Convert from Heliographic Stonyhurst to Heliocentric Cartesian.
    """
    _check_observer_defined(heliocframe)
    heliogcoord = heliogcoord.make_3d()
    # Loopback transform HGS if there is a change in obstime
    hgs_coord = _transform_obstime(heliogcoord, heliocframe.obstime)
    # Transform the HCC observer (in HGS) to the HCC obstime in case it's different
    observer = _transform_obstime(heliocframe.observer, hgs_coord.obstime)
    # Invert the HCC->HGS rotation to go from HGS to HCC at the same obstime
    rot_matrix = matrix_transpose(_rotation_matrix_hcc_to_hgs(observer.lon, observer.lat))
    return heliocframe.realize_frame(hgs_coord.cartesian.transform(rot_matrix))
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 Helioprojective, Helioprojective)
@_transformation_debug("HPC->HPC")
def hpc_to_hpc(from_coo, to_frame):
    """
    This converts from HPC to HPC, with different observer location parameters.
    It does this by transforming through HGS.
    """
    # No work is needed when the observers and obstimes already agree
    if _observers_are_equal(from_coo.observer, to_frame.observer) and \
            _times_are_equal(from_coo.obstime, to_frame.obstime):
        return to_frame.realize_frame(from_coo.data)
    _check_observer_defined(from_coo)
    _check_observer_defined(to_frame)
    # Route the transformation through Heliographic Stonyhurst
    intermediate = from_coo.transform_to(HeliographicStonyhurst(obstime=to_frame.obstime))
    return intermediate.transform_to(to_frame)
def _rotation_matrix_reprs_to_reprs(start_representation, end_representation):
    """
    Return the matrix for the direct rotation from one representation to a second representation.
    The representations need not be normalized first, and can be arrays of representations.
    """
    begin = start_representation.to_cartesian()
    end = end_representation.to_cartesian()
    rotation_axis = begin.cross(end)
    # The negation is required for the proper direction of rotation
    rotation_angle = -np.arccos(begin.dot(end) / (begin.norm() * end.norm()))
    if rotation_angle.isscalar:
        # This line works around some input/output quirks of Astropy's rotation_matrix()
        return np.array(rotation_matrix(rotation_angle, rotation_axis.xyz.value.tolist()))
    # For array input, build each rotation matrix individually and stack them
    return np.stack([np.array(rotation_matrix(angle, axis.xyz.value.tolist()))
                     for angle, axis in zip(rotation_angle, rotation_axis)])
def _rotation_matrix_reprs_to_xz_about_z(representations):
    """
    Return one or more matrices for rotating one or more representations around the Z axis into the
    XZ plane.
    """
    cartesian = representations.to_cartesian()
    # Zero out the Z components
    # (The additional transpose operations are to handle both scalar and array inputs)
    projected = CartesianRepresentation((cartesian.xyz.T * [1, 1, 0]).T)
    # The rotation that takes the projected vector(s) onto the X axis is the answer
    return _rotation_matrix_reprs_to_reprs(projected, CartesianRepresentation(1, 0, 0))
def _sun_earth_icrf(time):
    """
    Return the Sun-Earth vector for ICRF-based frames.
    """
    # The difference of the two barycentric positions is the Sun->Earth vector
    return get_body_barycentric('earth', time) - get_body_barycentric('sun', time)
# The Sun's north pole is oriented RA=286.13 deg, dec=63.87 deg in ICRS, and thus HCRS as well
# (See Archinal et al. 2011,
# "Report of the IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009")
# The orientation of the north pole in ICRS/HCRS is assumed to be constant in time
# (the alpha_0/delta_0 values are pulled from sunpy.sun.constants)
_SOLAR_NORTH_POLE_HCRS = UnitSphericalRepresentation(lon=constants.get('alpha_0'),
                                                     lat=constants.get('delta_0'))
# Calculate the rotation matrix to de-tilt the Sun's rotation axis to be parallel to the Z axis
# (time-independent, so computed once at import time)
_SUN_DETILT_MATRIX = _rotation_matrix_reprs_to_reprs(_SOLAR_NORTH_POLE_HCRS,
                                                     CartesianRepresentation(0, 0, 1))
def _affine_params_hcrs_to_hgs(hcrs_time, hgs_time):
    """
    Return the affine parameters (matrix and offset) from HCRS to HGS
    HGS shares the same origin (the Sun) as HCRS, but has its Z axis aligned with the Sun's
    rotation axis and its X axis aligned with the projection of the Sun-Earth vector onto the Sun's
    equatorial plane (i.e., the component of the Sun-Earth vector perpendicular to the Z axis).
    Thus, the transformation matrix is the product of the matrix to align the Z axis (by de-tilting
    the Sun's rotation axis) and the matrix to align the X axis. The first matrix is independent
    of time and is pre-computed, while the second matrix depends on the time-varying Sun-Earth
    vector.
    """
    # Determine the Sun-Earth vector in ICRS
    # Since HCRS is ICRS with an origin shift, this is also the Sun-Earth vector in HCRS
    sun_pos_icrs = get_body_barycentric('sun', hgs_time)
    earth_pos_icrs = get_body_barycentric('earth', hgs_time)
    sun_earth = earth_pos_icrs - sun_pos_icrs
    # De-tilt the Sun-Earth vector to the frame with the Sun's rotation axis parallel to the Z axis
    sun_earth_detilt = sun_earth.transform(_SUN_DETILT_MATRIX)
    # Rotate the Sun-Earth vector about the Z axis so that it lies in the XZ plane
    rot_matrix = _rotation_matrix_reprs_to_xz_about_z(sun_earth_detilt)
    total_matrix = rot_matrix @ _SUN_DETILT_MATRIX
    # All of the above is calculated for the HGS observation time
    # If the HCRS observation time is different, calculate the translation in origin
    # (skipped when transform_with_sun_center() has set _ignore_sun_motion)
    if not _ignore_sun_motion and np.any(hcrs_time != hgs_time):
        sun_pos_old_icrs = get_body_barycentric('sun', hcrs_time)
        offset_icrf = sun_pos_old_icrs - sun_pos_icrs
    else:
        offset_icrf = sun_pos_icrs * 0  # preserves obstime shape
    # Express the origin shift in the de-tilted/rotated (HGS) axes
    offset = offset_icrf.transform(total_matrix)
    return total_matrix, offset
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HCRS, HeliographicStonyhurst)
@_transformation_debug("HCRS->HGS")
def hcrs_to_hgs(hcrscoord, hgsframe):
    """
    Convert from HCRS to Heliographic Stonyhurst (HGS).
    Even though we calculate the parameters for the affine transform, we use
    ``FunctionTransformWithFiniteDifference`` because otherwise there is no way to account for the
    induced angular velocity when transforming a coordinate with velocity information.
    """
    if hgsframe.obstime is None:
        raise ConvertError("To perform this transformation, the HeliographicStonyhurst"
                           " frame needs a specified `obstime`.")
    # Apply the affine parameters (rotation followed by origin shift)
    rot_matrix, offset = _affine_params_hcrs_to_hgs(hcrscoord.obstime, hgsframe.obstime)
    hgs_repr = hcrscoord.cartesian.transform(rot_matrix) + offset
    return hgsframe.realize_frame(hgs_repr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliographicStonyhurst, HCRS)
@_transformation_debug("HGS->HCRS")
def hgs_to_hcrs(hgscoord, hcrsframe):
    """
    Convert from Heliographic Stonyhurst to HCRS.
    Even though we calculate the parameters for the affine transform, we use
    ``FunctionTransformWithFiniteDifference`` because otherwise there is no way to account for the
    induced angular velocity when transforming a coordinate with velocity information.
    """
    if hgscoord.obstime is None:
        raise ConvertError("To perform this transformation, the HeliographicStonyhurst"
                           " frame needs a specified `obstime`.")
    hgscoord = hgscoord.make_3d()
    # Obtain the affine parameters for the HCRS->HGS direction, then invert them
    forward_matrix, forward_offset = _affine_params_hcrs_to_hgs(hcrsframe.obstime,
                                                                hgscoord.obstime)
    reverse_matrix = matrix_transpose(forward_matrix)
    reverse_offset = (-forward_offset).transform(reverse_matrix)
    hcrs_repr = hgscoord.cartesian.transform(reverse_matrix) + reverse_offset
    return hcrsframe.realize_frame(hcrs_repr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliographicStonyhurst, HeliographicStonyhurst)
@_transformation_debug("HGS->HGS")
def hgs_to_hgs(from_coo, to_frame):
    """
    Convert between two Heliographic Stonyhurst frames.
    """
    if to_frame.obstime is None:
        return from_coo.replicate()
    if _times_are_equal(from_coo.obstime, to_frame.obstime):
        return to_frame.realize_frame(from_coo.data)
    coord = from_coo
    # Optionally apply differential rotation (see propagate_with_solar_surface())
    if _autoapply_diffrot:
        elapsed = (to_frame.obstime - from_coo.obstime).to('day')
        coord = coord._apply_diffrot(elapsed, _autoapply_diffrot)
    # Loopback through HCRS to handle the change in obstime
    return coord.transform_to(HCRS(obstime=to_frame.obstime)).transform_to(to_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliographicCarrington, HeliographicCarrington)
@_transformation_debug("HGC->HGC")
def hgc_to_hgc(from_coo, to_frame):
    """
    Convert between two Heliographic Carrington frames.
    """
    # No work is needed when the observers and obstimes already agree
    if _observers_are_equal(from_coo.observer, to_frame.observer) and \
            _times_are_equal(from_coo.obstime, to_frame.obstime):
        return to_frame.realize_frame(from_coo.data)
    _check_observer_defined(from_coo)
    _check_observer_defined(to_frame)
    # Route the transformation through HGS at the source obstime
    return from_coo.transform_to(
        HeliographicStonyhurst(obstime=from_coo.obstime)).transform_to(to_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 Heliocentric, Heliocentric)
@_transformation_debug("HCC->HCC")
def hcc_to_hcc(from_coo, to_frame):
    """
    Convert between two Heliocentric frames.
    """
    # No work is needed when the observers and obstimes already agree
    if _observers_are_equal(from_coo.observer, to_frame.observer) and \
            _times_are_equal(from_coo.obstime, to_frame.obstime):
        return to_frame.realize_frame(from_coo.data)
    _check_observer_defined(from_coo)
    _check_observer_defined(to_frame)
    # Route the transformation through HGS at the destination obstime
    return from_coo.transform_to(
        HeliographicStonyhurst(obstime=to_frame.obstime)).transform_to(to_frame)
def _rotation_matrix_hme_to_hee(hmeframe):
    """
    Return the rotation matrix from HME to HEE at the same observation time.
    """
    # Get the Sun-Earth vector
    sun_earth = HCRS(_sun_earth_icrf(hmeframe.obstime), obstime=hmeframe.obstime)
    sun_earth_hme = sun_earth.transform_to(hmeframe).cartesian
    # Rotate the Sun-Earth vector about the Z axis so that it lies in the XZ plane
    rot_matrix = _rotation_matrix_reprs_to_xz_about_z(sun_earth_hme)
    # Tilt the rotated Sun-Earth vector so that it is aligned with the X axis
    tilt_matrix = _rotation_matrix_reprs_to_reprs(sun_earth_hme.transform(rot_matrix),
                                                  CartesianRepresentation(1, 0, 0))
    # Use the @ operator rather than the deprecated matrix_product(), consistent
    # with how matrices are combined elsewhere in this module
    return tilt_matrix @ rot_matrix
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliocentricMeanEcliptic, HeliocentricEarthEcliptic)
@_transformation_debug("HME->HEE")
def hme_to_hee(hmecoord, heeframe):
    """
    Convert from Heliocentric Mean Ecliptic to Heliocentric Earth Ecliptic
    """
    if heeframe.obstime is None:
        raise ConvertError("To perform this transformation, the coordinate"
                           " frame needs a specified `obstime`.")
    # Convert to the HME frame with mean equinox of date at the HEE obstime, through HCRS
    hme_of_date = HeliocentricMeanEcliptic(obstime=heeframe.obstime, equinox=heeframe.obstime)
    coord_of_date = hmecoord.transform_to(HCRS(obstime=hmecoord.obstime)).transform_to(hme_of_date)
    # Rotate the intermediate coord into the HEE frame
    hee_repr = coord_of_date.cartesian.transform(_rotation_matrix_hme_to_hee(hme_of_date))
    return heeframe.realize_frame(hee_repr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliocentricEarthEcliptic, HeliocentricMeanEcliptic)
@_transformation_debug("HEE->HME")
def hee_to_hme(heecoord, hmeframe):
    """
    Convert from Heliocentric Earth Ecliptic to Heliocentric Mean Ecliptic
    """
    if heecoord.obstime is None:
        raise ConvertError("To perform this transformation, the coordinate"
                           " frame needs a specified `obstime`.")
    hme_of_date = HeliocentricMeanEcliptic(obstime=heecoord.obstime, equinox=heecoord.obstime)
    # Rotate the HEE coord into the HME frame with mean equinox of date
    inverse_matrix = matrix_transpose(_rotation_matrix_hme_to_hee(hme_of_date))
    coord_of_date = hme_of_date.realize_frame(heecoord.cartesian.transform(inverse_matrix))
    # Convert to the requested HME frame through HCRS
    return coord_of_date.transform_to(HCRS(obstime=coord_of_date.obstime)).transform_to(hmeframe)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliocentricEarthEcliptic, HeliocentricEarthEcliptic)
@_transformation_debug("HEE->HEE")
def hee_to_hee(from_coo, to_frame):
    """
    Convert between two Heliocentric Earth Ecliptic frames.
    """
    if _times_are_equal(from_coo.obstime, to_frame.obstime):
        return to_frame.realize_frame(from_coo.data)
    if to_frame.obstime is None:
        # With no target obstime, hand back the input unchanged
        return from_coo
    # Loopback through HCRS to handle the change in obstime
    return from_coo.transform_to(HCRS(obstime=from_coo.obstime)).transform_to(to_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliocentricEarthEcliptic, GeocentricSolarEcliptic)
@_transformation_debug("HEE->GSE")
def hee_to_gse(heecoord, gseframe):
    """
    Convert from Heliocentric Earth Ecliptic to Geocentric Solar Ecliptic
    """
    # First transform the HEE coord to the GSE obstime
    int_coord = _transform_obstime(heecoord, gseframe.obstime)
    if int_coord.obstime is None:
        raise ConvertError("To perform this transformation, the coordinate"
                           " frame needs a specified `obstime`.")
    # Imported here to avoid a circular import
    from .sun import earth_distance
    # Shift the origin from the Sun to the Earth, which lies on the +X axis
    sun_earth_vector = earth_distance(int_coord.obstime) * CartesianRepresentation(1, 0, 0)
    earth_object = int_coord.cartesian - sun_earth_vector
    # Flip the vector in X and Y, but leave Z untouched
    # (The additional transpose operations are to handle both scalar and array inputs)
    flipped = CartesianRepresentation((earth_object.xyz.T * [-1, -1, 1]).T)
    return gseframe._replicate(flipped, obstime=int_coord.obstime)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 GeocentricSolarEcliptic, HeliocentricEarthEcliptic)
@_transformation_debug("GSE->HEE")
def gse_to_hee(gsecoord, heeframe):
    """
    Convert from Geocentric Solar Ecliptic to Heliocentric Earth Ecliptic

    This is the inverse of the HEE->GSE transformation: the origin is shifted
    from the Earth back to the Sun and the X and Y components are flipped.
    """
    # First transform the GSE coord to the HEE obstime
    int_coord = _transform_obstime(gsecoord, heeframe.obstime)
    if int_coord.obstime is None:
        raise ConvertError("To perform this transformation, the coordinate"
                           " frame needs a specified `obstime`.")
    # Import here to avoid a circular import
    from .sun import earth_distance
    # Find the Sun-object vector in the intermediate frame.  The Earth-Sun
    # vector is taken to lie along +X, scaled by the Sun-Earth distance.
    earth_sun_int = earth_distance(int_coord.obstime) * CartesianRepresentation(1, 0, 0)
    sun_object_int = int_coord.cartesian - earth_sun_int
    # Flip the vector in X and Y, but leave Z untouched
    # (The additional transpose operations are to handle both scalar and array inputs)
    newrepr = CartesianRepresentation((sun_object_int.xyz.T * [-1, -1, 1]).T)
    return heeframe._replicate(newrepr, obstime=int_coord.obstime)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 GeocentricSolarEcliptic, GeocentricSolarEcliptic)
@_transformation_debug("GSE->GSE")
def gse_to_gse(from_coo, to_frame):
    """
    Convert between two Geocentric Solar Ecliptic frames.

    Matching observation times are a no-op re-realization; otherwise the
    coordinate is routed through HEE at the source observation time.
    """
    if _times_are_equal(from_coo.obstime, to_frame.obstime):
        return to_frame.realize_frame(from_coo.data)
    # Different observation times: hop through HEE at the source obstime
    via_hee = from_coo.transform_to(HeliocentricEarthEcliptic(obstime=from_coo.obstime))
    return via_hee.transform_to(to_frame)
def _rotation_matrix_hgs_to_hci(obstime):
    """
    Return the rotation matrix from HGS to HCI at the same observation time
    """
    z_axis = CartesianRepresentation(0, 0, 1)*u.m
    if not obstime.isscalar:
        # Broadcast the unit vector to one copy per observation time.
        # NOTE(review): `_apply` is a private Representation method — may break
        # with future Astropy releases.
        z_axis = z_axis._apply('repeat', obstime.size)
    # Get the ecliptic pole in HGS
    ecliptic_pole = HeliocentricMeanEcliptic(z_axis, obstime=obstime, equinox=_J2000)
    ecliptic_pole_hgs = ecliptic_pole.transform_to(HeliographicStonyhurst(obstime=obstime))
    # Rotate the ecliptic pole to the -YZ plane, which aligns the solar ascending node with the X
    # axis
    rot_matrix = _rotation_matrix_reprs_to_xz_about_z(ecliptic_pole_hgs.cartesian)
    xz_to_yz_matrix = rotation_matrix(-90*u.deg, 'z')
    # Compose the two rotations (applied right-to-left)
    return xz_to_yz_matrix @ rot_matrix
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliographicStonyhurst, HeliocentricInertial)
@_transformation_debug("HGS->HCI")
def hgs_to_hci(hgscoord, hciframe):
    """
    Convert from Heliographic Stonyhurst to Heliocentric Inertial.

    The coordinate is first evaluated at the destination's observation time,
    then rotated into the HCI orientation for that time.
    """
    # Ensure the coordinate has a radial component before Cartesian work
    coord_3d = hgscoord.make_3d()
    # Evaluate the HGS coordinate at the HCI observation time
    staged = _transform_obstime(coord_3d, hciframe.obstime)
    if staged.obstime is None:
        raise ConvertError("To perform this transformation, the coordinate"
                           " frame needs a specified `obstime`.")
    # Apply the HGS->HCI rotation for that observation time
    rotated = staged.cartesian.transform(_rotation_matrix_hgs_to_hci(staged.obstime))
    return hciframe._replicate(rotated, obstime=staged.obstime)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliocentricInertial, HeliographicStonyhurst)
@_transformation_debug("HCI->HGS")
def hci_to_hgs(hcicoord, hgsframe):
    """
    Convert from Heliocentric Inertial to Heliographic Stonyhurst.

    This undoes the HGS->HCI rotation at the destination's observation time.
    """
    # Evaluate the HCI coordinate at the HGS observation time
    staged = _transform_obstime(hcicoord, hgsframe.obstime)
    if staged.obstime is None:
        raise ConvertError("To perform this transformation, the coordinate"
                           " frame needs a specified `obstime`.")
    # The inverse of a rotation matrix is its transpose
    inverse_matrix = matrix_transpose(_rotation_matrix_hgs_to_hci(staged.obstime))
    rotated = staged.cartesian.transform(inverse_matrix)
    return hgsframe._replicate(rotated, obstime=staged.obstime)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliocentricInertial, HeliocentricInertial)
@_transformation_debug("HCI->HCI")
def hci_to_hci(from_coo, to_frame):
    """
    Convert between two Heliocentric Inertial frames.

    Matching observation times are a no-op re-realization; otherwise the
    coordinate is routed through HGS at the source observation time.
    """
    if _times_are_equal(from_coo.obstime, to_frame.obstime):
        return to_frame.realize_frame(from_coo.data)
    # Different observation times: hop through HGS at the source obstime
    via_hgs = from_coo.transform_to(HeliographicStonyhurst(obstime=from_coo.obstime))
    return via_hgs.transform_to(to_frame)
def _rotation_matrix_obliquity(time):
    """
    Return the rotation matrix from Earth equatorial to ecliptic coordinates
    """
    # Mean obliquity of the ecliptic (IAU 2006 model) at the given time (TT scale)
    jd1, jd2 = get_jd12(time, 'tt')
    obliquity = erfa.obl06(jd1, jd2) * u.radian
    # Rotate about the X axis (the direction of the equinox)
    return rotation_matrix(obliquity, 'x')
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 HeliocentricMeanEcliptic, GeocentricEarthEquatorial)
@_transformation_debug("HME->GEI")
def hme_to_gei(hmecoord, geiframe):
    """
    Convert from Heliocentric Mean Ecliptic to Geocentric Earth Equatorial

    The origin is shifted from the Sun to the Earth, and the vector is
    rotated from the ecliptic plane to the Earth equatorial plane.
    """
    if geiframe.obstime is None:
        raise ConvertError("To perform this transformation, the coordinate"
                           " frame needs a specified `obstime`.")
    # Use an intermediate frame of HME at the GEI observation time, through HCRS
    int_frame = HeliocentricMeanEcliptic(obstime=geiframe.obstime, equinox=geiframe.equinox)
    int_coord = hmecoord.transform_to(HCRS(obstime=int_frame.obstime)).transform_to(int_frame)
    # Get the Sun-Earth vector in the intermediate frame
    sun_earth = HCRS(_sun_earth_icrf(int_frame.obstime), obstime=int_frame.obstime)
    sun_earth_int = sun_earth.transform_to(int_frame).cartesian
    # Find the Earth-object vector in the intermediate frame
    earth_object_int = int_coord.cartesian - sun_earth_int
    # Rotate from ecliptic to Earth equatorial (transpose inverts the
    # equatorial->ecliptic obliquity rotation)
    rot_matrix = matrix_transpose(_rotation_matrix_obliquity(int_frame.equinox))
    newrepr = earth_object_int.transform(rot_matrix)
    return geiframe.realize_frame(newrepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 GeocentricEarthEquatorial, HeliocentricMeanEcliptic)
@_transformation_debug("GEI->HME")
def gei_to_hme(geicoord, hmeframe):
    """
    Convert from Geocentric Earth Equatorial to Heliocentric Mean Ecliptic

    This is the inverse of the HME->GEI transformation: the vector is rotated
    from the Earth equatorial plane to the ecliptic plane and the origin is
    shifted from the Earth back to the Sun, then the result is converted to
    the destination frame through HCRS.
    """
    if geicoord.obstime is None:
        raise ConvertError("To perform this transformation, the coordinate"
                           " frame needs a specified `obstime`.")
    # Use an intermediate frame of HME at the GEI observation time
    int_frame = HeliocentricMeanEcliptic(obstime=geicoord.obstime, equinox=geicoord.equinox)
    # Get the Sun-Earth vector in the intermediate frame
    sun_earth = HCRS(_sun_earth_icrf(int_frame.obstime), obstime=int_frame.obstime)
    sun_earth_int = sun_earth.transform_to(int_frame).cartesian
    # Rotate from Earth equatorial to ecliptic
    rot_matrix = _rotation_matrix_obliquity(int_frame.equinox)
    earth_object_int = geicoord.cartesian.transform(rot_matrix)
    # Find the Sun-object vector in the intermediate frame
    sun_object_int = sun_earth_int + earth_object_int
    int_coord = int_frame.realize_frame(sun_object_int)
    # Convert to the final frame through HCRS
    return int_coord.transform_to(HCRS(obstime=int_coord.obstime)).transform_to(hmeframe)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                 GeocentricEarthEquatorial, GeocentricEarthEquatorial)
@_transformation_debug("GEI->GEI")
def gei_to_gei(from_coo, to_frame):
    """
    Convert between two Geocentric Earth Equatorial frames.

    When both the equinox and the observation time match, the data is simply
    re-realized in the destination frame; otherwise the coordinate is routed
    through HCRS at the source observation time.
    """
    if (_times_are_equal(from_coo.equinox, to_frame.equinox)
            and _times_are_equal(from_coo.obstime, to_frame.obstime)):
        return to_frame.realize_frame(from_coo.data)
    via_hcrs = from_coo.transform_to(HCRS(obstime=from_coo.obstime))
    return via_hcrs.transform_to(to_frame)
def _make_sunpy_graph():
    """
    Culls down the full transformation graph for SunPy purposes and returns the string version
    """
    # Frames to keep in the transformation graph
    keep_list = ['icrs', 'hcrs', 'heliocentrictrueecliptic', 'heliocentricmeanecliptic',
                 'heliographic_stonyhurst', 'heliographic_carrington',
                 'heliocentric', 'helioprojective',
                 'heliocentricearthecliptic', 'geocentricsolarecliptic',
                 'heliocentricinertial', 'geocentricearthequatorial',
                 'gcrs', 'precessedgeocentric', 'geocentrictrueecliptic', 'geocentricmeanecliptic',
                 'cirs', 'altaz', 'itrs']
    # Work on a copy so the real transformation graph is left untouched
    small_graph = deepcopy(frame_transform_graph)
    cull_list = [name for name in small_graph.get_names() if name not in keep_list]
    cull_frames = [small_graph.lookup_name(name) for name in cull_list]
    # NOTE(review): the loop below reaches into TransformGraph private
    # internals (`_graph`, `_cached_names`) and may break with future
    # Astropy releases.
    for frame in cull_frames:
        # Remove the part of the graph where the unwanted frame is the source frame
        if frame in small_graph._graph:
            del small_graph._graph[frame]
        # Remove all instances of the unwanted frame as the destination frame
        for entry in small_graph._graph:
            if frame in small_graph._graph[entry]:
                del (small_graph._graph[entry])[frame]
    # Clean up the node list
    for name in cull_list:
        small_graph._cached_names.pop(name)
    # Add the placeholder "Astropy" node linked to ICRS
    _add_astropy_node(small_graph)
    # Render the culled graph to its documentation string
    docstr = make_transform_graph_docs(small_graph)
    # Make adjustments to the graph
    docstr = _tweak_graph(docstr)
    return docstr
def _add_astropy_node(graph):
    """
    Add an 'Astropy' node that links to an ICRS node in the graph
    """
    class Astropy(BaseCoordinateFrame):
        # Placeholder name; the rendered graph later replaces the "REPLACE" text
        name = "REPLACE"

    # Register no-op transforms in both directions so the graph renderer
    # draws edges between the Astropy node and ICRS; they are never called.
    @graph.transform(FunctionTransform, Astropy, ICRS)
    def fake_transform1():
        pass

    @graph.transform(FunctionTransform, ICRS, Astropy)
    def fake_transform2():
        pass
def _tweak_graph(docstr):
    """
    Post-process Astropy's rendered transform-graph docs for SunPy.

    Trims Astropy's diagram description, restyles the placeholder "Astropy"
    node, recolors its links, fills nodes, whitens SunPy's own frames, sets a
    left-to-right layout, and prepends a legend to the frame list.
    """
    # Drop Astropy's diagram description, keeping everything from the graph on
    output = docstr[docstr.find('.. Wrap the graph'):]

    # Table of literal (old, new) substitutions; none of the targets overlap,
    # so applying them in sequence is order-independent.
    substitutions = (
        # Restyle the Astropy node as a filled box for "other frames"
        ('Astropy [shape=oval label="Astropy\\n`REPLACE`"]',
         'Astropy [shape=box3d style=filled fillcolor=lightcyan '
         'label="Other frames\\nin Astropy"]'),
        # Recolor the Astropy<->ICRS links to black
        ('ICRS -> Astropy[ color = "#783001" ]',
         'ICRS -> Astropy[ color = "#000000" ]'),
        ('Astropy -> ICRS[ color = "#783001" ]',
         'Astropy -> ICRS[ color = "#000000" ]'),
        # Fill all nodes cyan by default
        ('AstropyCoordinateTransformGraph {',
         'AstropyCoordinateTransformGraph {\n'
         ' node [style=filled fillcolor=lightcyan]'),
        # Left-to-right layout; pin ICRS, HCRS, and Astropy to the same rank
        (' overlap=false',
         ' overlap=false\n'
         ' rankdir=LR\n'
         ' {rank=same; ICRS; HCRS; Astropy}'),
        # Prepend the legend rows to the frame list
        ('<ul>\n\n',
         '<ul>\n\n' +
         _add_legend_row('SunPy frames', 'white') +
         _add_legend_row('Astropy frames', 'lightcyan')),
    )
    for old, new in substitutions:
        output = output.replace(old, new)

    # Whiten the nodes for SunPy's own frames
    for frame_name in ('HeliographicStonyhurst', 'HeliographicCarrington',
                       'Heliocentric', 'Helioprojective',
                       'HeliocentricEarthEcliptic', 'GeocentricSolarEcliptic',
                       'HeliocentricInertial', 'GeocentricEarthEquatorial'):
        output = output.replace(frame_name + ' [', frame_name + ' [fillcolor=white ')
    return output
def _add_legend_row(label, color):
row = ' <li style="list-style: none;">\n'\
' <p style="font-size: 12px;line-height: 24px;font-weight: normal;'\
'color: #848484;padding: 0;margin: 0;">\n'\
' <b>' + label + ':</b>\n'\
' <span class="dot" style="height: 20px;width: 40px;'\
'background-color: ' + color + ';border-radius: 50%;border: 1px solid black;'\
'display: inline-block;"></span>\n'\
' </p>\n'\
' </li>\n\n\n'
return row
|
[
"astropy.coordinates.ConvertError",
"astropy.coordinates.matrix_utilities.matrix_product",
"sunpy.sun.constants.get",
"astropy.coordinates.HeliocentricMeanEcliptic",
"astropy.units.allclose",
"astropy.coordinates.representation.CartesianRepresentation",
"astropy.coordinates.HCRS",
"astropy.coordinates.get_body_barycentric",
"numpy.stack",
"copy.deepcopy",
"astropy.coordinates.builtin_frames.make_transform_graph_docs",
"sunpy.log.debug",
"astropy.coordinates.builtin_frames.utils.get_jd12",
"functools.wraps",
"sunpy.log.getEffectiveLevel",
"numpy.all",
"astropy.coordinates.baseframe.frame_transform_graph.transform",
"astropy.coordinates.matrix_utilities.matrix_transpose",
"numpy.any",
"astropy.coordinates.matrix_utilities.rotation_matrix",
"numpy.array"
] |
[((15927, 16049), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliographicStonyhurst', 'HeliographicCarrington'], {}), '(FunctionTransformWithFiniteDifference,\n HeliographicStonyhurst, HeliographicCarrington)\n', (15958, 16049), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((16812, 16934), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliographicCarrington', 'HeliographicStonyhurst'], {}), '(FunctionTransformWithFiniteDifference,\n HeliographicCarrington, HeliographicStonyhurst)\n', (16843, 16934), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((18254, 18359), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'Heliocentric', 'Helioprojective'], {}), '(FunctionTransformWithFiniteDifference,\n Heliocentric, Helioprojective)\n', (18285, 18359), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((19466, 19571), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'Helioprojective', 'Heliocentric'], {}), '(FunctionTransformWithFiniteDifference,\n Helioprojective, Heliocentric)\n', (19497, 19571), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((21245, 21357), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'Heliocentric', 'HeliographicStonyhurst'], {}), '(FunctionTransformWithFiniteDifference,\n Heliocentric, HeliographicStonyhurst)\n', (21276, 21357), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((22463, 22575), 
'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliographicStonyhurst', 'Heliocentric'], {}), '(FunctionTransformWithFiniteDifference,\n HeliographicStonyhurst, Heliocentric)\n', (22494, 22575), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((23514, 23622), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'Helioprojective', 'Helioprojective'], {}), '(FunctionTransformWithFiniteDifference,\n Helioprojective, Helioprojective)\n', (23545, 23622), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((28675, 28779), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HCRS', 'HeliographicStonyhurst'], {}), '(FunctionTransformWithFiniteDifference, HCRS,\n HeliographicStonyhurst)\n', (28706, 28779), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((29586, 29690), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliographicStonyhurst', 'HCRS'], {}), '(FunctionTransformWithFiniteDifference,\n HeliographicStonyhurst, HCRS)\n', (29617, 29690), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((30803, 30925), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliographicStonyhurst', 'HeliographicStonyhurst'], {}), '(FunctionTransformWithFiniteDifference,\n HeliographicStonyhurst, HeliographicStonyhurst)\n', (30834, 30925), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((31582, 31704), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 
'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliographicCarrington', 'HeliographicCarrington'], {}), '(FunctionTransformWithFiniteDifference,\n HeliographicCarrington, HeliographicCarrington)\n', (31613, 31704), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((32301, 32403), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'Heliocentric', 'Heliocentric'], {}), '(FunctionTransformWithFiniteDifference,\n Heliocentric, Heliocentric)\n', (32332, 32403), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((33757, 33884), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliocentricMeanEcliptic', 'HeliocentricEarthEcliptic'], {}), '(FunctionTransformWithFiniteDifference,\n HeliocentricMeanEcliptic, HeliocentricEarthEcliptic)\n', (33788, 33884), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((34746, 34873), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliocentricEarthEcliptic', 'HeliocentricMeanEcliptic'], {}), '(FunctionTransformWithFiniteDifference,\n HeliocentricEarthEcliptic, HeliocentricMeanEcliptic)\n', (34777, 34873), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((35710, 35838), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliocentricEarthEcliptic', 'HeliocentricEarthEcliptic'], {}), '(FunctionTransformWithFiniteDifference,\n HeliocentricEarthEcliptic, HeliocentricEarthEcliptic)\n', (35741, 35838), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((36293, 36419), 
'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliocentricEarthEcliptic', 'GeocentricSolarEcliptic'], {}), '(FunctionTransformWithFiniteDifference,\n HeliocentricEarthEcliptic, GeocentricSolarEcliptic)\n', (36324, 36419), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((37492, 37618), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'GeocentricSolarEcliptic', 'HeliocentricEarthEcliptic'], {}), '(FunctionTransformWithFiniteDifference,\n GeocentricSolarEcliptic, HeliocentricEarthEcliptic)\n', (37523, 37618), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((38685, 38809), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'GeocentricSolarEcliptic', 'GeocentricSolarEcliptic'], {}), '(FunctionTransformWithFiniteDifference,\n GeocentricSolarEcliptic, GeocentricSolarEcliptic)\n', (38716, 38809), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((40024, 40144), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliographicStonyhurst', 'HeliocentricInertial'], {}), '(FunctionTransformWithFiniteDifference,\n HeliographicStonyhurst, HeliocentricInertial)\n', (40055, 40144), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((40883, 41003), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliocentricInertial', 'HeliographicStonyhurst'], {}), '(FunctionTransformWithFiniteDifference,\n HeliocentricInertial, HeliographicStonyhurst)\n', (40914, 41003), False, 'from astropy.coordinates.baseframe import 
frame_transform_graph\n'), ((41724, 41842), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliocentricInertial', 'HeliocentricInertial'], {}), '(FunctionTransformWithFiniteDifference,\n HeliocentricInertial, HeliocentricInertial)\n', (41755, 41842), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((42473, 42600), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'HeliocentricMeanEcliptic', 'GeocentricEarthEquatorial'], {}), '(FunctionTransformWithFiniteDifference,\n HeliocentricMeanEcliptic, GeocentricEarthEquatorial)\n', (42504, 42600), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((43793, 43920), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'GeocentricEarthEquatorial', 'HeliocentricMeanEcliptic'], {}), '(FunctionTransformWithFiniteDifference,\n GeocentricEarthEquatorial, HeliocentricMeanEcliptic)\n', (43824, 43920), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((45139, 45267), 'astropy.coordinates.baseframe.frame_transform_graph.transform', 'frame_transform_graph.transform', (['FunctionTransformWithFiniteDifference', 'GeocentricEarthEquatorial', 'GeocentricEarthEquatorial'], {}), '(FunctionTransformWithFiniteDifference,\n GeocentricEarthEquatorial, GeocentricEarthEquatorial)\n', (45170, 45267), False, 'from astropy.coordinates.baseframe import frame_transform_graph\n'), ((18164, 18208), 'numpy.array', 'np.array', (['[[0, 0, -1], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 0, -1], [1, 0, 0], [0, 1, 0]])\n', (18172, 18208), True, 'import numpy as np\n'), ((20296, 20347), 'astropy.coordinates.representation.CartesianRepresentation', 'CartesianRepresentation', (['(0 * u.m)', '(0 * u.m)', 
'distance'], {}), '(0 * u.m, 0 * u.m, distance)\n', (20319, 20347), False, 'from astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation, UnitSphericalRepresentation\n'), ((20904, 20947), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 0, 1], [1, 0, 0], [0, 1, 0]])\n', (20912, 20947), True, 'import numpy as np\n'), ((21111, 21141), 'astropy.coordinates.matrix_utilities.rotation_matrix', 'rotation_matrix', (['latitude', '"""y"""'], {}), "(latitude, 'y')\n", (21126, 21141), False, 'from astropy.coordinates.matrix_utilities import matrix_product, matrix_transpose, rotation_matrix\n'), ((21159, 21191), 'astropy.coordinates.matrix_utilities.rotation_matrix', 'rotation_matrix', (['(-longitude)', '"""z"""'], {}), "(-longitude, 'z')\n", (21174, 21191), False, 'from astropy.coordinates.matrix_utilities import matrix_product, matrix_transpose, rotation_matrix\n'), ((25563, 25611), 'astropy.coordinates.representation.CartesianRepresentation', 'CartesianRepresentation', (['(A.xyz.T * [1, 1, 0]).T'], {}), '((A.xyz.T * [1, 1, 0]).T)\n', (25586, 25611), False, 'from astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation, UnitSphericalRepresentation\n'), ((25674, 25706), 'astropy.coordinates.representation.CartesianRepresentation', 'CartesianRepresentation', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (25697, 25706), False, 'from astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation, UnitSphericalRepresentation\n'), ((25906, 25939), 'astropy.coordinates.get_body_barycentric', 'get_body_barycentric', (['"""sun"""', 'time'], {}), "('sun', time)\n", (25926, 25939), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((25961, 25996), 'astropy.coordinates.get_body_barycentric', 'get_body_barycentric', (['"""earth"""', 'time'], {}), "('earth', time)\n", (25981, 
25996), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((26738, 26770), 'astropy.coordinates.representation.CartesianRepresentation', 'CartesianRepresentation', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (26761, 26770), False, 'from astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation, UnitSphericalRepresentation\n'), ((27660, 27697), 'astropy.coordinates.get_body_barycentric', 'get_body_barycentric', (['"""sun"""', 'hgs_time'], {}), "('sun', hgs_time)\n", (27680, 27697), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((27719, 27758), 'astropy.coordinates.get_body_barycentric', 'get_body_barycentric', (['"""earth"""', 'hgs_time'], {}), "('earth', hgs_time)\n", (27739, 27758), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((30603, 30635), 'astropy.coordinates.matrix_utilities.matrix_transpose', 'matrix_transpose', (['forward_matrix'], {}), '(forward_matrix)\n', (30619, 30635), False, 'from astropy.coordinates.matrix_utilities import matrix_product, matrix_transpose, rotation_matrix\n'), ((33714, 33753), 'astropy.coordinates.matrix_utilities.matrix_product', 'matrix_product', (['tilt_matrix', 'rot_matrix'], {}), '(tilt_matrix, rot_matrix)\n', (33728, 33753), False, 'from astropy.coordinates.matrix_utilities import matrix_product, matrix_transpose, rotation_matrix\n'), ((34358, 34434), 'astropy.coordinates.HeliocentricMeanEcliptic', 'HeliocentricMeanEcliptic', ([], {'obstime': 'heeframe.obstime', 'equinox': 'heeframe.obstime'}), '(obstime=heeframe.obstime, equinox=heeframe.obstime)\n', (34382, 34434), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((35257, 35333), 
'astropy.coordinates.HeliocentricMeanEcliptic', 'HeliocentricMeanEcliptic', ([], {'obstime': 'heecoord.obstime', 'equinox': 'heecoord.obstime'}), '(obstime=heecoord.obstime, equinox=heecoord.obstime)\n', (35281, 35333), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((37355, 37420), 'astropy.coordinates.representation.CartesianRepresentation', 'CartesianRepresentation', (['(earth_object_int.xyz.T * [-1, -1, 1]).T'], {}), '((earth_object_int.xyz.T * [-1, -1, 1]).T)\n', (37378, 37420), False, 'from astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation, UnitSphericalRepresentation\n'), ((38550, 38613), 'astropy.coordinates.representation.CartesianRepresentation', 'CartesianRepresentation', (['(sun_object_int.xyz.T * [-1, -1, 1]).T'], {}), '((sun_object_int.xyz.T * [-1, -1, 1]).T)\n', (38573, 38613), False, 'from astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation, UnitSphericalRepresentation\n'), ((39575, 39640), 'astropy.coordinates.HeliocentricMeanEcliptic', 'HeliocentricMeanEcliptic', (['z_axis'], {'obstime': 'obstime', 'equinox': '_J2000'}), '(z_axis, obstime=obstime, equinox=_J2000)\n', (39599, 39640), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((39948, 39981), 'astropy.coordinates.matrix_utilities.rotation_matrix', 'rotation_matrix', (['(-90 * u.deg)', '"""z"""'], {}), "(-90 * u.deg, 'z')\n", (39963, 39981), False, 'from astropy.coordinates.matrix_utilities import matrix_product, matrix_transpose, rotation_matrix\n'), ((43065, 43141), 'astropy.coordinates.HeliocentricMeanEcliptic', 'HeliocentricMeanEcliptic', ([], {'obstime': 'geiframe.obstime', 'equinox': 'geiframe.equinox'}), '(obstime=geiframe.obstime, equinox=geiframe.equinox)\n', (43089, 43141), False, 'from astropy.coordinates import HCRS, ICRS, 
BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((44371, 44447), 'astropy.coordinates.HeliocentricMeanEcliptic', 'HeliocentricMeanEcliptic', ([], {'obstime': 'geicoord.obstime', 'equinox': 'geicoord.equinox'}), '(obstime=geicoord.obstime, equinox=geicoord.equinox)\n', (44395, 44447), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((46429, 46460), 'copy.deepcopy', 'deepcopy', (['frame_transform_graph'], {}), '(frame_transform_graph)\n', (46437, 46460), False, 'from copy import deepcopy\n'), ((47191, 47229), 'astropy.coordinates.builtin_frames.make_transform_graph_docs', 'make_transform_graph_docs', (['small_graph'], {}), '(small_graph)\n', (47216, 47229), False, 'from astropy.coordinates.builtin_frames import make_transform_graph_docs\n'), ((9788, 9799), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (9793, 9799), False, 'from functools import wraps\n'), ((11552, 11730), 'astropy.coordinates.ConvertError', 'ConvertError', (['f"""The source observer is set to None, but the transformation requires the source observer to be specified, as the destination observer is set to {obs_2}."""'], {}), "(\n f'The source observer is set to None, but the transformation requires the source observer to be specified, as the destination observer is set to {obs_2}.'\n )\n", (11564, 11730), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((11817, 12000), 'astropy.coordinates.ConvertError', 'ConvertError', (['f"""The destination observer is set to None, but the transformation requires the destination observer to be specified, as the source observer is set to {obs_1}."""'], {}), "(\n f'The destination observer is set to None, but the transformation requires the destination observer to be specified, as the source observer is set to {obs_1}.'\n )\n", (11829, 
12000), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((12149, 12269), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""The source observer needs to have `obstime` set because the destination observer is different."""'], {}), "(\n 'The source observer needs to have `obstime` set because the destination observer is different.'\n )\n", (12161, 12269), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((12388, 12508), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""The destination observer needs to have `obstime` set because the source observer is different."""'], {}), "(\n 'The destination observer needs to have `obstime` set because the source observer is different.'\n )\n", (12400, 12508), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((12889, 13019), 'astropy.coordinates.ConvertError', 'ConvertError', (['f"""This transformation cannot be performed because the {frame.__class__.__name__} frame has observer=None."""'], {}), "(\n f'This transformation cannot be performed because the {frame.__class__.__name__} frame has observer=None.'\n )\n", (12901, 13019), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((13960, 13992), 'numpy.all', 'np.all', (['(time_1.tai == time_2.tai)'], {}), '(time_1.tai == time_2.tai)\n', (13966, 13992), True, 'import numpy as np\n'), ((15234, 15341), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""To perform this transformation, the coordinate frame needs a specified `obstime`."""'], {}), "(\n 'To perform this transformation, the coordinate frame needs a specified `obstime`.'\n )\n", (15246, 15341), False, 'from astropy.coordinates import HCRS, ICRS, 
BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((19125, 19176), 'astropy.coordinates.representation.CartesianRepresentation', 'CartesianRepresentation', (['(0 * u.m)', '(0 * u.m)', 'distance'], {}), '(0 * u.m, 0 * u.m, distance)\n', (19148, 19176), False, 'from astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation, UnitSphericalRepresentation\n'), ((25159, 25180), 'numpy.stack', 'np.stack', (['matrix_list'], {}), '(matrix_list)\n', (25167, 25180), True, 'import numpy as np\n'), ((26401, 26425), 'sunpy.sun.constants.get', 'constants.get', (['"""alpha_0"""'], {}), "('alpha_0')\n", (26414, 26425), False, 'from sunpy.sun import constants\n'), ((26484, 26508), 'sunpy.sun.constants.get', 'constants.get', (['"""delta_0"""'], {}), "('delta_0')\n", (26497, 26508), False, 'from sunpy.sun import constants\n'), ((28363, 28392), 'numpy.any', 'np.any', (['(hcrs_time != hgs_time)'], {}), '(hcrs_time != hgs_time)\n', (28369, 28392), True, 'import numpy as np\n'), ((28421, 28459), 'astropy.coordinates.get_body_barycentric', 'get_body_barycentric', (['"""sun"""', 'hcrs_time'], {}), "('sun', hcrs_time)\n", (28441, 28459), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((29266, 29385), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""To perform this transformation, the HeliographicStonyhurst frame needs a specified `obstime`."""'], {}), "(\n 'To perform this transformation, the HeliographicStonyhurst frame needs a specified `obstime`.'\n )\n", (29278, 29385), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((30171, 30290), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""To perform this transformation, the HeliographicStonyhurst frame needs a specified `obstime`."""'], {}), "(\n 'To perform this transformation, 
the HeliographicStonyhurst frame needs a specified `obstime`.'\n )\n", (30183, 30290), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((33668, 33700), 'astropy.coordinates.representation.CartesianRepresentation', 'CartesianRepresentation', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (33691, 33700), False, 'from astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation, UnitSphericalRepresentation\n'), ((34123, 34230), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""To perform this transformation, the coordinate frame needs a specified `obstime`."""'], {}), "(\n 'To perform this transformation, the coordinate frame needs a specified `obstime`.'\n )\n", (34135, 34230), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((35112, 35219), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""To perform this transformation, the coordinate frame needs a specified `obstime`."""'], {}), "(\n 'To perform this transformation, the coordinate frame needs a specified `obstime`.'\n )\n", (35124, 35219), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((36777, 36884), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""To perform this transformation, the coordinate frame needs a specified `obstime`."""'], {}), "(\n 'To perform this transformation, the coordinate frame needs a specified `obstime`.'\n )\n", (36789, 36884), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((37105, 37137), 'astropy.coordinates.representation.CartesianRepresentation', 'CartesianRepresentation', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (37128, 37137), False, 'from 
astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation, UnitSphericalRepresentation\n'), ((37976, 38083), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""To perform this transformation, the coordinate frame needs a specified `obstime`."""'], {}), "(\n 'To perform this transformation, the coordinate frame needs a specified `obstime`.'\n )\n", (37988, 38083), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((38302, 38334), 'astropy.coordinates.representation.CartesianRepresentation', 'CartesianRepresentation', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (38325, 38334), False, 'from astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation, UnitSphericalRepresentation\n'), ((39398, 39430), 'astropy.coordinates.representation.CartesianRepresentation', 'CartesianRepresentation', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (39421, 39430), False, 'from astropy.coordinates.representation import CartesianRepresentation, SphericalRepresentation, UnitSphericalRepresentation\n'), ((40530, 40637), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""To perform this transformation, the coordinate frame needs a specified `obstime`."""'], {}), "(\n 'To perform this transformation, the coordinate frame needs a specified `obstime`.'\n )\n", (40542, 40637), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((41353, 41460), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""To perform this transformation, the coordinate frame needs a specified `obstime`."""'], {}), "(\n 'To perform this transformation, the coordinate frame needs a specified `obstime`.'\n )\n", (41365, 41460), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), 
((42839, 42946), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""To perform this transformation, the coordinate frame needs a specified `obstime`."""'], {}), "(\n 'To perform this transformation, the coordinate frame needs a specified `obstime`.'\n )\n", (42851, 42946), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((44159, 44266), 'astropy.coordinates.ConvertError', 'ConvertError', (['"""To perform this transformation, the coordinate frame needs a specified `obstime`."""'], {}), "(\n 'To perform this transformation, the coordinate frame needs a specified `obstime`.'\n )\n", (44171, 44266), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((1805, 1828), 'sunpy.sun.constants.get', 'constants.get', (['"""radius"""'], {}), "('radius')\n", (1818, 1828), False, 'from sunpy.sun import constants\n'), ((5488, 5565), 'sunpy.log.debug', 'log.debug', (['"""Ignoring the motion of the center of the Sun for transformations"""'], {}), "('Ignoring the motion of the center of the Sun for transformations')\n", (5497, 5565), False, 'from sunpy import log\n'), ((5677, 5764), 'sunpy.log.debug', 'log.debug', (['"""Stop ignoring the motion of the center of the Sun for transformations"""'], {}), "(\n 'Stop ignoring the motion of the center of the Sun for transformations')\n", (5686, 5764), False, 'from sunpy import log\n'), ((9012, 9130), 'sunpy.log.debug', 'log.debug', (['f"""Enabling automatic solar differential rotation (\'{rotation_model}\') for any changes in obstime"""'], {}), '(\n f"Enabling automatic solar differential rotation (\'{rotation_model}\') for any changes in obstime"\n )\n', (9021, 9130), False, 'from sunpy import log\n'), ((9287, 9384), 'sunpy.log.debug', 'log.debug', (['"""Disabling automatic solar differential rotation for any changes in obstime"""'], {}), "(\n 'Disabling 
automatic solar differential rotation for any changes in obstime'\n )\n", (9296, 9384), False, 'from sunpy import log\n'), ((9988, 10011), 'sunpy.log.getEffectiveLevel', 'log.getEffectiveLevel', ([], {}), '()\n', (10009, 10011), False, 'from sunpy import log\n'), ((10521, 10559), 'sunpy.log.debug', 'log.debug', (['f"""{indention}{description}"""'], {}), "(f'{indention}{description}')\n", (10530, 10559), False, 'from sunpy import log\n'), ((10576, 10619), 'sunpy.log.debug', 'log.debug', (['f"""{indention}├─From: {from_str}"""'], {}), "(f'{indention}├─From: {from_str}')\n", (10585, 10619), False, 'from sunpy import log\n'), ((10646, 10687), 'sunpy.log.debug', 'log.debug', (['f"""{indention}├─To : {to_str}"""'], {}), "(f'{indention}├─To : {to_str}')\n", (10655, 10687), False, 'from sunpy import log\n'), ((11200, 11242), 'sunpy.log.debug', 'log.debug', (['f"""{indention}└─Out : {out_str}"""'], {}), "(f'{indention}└─Out : {out_str}')\n", (11209, 11242), False, 'from sunpy import log\n'), ((13137, 13320), 'astropy.coordinates.ConvertError', 'ConvertError', (['f"""This transformation cannot be performed because the {frame.__class__.__name__} frame needs a specified obstime to fully resolve observer=\'{frame.observer}\'."""'], {}), '(\n f"This transformation cannot be performed because the {frame.__class__.__name__} frame needs a specified obstime to fully resolve observer=\'{frame.observer}\'."\n )\n', (13149, 13320), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((34473, 34503), 'astropy.coordinates.HCRS', 'HCRS', ([], {'obstime': 'hmecoord.obstime'}), '(obstime=hmecoord.obstime)\n', (34477, 34503), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((35651, 35682), 'astropy.coordinates.HCRS', 'HCRS', ([], {'obstime': 'int_coord.obstime'}), '(obstime=int_coord.obstime)\n', (35655, 35682), 
False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((43180, 43211), 'astropy.coordinates.HCRS', 'HCRS', ([], {'obstime': 'int_frame.obstime'}), '(obstime=int_frame.obstime)\n', (43184, 43211), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((45080, 45111), 'astropy.coordinates.HCRS', 'HCRS', ([], {'obstime': 'int_coord.obstime'}), '(obstime=int_coord.obstime)\n', (45084, 45111), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((12556, 12588), 'astropy.units.allclose', 'u.allclose', (['obs_1.lat', 'obs_2.lat'], {}), '(obs_1.lat, obs_2.lat)\n', (12566, 12588), True, 'import astropy.units as u\n'), ((12619, 12651), 'astropy.units.allclose', 'u.allclose', (['obs_1.lon', 'obs_2.lon'], {}), '(obs_1.lon, obs_2.lon)\n', (12629, 12651), True, 'import astropy.units as u\n'), ((12682, 12720), 'astropy.units.allclose', 'u.allclose', (['obs_1.radius', 'obs_2.radius'], {}), '(obs_1.radius, obs_2.radius)\n', (12692, 12720), True, 'import astropy.units as u\n'), ((13458, 13599), 'astropy.coordinates.ConvertError', 'ConvertError', (['f"""The {frame.__class__.__name__} frame has observer=\'self\' but this is valid for only HeliographicCarrington frames."""'], {}), '(\n f"The {frame.__class__.__name__} frame has observer=\'self\' but this is valid for only HeliographicCarrington frames."\n )\n', (13470, 13599), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((42433, 42453), 'astropy.coordinates.builtin_frames.utils.get_jd12', 'get_jd12', (['time', '"""tt"""'], {}), "(time, 'tt')\n", (42441, 42453), False, 'from astropy.coordinates.builtin_frames.utils import get_jd12\n'), ((45671, 45701), 'astropy.coordinates.HCRS', 'HCRS', 
([], {'obstime': 'from_coo.obstime'}), '(obstime=from_coo.obstime)\n', (45675, 45701), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((31524, 31554), 'astropy.coordinates.HCRS', 'HCRS', ([], {'obstime': 'to_frame.obstime'}), '(obstime=to_frame.obstime)\n', (31528, 31554), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n'), ((36235, 36265), 'astropy.coordinates.HCRS', 'HCRS', ([], {'obstime': 'from_coo.obstime'}), '(obstime=from_coo.obstime)\n', (36239, 36265), False, 'from astropy.coordinates import HCRS, ICRS, BaseCoordinateFrame, ConvertError, HeliocentricMeanEcliptic, get_body_barycentric\n')]
|
import os
import glob
import sys
sys.path.append('C:\\3DUnetCNN-master\\')
from unet3d.data import write_data_to_file, open_data_file
from unet3d.generator import get_training_and_validation_generators
from unet3d.model import unet_model_3d
from unet3d.training import load_old_model, train_model
config = dict()
config["pool_size"] = (2, 2, 2) # pool size for the max pooling operations
config["image_shape"] = (96, 96, 96) # This determines what shape the images will be cropped/resampled to.
config["patch_shape"] = None # switch to None to train on the whole image
config["labels"] = (1, 2, 4) # the label numbers on the input image
config["n_labels"] = len(config["labels"])
config["all_modalities"] = ["t1", "t1ce", "flair", "t2"]
config["training_modalities"] = config["all_modalities"] # change this if you want to only use some of the modalities
config["nb_channels"] = len(config["training_modalities"])
if "patch_shape" in config and config["patch_shape"] is not None:
config["input_shape"] = tuple([config["nb_channels"]] + list(config["patch_shape"]))
else:
config["input_shape"] = tuple([config["nb_channels"]] + list(config["image_shape"]))
config["truth_channel"] = config["nb_channels"]
config["deconvolution"] = True # if False, will use upsampling instead of deconvolution
config["batch_size"] = 1
config["validation_batch_size"] = 1
config["n_epochs"] = 50 # cutoff the training after this many epochs
config["patience"] = 10 # learning rate will be reduced after this many epochs if the validation loss is not improving
config["early_stop"] = 50 # training will be stopped after this many epochs without the validation loss improving
config["initial_learning_rate"] = 0.00001
config["learning_rate_drop"] = 0.5 # factor by which the learning rate will be reduced
config["validation_split"] = 0.99 #0.8 # portion of the data that will be used for training
config["flip"] = False # augments the data by randomly flipping an axis during
config["permute"] = True # data shape must be a cube. Augments the data by permuting in various directions
config["distort"] = None # switch to None if you want no distortion
config["augment"] = config["flip"] or config["distort"]
config["validation_patch_overlap"] = 0 # if > 0, during training, validation patches will be overlapping
config["training_patch_start_offset"] = (16, 16, 16) # randomly offset the first patch index by up to this offset
config["skip_blank"] = True # if True, then patches without any target will be skipped
config["data_file"] = os.path.abspath("brats_data.h5")
config["model_file"] = os.path.abspath("tumor_segmentation_model.h5")
config["training_file"] = os.path.abspath("training_ids.pkl")
config["validation_file"] = os.path.abspath("validation_ids.pkl")
config["overwrite"] = False # If True, will previous files. If False, will use previously written files.
def fetch_training_data_files():
training_data_files = list()
for subject_dir in glob.glob(os.path.join(os.path.dirname(__file__), "data", "preprocessed", "*", "*")):
subject_files = list()
for modality in config["training_modalities"] + ["truth"]:
subject_files.append(os.path.join(subject_dir, modality + ".nii.gz"))
training_data_files.append(tuple(subject_files))
return training_data_files
def main(overwrite=False):
# convert input images into an hdf5 file
if overwrite or not os.path.exists(config["data_file"]):
training_files = fetch_training_data_files()
write_data_to_file(training_files, config["data_file"], image_shape=config["image_shape"])
data_file_opened = open_data_file(config["data_file"])
if not overwrite and os.path.exists(config["model_file"]):
model = load_old_model(config["model_file"])
else:
# instantiate new model
model = unet_model_3d(input_shape=config["input_shape"],
pool_size=config["pool_size"],
n_labels=config["n_labels"],
initial_learning_rate=config["initial_learning_rate"],
deconvolution=config["deconvolution"])
# get training and testing generators
train_generator, validation_generator, n_train_steps, n_validation_steps = get_training_and_validation_generators(
data_file_opened,
batch_size=config["batch_size"],
data_split=config["validation_split"],
overwrite=overwrite,
validation_keys_file=config["validation_file"],
training_keys_file=config["training_file"],
n_labels=config["n_labels"],
labels=config["labels"],
patch_shape=config["patch_shape"],
validation_batch_size=config["validation_batch_size"],
validation_patch_overlap=config["validation_patch_overlap"],
training_patch_start_offset=config["training_patch_start_offset"],
permute=config["permute"],
augment=config["augment"],
skip_blank=config["skip_blank"],
augment_flip=config["flip"],
augment_distortion_factor=config["distort"])
# run training
train_model(model=model,
model_file=config["model_file"],
training_generator=train_generator,
validation_generator=validation_generator,
steps_per_epoch=n_train_steps,
validation_steps=n_validation_steps,
initial_learning_rate=config["initial_learning_rate"],
learning_rate_drop=config["learning_rate_drop"],
learning_rate_patience=config["patience"],
early_stopping_patience=config["early_stop"],
n_epochs=config["n_epochs"])
data_file_opened.close()
if __name__ == "__main__":
main(overwrite=config["overwrite"])
|
[
"sys.path.append",
"os.path.abspath",
"os.path.dirname",
"os.path.exists",
"unet3d.data.open_data_file",
"unet3d.model.unet_model_3d",
"unet3d.generator.get_training_and_validation_generators",
"unet3d.training.load_old_model",
"unet3d.training.train_model",
"os.path.join",
"unet3d.data.write_data_to_file"
] |
[((34, 75), 'sys.path.append', 'sys.path.append', (['"""C:\\\\3DUnetCNN-master\\\\"""'], {}), "('C:\\\\3DUnetCNN-master\\\\')\n", (49, 75), False, 'import sys\n'), ((2546, 2578), 'os.path.abspath', 'os.path.abspath', (['"""brats_data.h5"""'], {}), "('brats_data.h5')\n", (2561, 2578), False, 'import os\n'), ((2602, 2648), 'os.path.abspath', 'os.path.abspath', (['"""tumor_segmentation_model.h5"""'], {}), "('tumor_segmentation_model.h5')\n", (2617, 2648), False, 'import os\n'), ((2675, 2710), 'os.path.abspath', 'os.path.abspath', (['"""training_ids.pkl"""'], {}), "('training_ids.pkl')\n", (2690, 2710), False, 'import os\n'), ((2739, 2776), 'os.path.abspath', 'os.path.abspath', (['"""validation_ids.pkl"""'], {}), "('validation_ids.pkl')\n", (2754, 2776), False, 'import os\n'), ((3639, 3674), 'unet3d.data.open_data_file', 'open_data_file', (["config['data_file']"], {}), "(config['data_file'])\n", (3653, 3674), False, 'from unet3d.data import write_data_to_file, open_data_file\n'), ((4295, 5013), 'unet3d.generator.get_training_and_validation_generators', 'get_training_and_validation_generators', (['data_file_opened'], {'batch_size': "config['batch_size']", 'data_split': "config['validation_split']", 'overwrite': 'overwrite', 'validation_keys_file': "config['validation_file']", 'training_keys_file': "config['training_file']", 'n_labels': "config['n_labels']", 'labels': "config['labels']", 'patch_shape': "config['patch_shape']", 'validation_batch_size': "config['validation_batch_size']", 'validation_patch_overlap': "config['validation_patch_overlap']", 'training_patch_start_offset': "config['training_patch_start_offset']", 'permute': "config['permute']", 'augment': "config['augment']", 'skip_blank': "config['skip_blank']", 'augment_flip': "config['flip']", 'augment_distortion_factor': "config['distort']"}), "(data_file_opened, batch_size=config[\n 'batch_size'], data_split=config['validation_split'], overwrite=\n overwrite, validation_keys_file=config['validation_file'],\n 
training_keys_file=config['training_file'], n_labels=config['n_labels'],\n labels=config['labels'], patch_shape=config['patch_shape'],\n validation_batch_size=config['validation_batch_size'],\n validation_patch_overlap=config['validation_patch_overlap'],\n training_patch_start_offset=config['training_patch_start_offset'],\n permute=config['permute'], augment=config['augment'], skip_blank=config\n ['skip_blank'], augment_flip=config['flip'], augment_distortion_factor=\n config['distort'])\n", (4333, 5013), False, 'from unet3d.generator import get_training_and_validation_generators\n'), ((5131, 5585), 'unet3d.training.train_model', 'train_model', ([], {'model': 'model', 'model_file': "config['model_file']", 'training_generator': 'train_generator', 'validation_generator': 'validation_generator', 'steps_per_epoch': 'n_train_steps', 'validation_steps': 'n_validation_steps', 'initial_learning_rate': "config['initial_learning_rate']", 'learning_rate_drop': "config['learning_rate_drop']", 'learning_rate_patience': "config['patience']", 'early_stopping_patience': "config['early_stop']", 'n_epochs': "config['n_epochs']"}), "(model=model, model_file=config['model_file'],\n training_generator=train_generator, validation_generator=\n validation_generator, steps_per_epoch=n_train_steps, validation_steps=\n n_validation_steps, initial_learning_rate=config[\n 'initial_learning_rate'], learning_rate_drop=config[\n 'learning_rate_drop'], learning_rate_patience=config['patience'],\n early_stopping_patience=config['early_stop'], n_epochs=config['n_epochs'])\n", (5142, 5585), False, 'from unet3d.training import load_old_model, train_model\n'), ((3525, 3620), 'unet3d.data.write_data_to_file', 'write_data_to_file', (['training_files', "config['data_file']"], {'image_shape': "config['image_shape']"}), "(training_files, config['data_file'], image_shape=config[\n 'image_shape'])\n", (3543, 3620), False, 'from unet3d.data import write_data_to_file, open_data_file\n'), ((3701, 3737), 
'os.path.exists', 'os.path.exists', (["config['model_file']"], {}), "(config['model_file'])\n", (3715, 3737), False, 'import os\n'), ((3755, 3791), 'unet3d.training.load_old_model', 'load_old_model', (["config['model_file']"], {}), "(config['model_file'])\n", (3769, 3791), False, 'from unet3d.training import load_old_model, train_model\n'), ((3850, 4062), 'unet3d.model.unet_model_3d', 'unet_model_3d', ([], {'input_shape': "config['input_shape']", 'pool_size': "config['pool_size']", 'n_labels': "config['n_labels']", 'initial_learning_rate': "config['initial_learning_rate']", 'deconvolution': "config['deconvolution']"}), "(input_shape=config['input_shape'], pool_size=config[\n 'pool_size'], n_labels=config['n_labels'], initial_learning_rate=config\n ['initial_learning_rate'], deconvolution=config['deconvolution'])\n", (3863, 4062), False, 'from unet3d.model import unet_model_3d\n'), ((2997, 3022), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3012, 3022), False, 'import os\n'), ((3426, 3461), 'os.path.exists', 'os.path.exists', (["config['data_file']"], {}), "(config['data_file'])\n", (3440, 3461), False, 'import os\n'), ((3191, 3238), 'os.path.join', 'os.path.join', (['subject_dir', "(modality + '.nii.gz')"], {}), "(subject_dir, modality + '.nii.gz')\n", (3203, 3238), False, 'import os\n')]
|
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.views.gtk3.widgets.test_combo.py is part of The RAMSTK Project
#
# All rights reserved.
"""Test class for the GTK3 combo module algorithms and models."""
# Third Party Imports
import pytest
# RAMSTK Package Imports
from ramstk.views.gtk3 import GObject
from ramstk.views.gtk3.widgets import RAMSTKComboBox
class TestRAMSTKComboBox:
"""Test class for the RAMSTKComboBox."""
@pytest.mark.gui
def test_create_entry(self):
"""__init__() should create a RAMSTKEntry."""
DUT = RAMSTKComboBox()
assert isinstance(DUT, RAMSTKComboBox)
assert DUT._index == 0
assert DUT.get_property("height-request") == -1
assert DUT.get_property("width-request") == -1
assert DUT.get_model().get_n_columns() == 1
assert DUT.get_model().get_column_type(0) == GObject.TYPE_STRING
@pytest.mark.gui
def test_create_combobox_not_simple(self):
"""__init__() should create a RAMSTKComboBox with three columns when passed simple=False."""
DUT = RAMSTKComboBox(index=2, simple=False)
assert isinstance(DUT, RAMSTKComboBox)
assert DUT._index == 2
assert DUT.get_property("height-request") == -1
assert DUT.get_property("width-request") == -1
assert DUT.get_model().get_n_columns() == 3
assert DUT.get_model().get_column_type(0) == GObject.TYPE_STRING
assert DUT.get_model().get_column_type(1) == GObject.TYPE_STRING
assert DUT.get_model().get_column_type(2) == GObject.TYPE_STRING
@pytest.mark.gui
def test_set_properties(self):
"""do_set_properties() should set the properties of a RAMSTKComboBox."""
DUT = RAMSTKComboBox()
DUT.do_set_properties(height=70, width=150, tooltip="Test tooltip")
assert DUT.get_property("height-request") == 70
assert DUT.get_property("tooltip-markup") == "Test tooltip"
assert DUT.get_property("width-request") == 150
@pytest.mark.gui
def test_set_properties_default_values(self):
"""do_set_properties() should set the default properties of a RAMSTKComboBox when no keywords are passed to the method."""
DUT = RAMSTKComboBox()
DUT.do_set_properties()
assert DUT.get_property("height-request") == 30
assert DUT.get_property("tooltip-markup") == (
"Missing tooltip, please file a quality type issue to have one " "added."
)
assert DUT.get_property("width-request") == 200
@pytest.mark.gui
def test_set_properties_zero_height(self):
"""do_set_properties() should set the height to the default value if it is passed as zero."""
DUT = RAMSTKComboBox()
DUT.do_set_properties(height=0)
assert DUT.get_property("height-request") == 30
@pytest.mark.gui
def test_set_properties_zero_width(self):
"""do_set_properties() should set the width to the default value if it is passed as zero."""
DUT = RAMSTKComboBox()
DUT.do_set_properties(width=0)
assert DUT.get_property("width-request") == 200
@pytest.mark.gui
def test_do_load_combo_simple(self):
"""do_load_combo() should load a list of string values into a simple RAMSTKComboBox."""
_test_list = [
["This"],
["is"],
["a"],
["test"],
["of"],
["the"],
["RAMSTKComboBox"],
]
DUT = RAMSTKComboBox()
assert DUT.do_load_combo(_test_list) is None
@pytest.mark.gui
def test_do_load_combo_not_simple(self):
"""do_load_combo() should load a list of string values into a non-simple RAMSTKComboBox."""
_test_list = [
["This", "is", "a"],
["test", "of", "the"],
["RAMSTKComboBox", "not", "simple"],
]
DUT = RAMSTKComboBox(index=1, simple=False)
assert DUT.do_load_combo(_test_list, simple=False) is None
@pytest.mark.gui
def test_do_load_combo_simple_not_string_list(self):
"""do_load_combo() should raise a TypeError when passed a list of other than strings to load or a single non-string value."""
_test_list = [0, 1, 2, 3, 4]
DUT = RAMSTKComboBox()
with pytest.raises(TypeError):
DUT.do_load_combo(_test_list)
with pytest.raises(TypeError):
DUT.do_load_combo(10)
@pytest.mark.gui
def test_do_get_options_simple(self):
"""do_get_options() should return a dict of all the options available in a simple RAMSTKComboBox."""
_test_list = [
["This"],
["is"],
["a"],
["test"],
["of"],
["the"],
["RAMSTKComboBox"],
]
DUT = RAMSTKComboBox()
DUT.do_load_combo(_test_list)
_options = DUT.do_get_options()
assert isinstance(_options, dict)
assert _options == {
0: "",
1: "This",
2: "is",
3: "a",
4: "test",
5: "of",
6: "the",
7: "RAMSTKComboBox",
}
@pytest.mark.gui
def test_do_get_options_not_simple(self):
"""do_load_combo() should load a list of string values into a non-simple RAMSTKComboBox."""
_test_list = [
["This", "is", "a"],
["test", "of", "the"],
["RAMSTKComboBox", "not", "simple"],
]
DUT = RAMSTKComboBox(index=1, simple=False)
DUT.do_load_combo(_test_list, simple=False)
_options = DUT.do_get_options()
assert isinstance(_options, dict)
assert _options == {0: "", 1: "is", 2: "of", 3: "not"}
|
[
"pytest.raises",
"ramstk.views.gtk3.widgets.RAMSTKComboBox"
] |
[((579, 595), 'ramstk.views.gtk3.widgets.RAMSTKComboBox', 'RAMSTKComboBox', ([], {}), '()\n', (593, 595), False, 'from ramstk.views.gtk3.widgets import RAMSTKComboBox\n'), ((1095, 1132), 'ramstk.views.gtk3.widgets.RAMSTKComboBox', 'RAMSTKComboBox', ([], {'index': '(2)', 'simple': '(False)'}), '(index=2, simple=False)\n', (1109, 1132), False, 'from ramstk.views.gtk3.widgets import RAMSTKComboBox\n'), ((1746, 1762), 'ramstk.views.gtk3.widgets.RAMSTKComboBox', 'RAMSTKComboBox', ([], {}), '()\n', (1760, 1762), False, 'from ramstk.views.gtk3.widgets import RAMSTKComboBox\n'), ((2237, 2253), 'ramstk.views.gtk3.widgets.RAMSTKComboBox', 'RAMSTKComboBox', ([], {}), '()\n', (2251, 2253), False, 'from ramstk.views.gtk3.widgets import RAMSTKComboBox\n'), ((2735, 2751), 'ramstk.views.gtk3.widgets.RAMSTKComboBox', 'RAMSTKComboBox', ([], {}), '()\n', (2749, 2751), False, 'from ramstk.views.gtk3.widgets import RAMSTKComboBox\n'), ((3032, 3048), 'ramstk.views.gtk3.widgets.RAMSTKComboBox', 'RAMSTKComboBox', ([], {}), '()\n', (3046, 3048), False, 'from ramstk.views.gtk3.widgets import RAMSTKComboBox\n'), ((3507, 3523), 'ramstk.views.gtk3.widgets.RAMSTKComboBox', 'RAMSTKComboBox', ([], {}), '()\n', (3521, 3523), False, 'from ramstk.views.gtk3.widgets import RAMSTKComboBox\n'), ((3909, 3946), 'ramstk.views.gtk3.widgets.RAMSTKComboBox', 'RAMSTKComboBox', ([], {'index': '(1)', 'simple': '(False)'}), '(index=1, simple=False)\n', (3923, 3946), False, 'from ramstk.views.gtk3.widgets import RAMSTKComboBox\n'), ((4279, 4295), 'ramstk.views.gtk3.widgets.RAMSTKComboBox', 'RAMSTKComboBox', ([], {}), '()\n', (4293, 4295), False, 'from ramstk.views.gtk3.widgets import RAMSTKComboBox\n'), ((4827, 4843), 'ramstk.views.gtk3.widgets.RAMSTKComboBox', 'RAMSTKComboBox', ([], {}), '()\n', (4841, 4843), False, 'from ramstk.views.gtk3.widgets import RAMSTKComboBox\n'), ((5518, 5555), 'ramstk.views.gtk3.widgets.RAMSTKComboBox', 'RAMSTKComboBox', ([], {'index': '(1)', 'simple': '(False)'}), '(index=1, 
simple=False)\n', (5532, 5555), False, 'from ramstk.views.gtk3.widgets import RAMSTKComboBox\n'), ((4310, 4334), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4323, 4334), False, 'import pytest\n'), ((4391, 4415), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4404, 4415), False, 'import pytest\n')]
|
from setuptools import setup
from os import path
import io
here = path.abspath(path.dirname(__file__))
with io.open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
requires = f.read().split()
setup(
name='cufflinks',
version='0.17.1',
description='Productivity Tools for Plotly + Pandas',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
keywords=['pandas', 'plotly', 'plotting'],
url='https://github.com/santosjorge/cufflinks',
packages=['cufflinks'],
package_data={'cufflinks': ['../helper/*.json']},
include_package_data=True,
install_requires=requires,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
zip_safe=False
)
|
[
"os.path.dirname",
"os.path.join",
"setuptools.setup"
] |
[((211, 755), 'setuptools.setup', 'setup', ([], {'name': '"""cufflinks"""', 'version': '"""0.17.1"""', 'description': '"""Productivity Tools for Plotly + Pandas"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'keywords': "['pandas', 'plotly', 'plotting']", 'url': '"""https://github.com/santosjorge/cufflinks"""', 'packages': "['cufflinks']", 'package_data': "{'cufflinks': ['../helper/*.json']}", 'include_package_data': '(True)', 'install_requires': 'requires', 'classifiers': "['Programming Language :: Python', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3']", 'zip_safe': '(False)'}), "(name='cufflinks', version='0.17.1', description=\n 'Productivity Tools for Plotly + Pandas', author='<NAME>', author_email\n ='<EMAIL>', license='MIT', keywords=['pandas', 'plotly', 'plotting'],\n url='https://github.com/santosjorge/cufflinks', packages=['cufflinks'],\n package_data={'cufflinks': ['../helper/*.json']}, include_package_data=\n True, install_requires=requires, classifiers=[\n 'Programming Language :: Python', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3'], zip_safe=False)\n", (216, 755), False, 'from setuptools import setup\n'), ((80, 102), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (92, 102), False, 'from os import path\n'), ((117, 152), 'os.path.join', 'path.join', (['here', '"""requirements.txt"""'], {}), "(here, 'requirements.txt')\n", (126, 152), False, 'from os import path\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG Website - A Django-powered website for Reaction Mechanism Generator #
# #
# Copyright (c) 2011-2018 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
from django.db import models
from django.contrib.auth.models import User
################################################################################
# Unit-choice lists for the UserProfile model below.  Each entry is a
# (stored value, human-readable label) pair in the format Django expects
# for a CharField ``choices`` option.
ENERGY_UNITS = [
    ('J/mol', 'J/mol'),
    ('kJ/mol', 'kJ/mol'),
    ('cal/mol', 'cal/mol'),
    ('kcal/mol', 'kcal/mol'),
    ('cm^-1', 'cm^-1'),
]
HEATCAPACITY_UNITS = [
    ('J/(mol*K)', 'J/mol*K'),
    ('kJ/(mol*K)', 'kJ/mol*K'),
    ('cal/(mol*K)', 'cal/mol*K'),
    ('kcal/(mol*K)', 'kcal/mol*K'),
]
RATECOEFFICIENT_UNITS = [
    ('m^3,mol,s', 'm^3, mol, s'),
    ('cm^3,mol,s', 'cm^3, mol, s'),
    ('m^3,molecule,s', 'm^3, molecule, s'),
    ('cm^3,molecule,s', 'cm^3, molecule, s'),
]
TEMPERATURE_UNITS = [
    ('K', 'K'),
]
PRESSURE_UNITS = [
    ('Pa', 'Pa'),
    ('bar', 'bar'),
    ('atm', 'atm'),
    ('torr', 'torr'),
]
################################################################################
class UserProfile(models.Model):
    """
    A model containing user profile information. Some of the information is
    stored in the :class:`User` class built into Django; this class provides
    extra custom information.
    """
    # One profile per Django auth user; deleting the user deletes the profile.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    organization = models.CharField(max_length=100)
    website = models.CharField(max_length=100, blank=True)
    bio = models.TextField(blank=True)
    # Preferred units
    # Each field stores one of the (value, label) pairs declared in the
    # *_UNITS lists at the top of this module.
    energy_units = models.CharField(verbose_name='Energy units', max_length=100, choices=ENERGY_UNITS, default='kcal/mol')
    heat_capacity_units = models.CharField(verbose_name='Heat capacity units', max_length=100, choices=HEATCAPACITY_UNITS, default='cal/(mol*K)')
    rate_coefficient_units = models.CharField(verbose_name='Rate coefficient units', max_length=100, choices=RATECOEFFICIENT_UNITS, default='cm^3,mol,s')
    temperature_units = models.CharField(verbose_name='Temperature units', max_length=100, choices=TEMPERATURE_UNITS, default='K')
    pressure_units = models.CharField(verbose_name='Pressure units', max_length=100, choices=PRESSURE_UNITS, default='bar')
|
[
"django.db.models.CharField",
"django.db.models.OneToOneField",
"django.db.models.TextField"
] |
[((3210, 3262), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (3230, 3262), False, 'from django.db import models\n'), ((3282, 3314), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (3298, 3314), False, 'from django.db import models\n'), ((3329, 3373), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (3345, 3373), False, 'from django.db import models\n'), ((3384, 3412), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (3400, 3412), False, 'from django.db import models\n'), ((3454, 3562), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Energy units"""', 'max_length': '(100)', 'choices': 'ENERGY_UNITS', 'default': '"""kcal/mol"""'}), "(verbose_name='Energy units', max_length=100, choices=\n ENERGY_UNITS, default='kcal/mol')\n", (3470, 3562), False, 'from django.db import models\n'), ((3584, 3707), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Heat capacity units"""', 'max_length': '(100)', 'choices': 'HEATCAPACITY_UNITS', 'default': '"""cal/(mol*K)"""'}), "(verbose_name='Heat capacity units', max_length=100,\n choices=HEATCAPACITY_UNITS, default='cal/(mol*K)')\n", (3600, 3707), False, 'from django.db import models\n'), ((3733, 3861), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Rate coefficient units"""', 'max_length': '(100)', 'choices': 'RATECOEFFICIENT_UNITS', 'default': '"""cm^3,mol,s"""'}), "(verbose_name='Rate coefficient units', max_length=100,\n choices=RATECOEFFICIENT_UNITS, default='cm^3,mol,s')\n", (3749, 3861), False, 'from django.db import models\n'), ((3882, 3993), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Temperature units"""', 'max_length': '(100)', 
'choices': 'TEMPERATURE_UNITS', 'default': '"""K"""'}), "(verbose_name='Temperature units', max_length=100, choices=\n TEMPERATURE_UNITS, default='K')\n", (3898, 3993), False, 'from django.db import models\n'), ((4010, 4117), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Pressure units"""', 'max_length': '(100)', 'choices': 'PRESSURE_UNITS', 'default': '"""bar"""'}), "(verbose_name='Pressure units', max_length=100, choices=\n PRESSURE_UNITS, default='bar')\n", (4026, 4117), False, 'from django.db import models\n')]
|
import sys
import os
# sys.path.append('C:\\Users\\Lynn\\Desktop\\instrument_control\\libraries')
sys.path.append(os.getcwd()[:os.getcwd().find("instrument_control")+len("instrument_control")]+'\\libraries')
from libraries import *
class tlb6700(object):
    """Driver for a Newport TLB-6700 tunable laser over the Newport USB DLL.

    Every command goes through :meth:`query`, which sends an SCPI-style
    command string and returns the instrument's reply as a string.  The
    ``set_*`` methods therefore return the instrument's read-back value,
    not the requested one.
    """

    def __init__(self, **kwargs):
        # **kwargs accepted but unused; kept for interface compatibility.
        super().__init__()
        self.tlb = Newport.USBComm.USB()   # Newport USB communication object
        self.answer = StringBuilder(64)    # reusable .NET buffer for replies
        self.ProductID = 4106               # USB product id of the TLB-6700
        self.DeviceKey = '<KEY>'

    def tlb_open(self):
        """(Re)open the USB connection to the laser."""
        self.tlb.CloseDevices()
        self.tlb.OpenDevices(self.ProductID, True)

    def close(self):
        """Close all open Newport USB devices."""
        self.tlb.CloseDevices()

    def query(self, msg):
        """Send *msg* to the instrument and return its reply as a string."""
        self.answer.Clear()
        self.tlb.Query(self.DeviceKey, msg, self.answer)
        return self.answer.ToString()

    def set_power(self, P):
        """Set the diode power set-point (mW) and return the read-back value."""
        self.query(f'SOURCE:POWER:DIODE {P}')
        return self.query('SOURCE:POWER:DIODE?')

    def set_track(self, track=1):
        """Set track mode (1 = on, 0 = off) and return the read-back state."""
        self.query(f'OUTPUT:TRACK {track}')
        return self.query('OUTPUT:TRACK?')

    def set_lambda(self, λ):
        """Set the target wavelength (nm) and return the read-back value."""
        self.query(f'SOURCE:WAVE {λ}')
        return self.query('SOURCE:WAVE?')

    def set_scan_limits(self, λi, λf):
        """Set scan start/stop wavelengths (nm); return read-back (start, stop)."""
        self.query(f'SOURCE:WAVE:START {λi}')
        self.query(f'SOURCE:WAVE:STOP {λf}')
        start = self.query('SOURCE:WAVE:START?')
        stop = self.query('SOURCE:WAVE:STOP?')
        return start, stop

    def set_scan_speeds(self, forward, backward=0.1):
        """Set forward/return scan slew speeds; return read-back (forward, return)."""
        self.query(f'SOURCE:WAVE:SLEW:FORWARD {forward}')
        self.query(f'SOURCE:WAVE:SLEW:RETURN {backward}')
        f = self.query('SOURCE:WAVE:SLEW:FORWARD?')
        b = self.query('SOURCE:WAVE:SLEW:RETURN?')
        return f, b

    def set_scan_number(self, scan_number):
        """Set the desired number of scans; return the read-back value."""
        self.query(f'SOURCE:WAVE:DESSCANS {scan_number}')
        return self.query('SOURCE:WAVE:DESSCANS?')

    def isFree(self):
        """Return 1 when the previous operation has completed (``*OPC?``)."""
        return int(self.query('*OPC?'))

    def get_wavelength(self):
        """Return the sensed wavelength as a float."""
        return float(self.query('SENSE:WAVELENGTH'))

    def scan(self, signal):
        """Start a wavelength scan when *signal* is truthy, otherwise stop it."""
        if signal:
            self.query('OUTPUT:SCAN:START')
        else:
            self.query('OUTPUT:SCAN:STOP')
|
[
"os.getcwd"
] |
[((117, 128), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (126, 128), False, 'import os\n'), ((130, 141), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (139, 141), False, 'import os\n')]
|
__extras__ = []

from .environment import Environment, MDPInfo

# Optional environment backends.  Each import is attempted individually so a
# missing third-party dependency only disables that backend: the placeholder
# stays None and the name is left out of __extras__ / __all__.
try:
    Atari = None
    from .atari import Atari
    __extras__.append('Atari')
except ImportError:
    # BUG FIX: a leftover ``import ipdb; ipdb.set_trace()`` in this handler
    # dropped into an interactive debugger, hanging any import of this
    # package whenever the Atari dependencies were missing.
    print("Could not import Atari environment")
try:
    Gym = None
    from .gym_env import Gym
    __extras__.append('Gym')
except ImportError:
    pass
try:
    DMControl = None
    from .dm_control_env import DMControl
    __extras__.append('DMControl')
except ImportError:
    pass
try:
    Mujoco = None
    from .mujoco import MuJoCo
    # NOTE(review): this imports ``MuJoCo`` but registers 'Mujoco', whose
    # placeholder above stays None — confirm which name callers expect
    # before changing either side.
    __extras__.append('Mujoco')
except ImportError:
    pass
try:
    PyBullet = None
    from .pybullet import PyBullet
    __extras__.append('PyBullet')
except ImportError:
    pass

from .generators.simple_chain import generate_simple_chain
from .car_on_hill import CarOnHill
from .cart_pole import CartPole
from .finite_mdp import FiniteMDP
from .grid_world import GridWorld, GridWorldVanHasselt
from .inverted_pendulum import InvertedPendulum
from .lqr import LQR
from .puddle_world import PuddleWorld
from .segway import Segway
from .ship_steering import ShipSteering

__all__ = ['Environment', 'MDPInfo', 'generate_simple_chain',
           'CarOnHill', 'CartPole', 'FiniteMDP',
           'GridWorld', 'GridWorldVanHasselt', 'InvertedPendulum',
           'LQR', 'PuddleWorld', 'Segway',
           'ShipSteering'] + __extras__
|
[
"ipdb.set_trace"
] |
[((239, 255), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (253, 255), False, 'import ipdb\n')]
|
import logging
import sys
import requests
from requests import RequestException
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# Algolia search endpoint behind fundamentei.com and the default request
# credentials (search-only API key).
BASE_URL = 'https://19ba7tbxn5-dsn.algolia.net/1/indexes/*/queries'
params = {
    'x-algolia-agent': 'Algolia%20for%20JavaScript%20(3.34.0)%3B%20Browser',
    'x-algolia-application-id': '19BA7TBXN5',
    'x-algolia-api-key': '<KEY>'
}
# Query payload template: NUM_PAGE is substituted per request and the facet
# filter restricts results to Brazilian companies.
query = '{"requests":[{"indexName":"assets","params":"' \
        'query=&hitsPerPage=100&page=NUM_PAGE&facetFilters=%5B%5B%22type%3ABRAZILIAN_COMPANY%22%5D%5D"}]}'


class Fundamentei:
    """Small client for the Algolia index behind fundamentei.com.

    Optionally persists fetched results into a Mongo-style database handle
    (any mapping of collection name -> collection with ``insert_one`` /
    ``insert_many`` / ``drop``).
    """

    def __init__(self, base_url=BASE_URL, params=params, query=query, db=None):
        # Note: the module-level ``params`` dict is shared between instances
        # that use the default; do not mutate it in place.
        self.base_url = base_url
        self.params = params
        self.query = query
        self.db = db
        self.results = []

    def get_data(self, force_update=False, page=0):
        """POST one page query and return the parsed JSON response.

        Returns None (after logging) when the HTTP request fails.
        ``force_update`` is currently unused; kept for interface
        compatibility.
        """
        response = requests.post(
            self.base_url,
            params=self.params,
            data=self.query.replace('NUM_PAGE', str(page))
        )
        try:
            response.raise_for_status()
        except RequestException:
            logging.error(response.text)
            return None
        return response.json()

    def get_results(self, page=0):
        """Return the list of hits for *page*; empty list if the request failed."""
        data = self.get_data(page=page)
        if not data:
            # BUG FIX: get_data returns None on HTTP errors; indexing it
            # previously raised TypeError here.
            return []
        return data['results'][0]['hits']

    def get_all_results(self, update_db=False, drop_old_collection=True):
        """Fetch all pages (up to 100) until an empty page is returned."""
        self.results = []
        for page in range(100):
            results = self.get_results(page=page)
            if not results:
                break
            self.results += results
        if update_db:
            self.save_data(self.results, 'hits', drop_old_collection=drop_old_collection)
        return self.results

    def save_data(self, data, collection, drop_old_collection=False):
        """Insert *data* into the named collection of ``self.db``.

        Returns the insert result, or None when no database is configured.
        """
        if not self.db:
            logging.error('Could not save data. Please choose a database')
            return None
        collection = self.db[collection]
        many = not isinstance(data, dict) and len(data) > 1
        if drop_old_collection:
            collection.drop()
        if many:
            return collection.insert_many(data)
        return collection.insert_one(data)
|
[
"logging.error",
"logging.basicConfig"
] |
[((82, 140), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (101, 140), False, 'import logging\n'), ((1810, 1872), 'logging.error', 'logging.error', (['"""Could not save data. Please choose a database"""'], {}), "('Could not save data. Please choose a database')\n", (1823, 1872), False, 'import logging\n'), ((1096, 1124), 'logging.error', 'logging.error', (['response.text'], {}), '(response.text)\n', (1109, 1124), False, 'import logging\n')]
|
from keras.callbacks import Callback
import keras.backend as K
import numpy as np
class SGDRScheduler(Callback):
    '''Cosine annealing learning rate scheduler with periodic restarts.
    # Usage
        ```python
        schedule = SGDRScheduler(min_lr=1e-5,
                                 max_lr=1e-2,
                                 steps_per_epoch=np.ceil(epoch_size/batch_size),
                                 lr_decay=0.9,
                                 cycle_length=5,
                                 mult_factor=1.5)
        model.fit(X_train, Y_train, epochs=100, callbacks=[schedule])
        ```
    # Arguments
        min_lr: The lower bound of the learning rate range for the experiment.
        max_lr: The upper bound of the learning rate range for the experiment.
        steps_per_epoch: Number of mini-batches in the dataset. Calculated as `np.ceil(epoch_size/batch_size)`.
        lr_decay: Reduce the max_lr after the completion of each cycle.
            Ex. To reduce the max_lr by 20% after each cycle, set this value to 0.8.
            May also be a callable taking the epoch index and returning the factor.
        cycle_length: Initial number of epochs in a cycle.
        mult_factor: Scale epochs_to_restart after each full cycle completion.
    # References
        Blog post: jeremyjordan.me/nn-learning-rate
        Original paper: http://arxiv.org/abs/1608.03983
    '''

    def __init__(self,
                 min_lr,
                 max_lr,
                 steps_per_epoch,
                 lr_decay=1,
                 cycle_length=10,
                 mult_factor=2):
        self.min_lr = min_lr
        self.max_lr = max_lr
        self.lr_decay = lr_decay            # scalar factor, or callable epoch -> factor
        self.batch_since_restart = 0        # batches since the last warm restart
        self.next_restart = cycle_length    # epoch index of the next restart
        self.steps_per_epoch = steps_per_epoch
        self.cycle_length = cycle_length
        self.mult_factor = mult_factor
        self.history = {}                   # per-batch log of 'lr' and batch metrics

    def clr(self):
        '''Calculate the current learning rate from the cosine schedule.'''
        fraction_to_restart = self.batch_since_restart / (self.steps_per_epoch * self.cycle_length)
        return self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(fraction_to_restart * np.pi))

    def on_train_begin(self, logs=None):
        # BUG FIX: mutable default argument ``logs={}`` replaced with None
        # (Keras convention).  Also the old docstring claimed the rate is
        # initialized to the *minimum*; the code sets it to max_lr.
        '''Initialize the learning rate to max_lr at the start of training.'''
        K.set_value(self.model.optimizer.lr, self.max_lr)

    def on_batch_end(self, batch, logs=None):
        '''Record previous batch statistics and update the learning rate.'''
        logs = logs or {}
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
        self.batch_since_restart += 1
        K.set_value(self.model.optimizer.lr, self.clr())

    def on_epoch_end(self, epoch, logs=None):
        '''Check for end of current cycle, apply restarts when necessary.'''
        if callable(self.lr_decay):
            decay = self.lr_decay(epoch)
        else:
            decay = self.lr_decay
        if epoch + 1 == self.next_restart:
            self.batch_since_restart = 0
            self.cycle_length = np.ceil(self.cycle_length * self.mult_factor)
            self.next_restart += self.cycle_length
            self.max_lr *= decay
            print('SGDRScheduler :: set max lr in ', self.max_lr)
|
[
"keras.backend.set_value",
"keras.backend.get_value",
"numpy.ceil",
"numpy.cos"
] |
[((2345, 2394), 'keras.backend.set_value', 'K.set_value', (['self.model.optimizer.lr', 'self.max_lr'], {}), '(self.model.optimizer.lr, self.max_lr)\n', (2356, 2394), True, 'import keras.backend as K\n'), ((2592, 2628), 'keras.backend.get_value', 'K.get_value', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (2603, 2628), True, 'import keras.backend as K\n'), ((3176, 3221), 'numpy.ceil', 'np.ceil', (['(self.cycle_length * self.mult_factor)'], {}), '(self.cycle_length * self.mult_factor)\n', (3183, 3221), True, 'import numpy as np\n'), ((2126, 2161), 'numpy.cos', 'np.cos', (['(fraction_to_restart * np.pi)'], {}), '(fraction_to_restart * np.pi)\n', (2132, 2161), True, 'import numpy as np\n')]
|
import hashlib
import os
import json
import random
import collections
from operator import itemgetter
import warnings
import jinja2
from docutils import nodes
from docutils.statemachine import ViewList
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives import flag
from sphinx.util.nodes import nested_parse_with_titles
from .utils import (
get_docstring_and_rest,
prev_this_next,
create_thumbnail,
create_generic_image,
)
from altair.utils.execeval import eval_block
from altair.examples import iter_examples
EXAMPLE_MODULE = "altair.examples"
GALLERY_TEMPLATE = jinja2.Template(
"""
.. This document is auto-generated by the altair-gallery extension. Do not modify directly.
.. _{{ gallery_ref }}:
{{ title }}
{% for char in title %}-{% endfor %}
This gallery contains a selection of examples of the plots Altair can create.
Some may seem fairly complicated at first glance, but they are built by combining a simple set of declarative building blocks.
Many draw upon sample datasets compiled by the `Vega <https://vega.github.io/vega/>`_ project. To access them yourself, install `vega_datasets <https://github.com/altair-viz/vega_datasets>`_.
.. code-block:: none
$ pip install vega_datasets
{% for grouper, group in examples %}
.. _gallery-category-{{ grouper }}:
{{ grouper }}
{% for char in grouper %}~{% endfor %}
.. raw:: html
<span class="gallery">
{% for example in group %}
<a class="imagegroup" href="{{ example.name }}.html">
<span class="image" alt="{{ example.title }}" style="background-image: url({{ image_dir }}/{{ example.name }}-thumb.png);"></span>
<span class="image-title">{{ example.title }}</span>
</a>
{% endfor %}
</span>
<div style='clear:both;'></div>
.. toctree::
:hidden:
{% for example in group %}
{{ example.name }}
{%- endfor %}
{% endfor %}
"""
)
MINIGALLERY_TEMPLATE = jinja2.Template(
"""
.. raw:: html
<div id="showcase">
<div class="examples">
{% for example in examples %}
<a class="preview" href="{{ gallery_dir }}/{{ example.name }}.html" style="background-image: url({{ image_dir }}/{{ example.name }}-thumb.png)"></a>
{% endfor %}
</div>
</div>
"""
)
EXAMPLE_TEMPLATE = jinja2.Template(
"""
.. This document is auto-generated by the altair-gallery extension. Do not modify directly.
.. _gallery_{{ name }}:
{{ docstring }}
.. altair-plot::
{% if code_below %}:code-below:{% endif %}
{% if strict %}:strict:{% endif %}
{{ code | indent(4) }}
.. toctree::
:hidden:
"""
)
def save_example_pngs(examples, image_dir, make_thumbnails=True):
    """Save example pngs and (optionally) thumbnails.

    An MD5 hash of each example's source code is cached in
    ``_image_hashes.json`` inside *image_dir*, so an image is only
    re-rendered when the example's code changed or its png is missing.
    """
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    # store hashes so that we know whether images need to be generated
    hash_file = os.path.join(image_dir, "_image_hashes.json")
    if os.path.exists(hash_file):
        with open(hash_file) as f:
            hashes = json.load(f)
    else:
        hashes = {}
    for example in examples:
        filename = example["name"] + ".png"
        image_file = os.path.join(image_dir, filename)
        example_hash = hashlib.md5(example["code"].encode()).hexdigest()
        hashes_match = hashes.get(filename, "") == example_hash
        if hashes_match and os.path.exists(image_file):
            print("-> using cached {}".format(image_file))
        else:
            # the file changed or the image file does not exist. Generate it.
            print("-> saving {}".format(image_file))
            chart = eval_block(example["code"])
            try:
                chart.save(image_file)
                hashes[filename] = example_hash
            except ImportError:
                # rendering backend unavailable: fall back to a placeholder
                warnings.warn("Unable to save image: using generic image")
                create_generic_image(image_file)
            # checkpoint the hash cache after every render so an interrupted
            # build does not lose the images already completed
            with open(hash_file, "w") as f:
                json.dump(hashes, f)
        if make_thumbnails:
            params = example.get("galleryParameters", {})
            thumb_file = os.path.join(image_dir, example["name"] + "-thumb.png")
            create_thumbnail(image_file, thumb_file, **params)
    # Save hashes so we know whether we need to re-generate plots
    with open(hash_file, "w") as f:
        json.dump(hashes, f)
def populate_examples(**kwds):
    """Collect metadata for every Altair example, sorted by example name.

    Any keyword arguments are merged into each example dict before the
    extracted fields (docstring, title, code, category, lineno) are added.
    """
    collected = sorted(iter_examples(), key=itemgetter("name"))
    for entry in collected:
        docstring, category, code, lineno = get_docstring_and_rest(entry["filename"])
        entry.update(kwds)
        # Examples without an explicit category fall into "other charts".
        category = category if category is not None else "other charts"
        entry["docstring"] = docstring
        entry["title"] = docstring.strip().split("\n")[0]
        entry["code"] = code
        entry["category"] = category.title()
        entry["lineno"] = lineno
    return collected
class AltairMiniGalleryDirective(Directive):
    """The ``altair-minigallery`` directive.

    Renders a strip of linked example thumbnails: either an explicit
    comma-separated list of example names, or a (optionally shuffled)
    selection limited to ``size`` entries.
    """

    has_content = False

    option_spec = {
        "size": int,
        "names": str,
        "indices": lambda x: list(map(int, x.split())),
        "shuffle": flag,
        "seed": int,
        "titles": bool,
        "width": str,
    }

    def run(self):
        size = self.options.get("size", 15)
        # BUG FIX: ``"".split(",")`` yields ``[""]``, which is truthy, so an
        # absent ``names`` option used to enter the names branch below and
        # raise ValueError.  Filter out empty entries instead.
        names = [
            name.strip()
            for name in self.options.get("names", "").split(",")
            if name.strip()
        ]
        indices = self.options.get("indices", [])
        shuffle = "shuffle" in self.options
        seed = self.options.get("seed", 42)
        titles = self.options.get("titles", False)
        width = self.options.get("width", None)

        env = self.state.document.settings.env
        app = env.app

        gallery_dir = app.builder.config.altair_gallery_dir

        examples = populate_examples()

        if names:
            if len(names) < size:
                raise ValueError(
                    "altair-minigallery: if names are specified, "
                    "the list must be at least as long as size."
                )
            mapping = {example["name"]: example for example in examples}
            examples = [mapping[name] for name in names]
        else:
            if indices:
                examples = [examples[i] for i in indices]
            if shuffle:
                random.seed(seed)
                random.shuffle(examples)
            if size:
                examples = examples[:size]

        include = MINIGALLERY_TEMPLATE.render(
            image_dir="/_static",
            gallery_dir=gallery_dir,
            examples=examples,
            titles=titles,
            width=width,
        )

        # parse the generated reST and return the resulting nodes
        result = ViewList()
        for line in include.split("\n"):
            result.append(line, "<altair-minigallery>")
        node = nodes.paragraph()
        node.document = self.state.document
        nested_parse_with_titles(self.state, result, node)

        return node.children
def main(app):
    """Build the example gallery: index page, images, and per-example pages.

    Runs on the Sphinx ``builder-inited`` event.
    """
    gallery_dir = app.builder.config.altair_gallery_dir
    target_dir = os.path.join(app.builder.srcdir, gallery_dir)
    image_dir = os.path.join(app.builder.srcdir, "_images")

    gallery_ref = app.builder.config.altair_gallery_ref
    gallery_title = app.builder.config.altair_gallery_title
    examples = populate_examples(gallery_ref=gallery_ref, code_below=True, strict=False)

    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    examples = sorted(examples, key=lambda x: x["title"])
    # Category insertion order fixes the order of sections on the index page.
    examples_toc = collections.OrderedDict(
        {
            "Simple Charts": [],
            "Bar Charts": [],
            "Line Charts": [],
            "Area Charts": [],
            "Circular Plots": [],
            "Scatter Plots": [],
            "Histograms": [],
            "Maps": [],
            "Interactive Charts": [],
            "Case Studies": [],
            "Other Charts": [],
        }
    )
    for d in examples:
        examples_toc[d["category"]].append(d)

    # Write the gallery index file (utf-8, matching the example pages below)
    with open(os.path.join(target_dir, "index.rst"), "w", encoding="utf-8") as f:
        f.write(
            GALLERY_TEMPLATE.render(
                title=gallery_title,
                examples=examples_toc.items(),
                image_dir="/_static",
                gallery_ref=gallery_ref,
            )
        )

    # save the images to file
    save_example_pngs(examples, image_dir)

    # Write the individual example files, linking each to its neighbors
    for prev_ex, example, next_ex in prev_this_next(examples):
        if prev_ex:
            example["prev_ref"] = "gallery_{name}".format(**prev_ex)
        if next_ex:
            example["next_ref"] = "gallery_{name}".format(**next_ex)
        target_filename = os.path.join(target_dir, example["name"] + ".rst")
        # BUG FIX: os.path.join(target_filename) was a single-argument no-op.
        with open(target_filename, "w", encoding="utf-8") as f:
            f.write(EXAMPLE_TEMPLATE.render(example))
def setup(app):
    """Sphinx extension entry point: register hooks, assets and config."""
    app.connect("builder-inited", main)
    app.add_css_file("altair-gallery.css")
    for option, default in (
        ("altair_gallery_dir", "gallery"),
        ("altair_gallery_ref", "example-gallery"),
        ("altair_gallery_title", "Example Gallery"),
    ):
        app.add_config_value(option, default, "env")
    app.add_directive_to_domain("py", "altair-minigallery", AltairMiniGalleryDirective)
|
[
"jinja2.Template",
"json.dump",
"json.load",
"os.makedirs",
"random.shuffle",
"os.path.exists",
"docutils.nodes.paragraph",
"sphinx.util.nodes.nested_parse_with_titles",
"altair.examples.iter_examples",
"docutils.statemachine.ViewList",
"random.seed",
"collections.OrderedDict",
"warnings.warn",
"altair.utils.execeval.eval_block",
"os.path.join",
"operator.itemgetter"
] |
[((617, 1895), 'jinja2.Template', 'jinja2.Template', (['"""\n.. This document is auto-generated by the altair-gallery extension. Do not modify directly.\n\n.. _{{ gallery_ref }}:\n\n{{ title }}\n{% for char in title %}-{% endfor %}\n\nThis gallery contains a selection of examples of the plots Altair can create.\n\nSome may seem fairly complicated at first glance, but they are built by combining a simple set of declarative building blocks.\n\nMany draw upon sample datasets compiled by the `Vega <https://vega.github.io/vega/>`_ project. To access them yourself, install `vega_datasets <https://github.com/altair-viz/vega_datasets>`_.\n\n.. code-block:: none\n\n $ pip install vega_datasets\n\n{% for grouper, group in examples %}\n\n.. _gallery-category-{{ grouper }}:\n\n{{ grouper }}\n{% for char in grouper %}~{% endfor %}\n\n.. raw:: html\n\n <span class="gallery">\n {% for example in group %}\n <a class="imagegroup" href="{{ example.name }}.html">\n <span class="image" alt="{{ example.title }}" style="background-image: url({{ image_dir }}/{{ example.name }}-thumb.png);"></span>\n <span class="image-title">{{ example.title }}</span>\n </a>\n {% endfor %}\n </span>\n\n <div style=\'clear:both;\'></div>\n\n.. toctree::\n :hidden:\n{% for example in group %}\n {{ example.name }}\n{%- endfor %}\n\n{% endfor %}\n"""'], {}), '(\n """\n.. This document is auto-generated by the altair-gallery extension. Do not modify directly.\n\n.. _{{ gallery_ref }}:\n\n{{ title }}\n{% for char in title %}-{% endfor %}\n\nThis gallery contains a selection of examples of the plots Altair can create.\n\nSome may seem fairly complicated at first glance, but they are built by combining a simple set of declarative building blocks.\n\nMany draw upon sample datasets compiled by the `Vega <https://vega.github.io/vega/>`_ project. To access them yourself, install `vega_datasets <https://github.com/altair-viz/vega_datasets>`_.\n\n.. 
code-block:: none\n\n $ pip install vega_datasets\n\n{% for grouper, group in examples %}\n\n.. _gallery-category-{{ grouper }}:\n\n{{ grouper }}\n{% for char in grouper %}~{% endfor %}\n\n.. raw:: html\n\n <span class="gallery">\n {% for example in group %}\n <a class="imagegroup" href="{{ example.name }}.html">\n <span class="image" alt="{{ example.title }}" style="background-image: url({{ image_dir }}/{{ example.name }}-thumb.png);"></span>\n <span class="image-title">{{ example.title }}</span>\n </a>\n {% endfor %}\n </span>\n\n <div style=\'clear:both;\'></div>\n\n.. toctree::\n :hidden:\n{% for example in group %}\n {{ example.name }}\n{%- endfor %}\n\n{% endfor %}\n"""\n )\n', (632, 1895), False, 'import jinja2\n'), ((1916, 2254), 'jinja2.Template', 'jinja2.Template', (['"""\n.. raw:: html\n\n <div id="showcase">\n <div class="examples">\n {% for example in examples %}\n <a class="preview" href="{{ gallery_dir }}/{{ example.name }}.html" style="background-image: url({{ image_dir }}/{{ example.name }}-thumb.png)"></a>\n {% endfor %}\n </div>\n </div>\n"""'], {}), '(\n """\n.. raw:: html\n\n <div id="showcase">\n <div class="examples">\n {% for example in examples %}\n <a class="preview" href="{{ gallery_dir }}/{{ example.name }}.html" style="background-image: url({{ image_dir }}/{{ example.name }}-thumb.png)"></a>\n {% endfor %}\n </div>\n </div>\n"""\n )\n', (1931, 2254), False, 'import jinja2\n'), ((2272, 2598), 'jinja2.Template', 'jinja2.Template', (['"""\n.. This document is auto-generated by the altair-gallery extension. Do not modify directly.\n\n.. _gallery_{{ name }}:\n\n{{ docstring }}\n\n.. altair-plot::\n {% if code_below %}:code-below:{% endif %}\n {% if strict %}:strict:{% endif %}\n\n {{ code | indent(4) }}\n\n.. toctree::\n :hidden:\n"""'], {}), '(\n """\n.. This document is auto-generated by the altair-gallery extension. Do not modify directly.\n\n.. _gallery_{{ name }}:\n\n{{ docstring }}\n\n.. 
altair-plot::\n {% if code_below %}:code-below:{% endif %}\n {% if strict %}:strict:{% endif %}\n\n {{ code | indent(4) }}\n\n.. toctree::\n :hidden:\n"""\n )\n', (2287, 2598), False, 'import jinja2\n'), ((2876, 2921), 'os.path.join', 'os.path.join', (['image_dir', '"""_image_hashes.json"""'], {}), "(image_dir, '_image_hashes.json')\n", (2888, 2921), False, 'import os\n'), ((2930, 2955), 'os.path.exists', 'os.path.exists', (['hash_file'], {}), '(hash_file)\n', (2944, 2955), False, 'import os\n'), ((7105, 7150), 'os.path.join', 'os.path.join', (['app.builder.srcdir', 'gallery_dir'], {}), '(app.builder.srcdir, gallery_dir)\n', (7117, 7150), False, 'import os\n'), ((7167, 7210), 'os.path.join', 'os.path.join', (['app.builder.srcdir', '"""_images"""'], {}), "(app.builder.srcdir, '_images')\n", (7179, 7210), False, 'import os\n'), ((7567, 7820), 'collections.OrderedDict', 'collections.OrderedDict', (["{'Simple Charts': [], 'Bar Charts': [], 'Line Charts': [], 'Area Charts': [\n ], 'Circular Plots': [], 'Scatter Plots': [], 'Histograms': [], 'Maps':\n [], 'Interactive Charts': [], 'Case Studies': [], 'Other Charts': []}"], {}), "({'Simple Charts': [], 'Bar Charts': [],\n 'Line Charts': [], 'Area Charts': [], 'Circular Plots': [],\n 'Scatter Plots': [], 'Histograms': [], 'Maps': [], 'Interactive Charts':\n [], 'Case Studies': [], 'Other Charts': []})\n", (7590, 7820), False, 'import collections\n'), ((2730, 2755), 'os.path.exists', 'os.path.exists', (['image_dir'], {}), '(image_dir)\n', (2744, 2755), False, 'import os\n'), ((2765, 2787), 'os.makedirs', 'os.makedirs', (['image_dir'], {}), '(image_dir)\n', (2776, 2787), False, 'import os\n'), ((3151, 3184), 'os.path.join', 'os.path.join', (['image_dir', 'filename'], {}), '(image_dir, filename)\n', (3163, 3184), False, 'import os\n'), ((4316, 4336), 'json.dump', 'json.dump', (['hashes', 'f'], {}), '(hashes, f)\n', (4325, 4336), False, 'import json\n'), ((4452, 4467), 'altair.examples.iter_examples', 'iter_examples', ([], 
{}), '()\n', (4465, 4467), False, 'from altair.examples import iter_examples\n'), ((6741, 6751), 'docutils.statemachine.ViewList', 'ViewList', ([], {}), '()\n', (6749, 6751), False, 'from docutils.statemachine import ViewList\n'), ((6864, 6881), 'docutils.nodes.paragraph', 'nodes.paragraph', ([], {}), '()\n', (6879, 6881), False, 'from docutils import nodes\n'), ((6934, 6984), 'sphinx.util.nodes.nested_parse_with_titles', 'nested_parse_with_titles', (['self.state', 'result', 'node'], {}), '(self.state, result, node)\n', (6958, 6984), False, 'from sphinx.util.nodes import nested_parse_with_titles\n'), ((7429, 7455), 'os.path.exists', 'os.path.exists', (['target_dir'], {}), '(target_dir)\n', (7443, 7455), False, 'import os\n'), ((7465, 7488), 'os.makedirs', 'os.makedirs', (['target_dir'], {}), '(target_dir)\n', (7476, 7488), False, 'import os\n'), ((8759, 8809), 'os.path.join', 'os.path.join', (['target_dir', "(example['name'] + '.rst')"], {}), "(target_dir, example['name'] + '.rst')\n", (8771, 8809), False, 'import os\n'), ((3013, 3025), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3022, 3025), False, 'import json\n'), ((3352, 3378), 'os.path.exists', 'os.path.exists', (['image_file'], {}), '(image_file)\n', (3366, 3378), False, 'import os\n'), ((3604, 3631), 'altair.utils.execeval.eval_block', 'eval_block', (["example['code']"], {}), "(example['code'])\n", (3614, 3631), False, 'from altair.utils.execeval import eval_block\n'), ((4086, 4141), 'os.path.join', 'os.path.join', (['image_dir', "(example['name'] + '-thumb.png')"], {}), "(image_dir, example['name'] + '-thumb.png')\n", (4098, 4141), False, 'import os\n'), ((4473, 4491), 'operator.itemgetter', 'itemgetter', (['"""name"""'], {}), "('name')\n", (4483, 4491), False, 'from operator import itemgetter\n'), ((8085, 8122), 'os.path.join', 'os.path.join', (['target_dir', '"""index.rst"""'], {}), "(target_dir, 'index.rst')\n", (8097, 8122), False, 'import os\n'), ((3953, 3973), 'json.dump', 'json.dump', 
(['hashes', 'f'], {}), '(hashes, f)\n', (3962, 3973), False, 'import json\n'), ((6347, 6364), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (6358, 6364), False, 'import random\n'), ((6381, 6405), 'random.shuffle', 'random.shuffle', (['examples'], {}), '(examples)\n', (6395, 6405), False, 'import random\n'), ((8828, 8857), 'os.path.join', 'os.path.join', (['target_filename'], {}), '(target_filename)\n', (8840, 8857), False, 'import os\n'), ((3784, 3842), 'warnings.warn', 'warnings.warn', (['"""Unable to save image: using generic image"""'], {}), "('Unable to save image: using generic image')\n", (3797, 3842), False, 'import warnings\n')]
|
#!/usr/local/bin/python
# coding=utf-8
# Some minor code fixup and Hungarian translation by Czompi.
# smartmirror.py
# requirements
# requests, feedparser, traceback, Pillow
import os, sys
import config
from tkinter import *
import locale
import threading
import time
import requests
import json
import traceback
import feedparser
from PIL import Image, ImageTk
from contextlib import contextmanager
# Guards the process-wide locale state: locale.setlocale is not thread safe.
LOCALE_LOCK = threading.Lock()

@contextmanager
def setlocale(name):
    """Temporarily switch the LC_ALL locale to *name* (thread safe).

    Yields the name of the newly-set locale and restores the previous
    locale when the block exits, even if an exception is raised.
    """
    with LOCALE_LOCK:
        previous = locale.setlocale(locale.LC_ALL)
        try:
            yield locale.setlocale(locale.LC_ALL, name)
        finally:
            locale.setlocale(locale.LC_ALL, previous)
# maps open weather icons to local assets
# Keys are OpenWeatherMap icon codes (two-digit condition + 'd'ay/'n'ight
# suffix); values are paths to the bundled PNG assets shown on the mirror.
icon_lookup = {
    '01d': "assets/Sun.png",
    '01n': "assets/Moon.png",
    '02d': "assets/PartlySunny.png",
    '02n': "assets/PartlyMoon.png",
    '03d': "assets/Cloud.png",
    '03n': "assets/Cloud.png",
    '04d': "assets/Cloud.png",
    '04n': "assets/Cloud.png",
    '09d': "assets/Rain.png",
    '09n': "assets/Rain.png",
    '10d': "assets/Rain.png",
    '10n': "assets/Rain.png",
    '11d': "assets/Storm.png",
    '11n': "assets/Storm.png",
    '13d': "assets/Snow.png",
    '13n': "assets/Snow.png",
    '50d': "assets/Haze.png",
    '50n': "assets/Haze.png",
}
class Clock(Frame):
    """Tkinter frame showing the current time, weekday and date.

    The labels refresh themselves through a 200 ms ``after`` polling loop
    and are only re-configured when the rendered text actually changes.
    """

    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, bg='black')
        # Cached copies of what each label currently displays, so the
        # widgets are only touched when the text changes.
        self.time1 = ''
        self.timeLbl = Label(self, font=('Helvetica', config.large_text_size), fg="white", bg="black")
        self.timeLbl.pack(side=TOP, anchor=E)
        self.day_of_week1 = ''
        self.dayOWLbl = Label(self, text=self.day_of_week1, font=('Helvetica', config.small_text_size), fg="white", bg="black")
        self.dayOWLbl.pack(side=TOP, anchor=E)
        self.date1 = ''
        self.dateLbl = Label(self, text=self.date1, font=('Helvetica', config.small_text_size), fg="white", bg="black")
        self.dateLbl.pack(side=TOP, anchor=E)
        self.tick()

    def tick(self):
        """Refresh the three labels, then reschedule in 200 ms."""
        with setlocale(config.ui_locale):
            # 12h vs 24h clock format per config.time_format
            fmt = '%I:%M %p' if config.time_format == 12 else '%H:%M'
            new_time = time.strftime(fmt)
            new_day = time.strftime('%A')
            new_date = time.strftime(config.date_format)
            if new_time != self.time1:
                self.time1 = new_time
                self.timeLbl.config(text=new_time)
            if new_day != self.day_of_week1:
                self.day_of_week1 = new_day
                self.dayOWLbl.config(text=new_day)
            if new_date != self.date1:
                self.date1 = new_date
                self.dateLbl.config(text=new_date)
            # could use >200 ms, but the display gets jerky
            self.timeLbl.after(200, self.tick)
class Weather(Frame):
    """Tkinter frame showing current conditions from the OpenWeatherMap One Call API.

    Location comes either from config.latitude/config.longitude or, when those
    are None, from a freegeoip lookup of this machine's public IP. The display
    refreshes itself every 10 minutes; labels are only re-configured when the
    underlying value changed.
    """
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, bg='black')
        # cached values: widgets are only updated when these change
        self.temperature = ''
        self.forecast = ''
        self.location = ''
        self.currently = ''
        self.icon = ''
        self.degreeFrm = Frame(self, bg="black")
        self.degreeFrm.pack(side=TOP, anchor=W)
        self.temperatureLbl = Label(self.degreeFrm, font=('Helvetica', config.xlarge_text_size), fg="white", bg="black")
        self.temperatureLbl.pack(side=LEFT, anchor=N)
        self.iconLbl = Label(self.degreeFrm, bg="black")
        self.iconLbl.pack(side=LEFT, anchor=N, padx=20)
        self.currentlyLbl = Label(self, font=('Helvetica', config.medium_text_size), fg="white", bg="black")
        self.currentlyLbl.pack(side=TOP, anchor=W)
        self.forecastLbl = Label(self, font=('Helvetica', config.small_text_size), fg="white", bg="black")
        self.forecastLbl.pack(side=TOP, anchor=W)
        self.locationLbl = Label(self, font=('Helvetica', config.small_text_size), fg="white", bg="black")
        self.locationLbl.pack(side=TOP, anchor=W)
        self.get_weather()
    def get_ip(self):
        """Return this machine's public IP, or an error string on failure."""
        try:
            ip_url = "https://api.ipify.org/?format=json"
            req = requests.get(ip_url)
            ip_json = json.loads(req.text)
            return ip_json['ip']
        except Exception as e:
            traceback.print_exc()
            return "Error: %s. Sikertelen az IP cím lekérdezése." % e
    def get_weather(self):
        """Fetch current conditions + today's forecast and update the labels.

        Reschedules itself every 10 minutes regardless of success.
        """
        try:
            if config.latitude is None and config.longitude is None:
                # get location from the public IP
                location_req_url = "https://freegeoip.app/json/%s" % self.get_ip()
                r = requests.get(location_req_url)
                location_obj = json.loads(r.text)
                lat = location_obj['latitude']
                lon = location_obj['longitude']
                location2 = "%s, %s" % (location_obj['city'], location_obj['region_code'])
                # get weather
                weather_req_url = "https://api.openweathermap.org/data/2.5/onecall?lat=%s&lon=%s&lang=%s&exclude=minutely,hourly&units=%s&appid=%s" % (lat, lon, config.weather_lang, config.weather_unit, config.weather_api_token)
            else:
                location2 = ""
                # get weather for the explicitly configured coordinates
                weather_req_url = "https://api.openweathermap.org/data/2.5/onecall?lat=%s&lon=%s&lang=%s&exclude=minutely,hourly&units=%s&appid=%s" % (config.latitude, config.longitude, config.weather_lang, config.weather_unit, config.weather_api_token)
            print(weather_req_url)
            r = requests.get(weather_req_url)
            weather_obj = json.loads(r.text)
            print(weather_obj)
            degree_sign= u'\N{DEGREE SIGN}'
            temperature2 = "%s%s" % (str(int(weather_obj['current']['temp'])), degree_sign)
            currently2 = weather_obj['current']['weather'][0]['description']
            forecast2 = weather_obj['daily'][0]['weather'][0]['description']
            icon_id = weather_obj['current']['weather'][0]['icon']
            icon2 = None
            if icon_id in icon_lookup:
                icon2 = icon_lookup[icon_id]
            if icon2 is not None:
                if self.icon != icon2:
                    self.icon = icon2
                    image = Image.open(icon2)
                    # NOTE: Image.ANTIALIAS is deprecated in newer Pillow releases
                    image = image.resize((100, 100), Image.ANTIALIAS)
                    image = image.convert('RGB')
                    photo = ImageTk.PhotoImage(image)
                    self.iconLbl.config(image=photo)
                    self.iconLbl.image = photo  # keep a reference so Tk doesn't discard it
            else:
                # remove image
                self.iconLbl.config(image='')
            if self.currently != currently2:
                self.currently = currently2
                self.currentlyLbl.config(text=currently2)
            if self.forecast != forecast2:
                self.forecast = forecast2
                self.forecastLbl.config(text=forecast2)
            if self.temperature != temperature2:
                self.temperature = temperature2
                self.temperatureLbl.config(text=temperature2)
            if self.location != location2:
                if location2 == ", ":
                    self.location = "Nem behatárolható pozíció"
                    self.locationLbl.config(text="Nem behatárolható pozíció")
                else:
                    self.location = location2
                    self.locationLbl.config(text=location2)
        except Exception as e:
            traceback.print_exc()
            print("Hiba: %s. Az időjárás lekérdezése sikertelen." % e)
        self.after(600000, self.get_weather)
    @staticmethod
    def convert_kelvin_to_fahrenheit(kelvin_temp):
        """Convert a Kelvin temperature to degrees Fahrenheit.

        Fix: 0 °C is 273.15 K, not 273 — the previous constant skewed every
        conversion by 0.27 °F.
        """
        return 1.8 * (kelvin_temp - 273.15) + 32
class News(Frame):
    """Tkinter frame listing the five most recent Google News RSS headlines."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, *args, **kwargs)
        self.config(bg='black')
        self.title = 'Hírek' # Hungarian for 'News'
        self.newsLbl = Label(self, text=self.title, font=('Helvetica', config.medium_text_size), fg="white", bg="black")
        self.newsLbl.pack(side=TOP, anchor=W)
        self.headlinesContainer = Frame(self, bg="black")
        self.headlinesContainer.pack(side=TOP)
        self.get_headlines()
    def get_headlines(self):
        """Rebuild the headline widgets from the feed; reschedules itself every 10 minutes."""
        try:
            # remove all children
            for widget in self.headlinesContainer.winfo_children():
                widget.destroy()
            # NOTE(review): `is None` is the idiomatic comparison; also note the
            # else-branch URL is hard-coded to the Hungarian edition regardless
            # of the configured country code.
            if config.news_country_code == None:
                headlines_url = "https://news.google.com/news?ned=us&output=rss"
            else:
                headlines_url = "https://news.google.com/rss?hl=hu&gl=hu&ceid=HU:hu"
            feed = feedparser.parse(headlines_url)
            for post in feed.entries[0:5]:
                headline = NewsHeadline(self.headlinesContainer, post.title)
                headline.pack(side=TOP, anchor=W)
        except Exception as e:
            traceback.print_exc()
            print("Hiba: %s. A hírek lekérdezése sikertelen." % e)
        self.after(600000, self.get_headlines)
class NewsHeadline(Frame):
    """A single headline row: a newspaper icon followed by the headline text."""
    def __init__(self, parent, event_name=""):
        Frame.__init__(self, parent, bg='black')
        image = Image.open("assets/Newspaper.png")
        image = image.resize((25, 25), Image.ANTIALIAS)
        image = image.convert('RGB')
        photo = ImageTk.PhotoImage(image)
        self.iconLbl = Label(self, bg='black', image=photo)
        # keep a reference so Tk doesn't garbage-collect the image
        self.iconLbl.image = photo
        self.iconLbl.pack(side=LEFT, anchor=N)
        self.eventName = event_name
        self.eventNameLbl = Label(self, text=self.eventName, font=('Helvetica', config.small_text_size), fg="white", bg="black")
        self.eventNameLbl.pack(side=LEFT, anchor=N)
class Calendar(Frame):
    """Placeholder frame for calendar events (Google Calendar integration is TODO)."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, bg='black')
        self.title = 'Naptári események'  # Hungarian for 'Calendar events'
        self.calendarLbl = Label(self, text=self.title, font=('Helvetica', config.medium_text_size), fg="white", bg="black")
        self.calendarLbl.pack(side=TOP, anchor=E)
        self.calendarEventContainer = Frame(self, bg='black')
        self.calendarEventContainer.pack(side=TOP, anchor=E)
        self.get_events()
    def get_events(self):
        """Rebuild the (currently dummy) event list."""
        #TODO: implement this method
        # reference https://developers.google.com/google-apps/calendar/quickstart/python
        # remove all children
        for widget in self.calendarEventContainer.winfo_children():
            widget.destroy()
        calendar_event = CalendarEvent(self.calendarEventContainer)
        calendar_event.pack(side=TOP, anchor=E)
        pass
class CalendarEvent(Frame):
    """One calendar entry rendered as a right-aligned label."""
    def __init__(self, parent, event_name="Event 1"):
        Frame.__init__(self, parent, bg='black')
        self.eventName = event_name
        self.eventNameLbl = Label(self, text=self.eventName, font=('Helvetica', config.small_text_size), fg="white", bg="black")
        self.eventNameLbl.pack(side=TOP, anchor=E)
class FullscreenWindow:
    """Top-level mirror window: clock top-right, weather top-left, news bottom-left.

    <Return> toggles fullscreen; <Escape> leaves it.
    """
    def __init__(self):
        self.tk = Tk()
        self.tk.configure(background='black')
        self.topFrame = Frame(self.tk, background = 'black')
        self.bottomFrame = Frame(self.tk, background = 'black')
        self.topFrame.pack(side = TOP, fill=BOTH, expand = YES)
        self.bottomFrame.pack(side = BOTTOM, fill=BOTH, expand = YES)
        self.state = False  # current fullscreen state
        self.tk.bind("<Return>", self.toggle_fullscreen)
        self.tk.bind("<Escape>", self.end_fullscreen)
        # clock
        self.clock = Clock(self.topFrame)
        self.clock.pack(side=RIGHT, anchor=N, padx=100, pady=60)
        # weather
        self.weather = Weather(self.topFrame)
        self.weather.pack(side=LEFT, anchor=N, padx=100, pady=60)
        # news
        self.news = News(self.bottomFrame)
        self.news.pack(side=LEFT, anchor=S, padx=100, pady=60)
        # calender - removing for now
        # self.calender = Calendar(self.bottomFrame)
        # self.calender.pack(side = RIGHT, anchor=S, padx=100, pady=60)
    def toggle_fullscreen(self, event=None):
        """Flip fullscreen on/off (bound to <Return>)."""
        self.state = not self.state  # Just toggling the boolean
        self.tk.attributes("-fullscreen", self.state)
        return "break"
    def end_fullscreen(self, event=None):
        """Force-exit fullscreen (bound to <Escape>)."""
        self.state = False
        self.tk.attributes("-fullscreen", False)
        return "break"
# Script entry point: build the fullscreen mirror UI and hand control to Tk.
if __name__ == '__main__':
    w = FullscreenWindow()
    w.tk.mainloop()
|
[
"feedparser.parse",
"PIL.ImageTk.PhotoImage",
"traceback.print_exc",
"json.loads",
"time.strftime",
"PIL.Image.open",
"threading.Lock",
"requests.get",
"locale.setlocale"
] |
[((420, 436), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (434, 436), False, 'import threading\n'), ((556, 587), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL'], {}), '(locale.LC_ALL)\n', (572, 587), False, 'import locale\n'), ((9487, 9521), 'PIL.Image.open', 'Image.open', (['"""assets/Newspaper.png"""'], {}), "('assets/Newspaper.png')\n", (9497, 9521), False, 'from PIL import Image, ImageTk\n'), ((9631, 9656), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['image'], {}), '(image)\n', (9649, 9656), False, 'from PIL import Image, ImageTk\n'), ((686, 724), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', 'saved'], {}), '(locale.LC_ALL, saved)\n', (702, 724), False, 'import locale\n'), ((2437, 2456), 'time.strftime', 'time.strftime', (['"""%A"""'], {}), "('%A')\n", (2450, 2456), False, 'import time\n'), ((2477, 2510), 'time.strftime', 'time.strftime', (['config.date_format'], {}), '(config.date_format)\n', (2490, 2510), False, 'import time\n'), ((4415, 4435), 'requests.get', 'requests.get', (['ip_url'], {}), '(ip_url)\n', (4427, 4435), False, 'import requests\n'), ((4458, 4478), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (4468, 4478), False, 'import json\n'), ((5806, 5835), 'requests.get', 'requests.get', (['weather_req_url'], {}), '(weather_req_url)\n', (5818, 5835), False, 'import requests\n'), ((5862, 5880), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (5872, 5880), False, 'import json\n'), ((8962, 8993), 'feedparser.parse', 'feedparser.parse', (['headlines_url'], {}), '(headlines_url)\n', (8978, 8993), False, 'import feedparser\n'), ((619, 656), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', 'name'], {}), '(locale.LC_ALL, name)\n', (635, 656), False, 'import locale\n'), ((2278, 2303), 'time.strftime', 'time.strftime', (['"""%I:%M %p"""'], {}), "('%I:%M %p')\n", (2291, 2303), False, 'import time\n'), ((2366, 2388), 'time.strftime', 'time.strftime', (['"""%H:%M"""'], {}), 
"('%H:%M')\n", (2379, 2388), False, 'import time\n'), ((4555, 4576), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4574, 4576), False, 'import traceback\n'), ((4892, 4922), 'requests.get', 'requests.get', (['location_req_url'], {}), '(location_req_url)\n', (4904, 4922), False, 'import requests\n'), ((4954, 4972), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (4964, 4972), False, 'import json\n'), ((7749, 7770), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7768, 7770), False, 'import traceback\n'), ((9208, 9229), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (9227, 9229), False, 'import traceback\n'), ((6520, 6537), 'PIL.Image.open', 'Image.open', (['icon2'], {}), '(icon2)\n', (6530, 6537), False, 'from PIL import Image, ImageTk\n'), ((6685, 6710), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['image'], {}), '(image)\n', (6703, 6710), False, 'from PIL import Image, ImageTk\n')]
|
import unittest
from datetime import date
from fincalendar.foreign_exchange_config import get_settlement_day_convention
class FxSettlementDateConvention(unittest.TestCase):
    """Checks the spot-settlement lag (T+n days) reported for FX currency pairs."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_t_plus_2(self):
        # SGD priced in USD settles two business days after trade date.
        self.assertEqual(get_settlement_day_convention('SGD', 'USD'), 2)

    def test_t_plus_0(self):
        # KZT priced in USD settles same-day.
        self.assertEqual(get_settlement_day_convention('KZT', 'USD'), 0)

    def test_t_plus_1(self):
        # PHP priced in CAD settles the next business day.
        self.assertEqual(get_settlement_day_convention('PHP', 'CAD'), 1)

    def test_ruble_exception(self):
        # RUB pricing is special-cased: GBP/RUB is T+2 while CNY/RUB is T+1.
        self.assertEqual(get_settlement_day_convention('GBP', 'RUB'), 2)
        self.assertEqual(get_settlement_day_convention('CNY', 'RUB'), 1)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"fincalendar.foreign_exchange_config.get_settlement_day_convention"
] |
[((1143, 1158), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1156, 1158), False, 'import unittest\n'), ((369, 429), 'fincalendar.foreign_exchange_config.get_settlement_day_convention', 'get_settlement_day_convention', (['basecurrency', 'pricingcurrency'], {}), '(basecurrency, pricingcurrency)\n', (398, 429), False, 'from fincalendar.foreign_exchange_config import get_settlement_day_convention\n'), ((550, 610), 'fincalendar.foreign_exchange_config.get_settlement_day_convention', 'get_settlement_day_convention', (['basecurrency', 'pricingcurrency'], {}), '(basecurrency, pricingcurrency)\n', (579, 610), False, 'from fincalendar.foreign_exchange_config import get_settlement_day_convention\n'), ((731, 791), 'fincalendar.foreign_exchange_config.get_settlement_day_convention', 'get_settlement_day_convention', (['basecurrency', 'pricingcurrency'], {}), '(basecurrency, pricingcurrency)\n', (760, 791), False, 'from fincalendar.foreign_exchange_config import get_settlement_day_convention\n'), ((919, 979), 'fincalendar.foreign_exchange_config.get_settlement_day_convention', 'get_settlement_day_convention', (['basecurrency', 'pricingcurrency'], {}), '(basecurrency, pricingcurrency)\n', (948, 979), False, 'from fincalendar.foreign_exchange_config import get_settlement_day_convention\n'), ((1038, 1098), 'fincalendar.foreign_exchange_config.get_settlement_day_convention', 'get_settlement_day_convention', (['basecurrency', 'pricingcurrency'], {}), '(basecurrency, pricingcurrency)\n', (1067, 1098), False, 'from fincalendar.foreign_exchange_config import get_settlement_day_convention\n')]
|
__author__ = 'Harsh'
import tweepy
import json
from pymongo import MongoClient
import re
#extracting twitter data
def twitter_extract():
    """Search Twitter for a fixed list of terms and cache the results as JSON.

    For each term, collects up to 1000 tuples of
    (ascii_text, retweet_count, created_at, user_location, coordinates)
    and writes them keyed by the term into 'main_data_twitter.json'.
    """
    # NOTE(review): API credentials are hard-coded; move them to environment
    # variables or a config file before publishing this code.
    auth=tweepy.OAuthHandler('vXcbdBd5ajXqTVwdJEDknvPg1','<KEY>vOUVCmJ2JuZ9iLy9AsDogqhyJM521qDMtVoL49ZO9H')
    auth.set_access_token('<KEY>','<KEY>')
    api=tweepy.API(auth,wait_on_rate_limit=True)
    dict={}  # NOTE(review): shadows the builtin `dict`
    l=['Canada','Dalhousie University','University','Halifax','Canada Education']
    for i in l:
        canada_res=tweepy.Cursor(api.search,q=i,lang='en').items(1000)
        list=[]  # NOTE(review): shadows the builtin `list`
        for j in canada_res:
            # the ascii encode/decode round-trip strips emoji and other non-ASCII
            list.append(((j.text.encode('ascii','ignore')).decode('ascii'),j.retweet_count,str(j.created_at),j.user.location,j.coordinates))
        dict[i]=list
    with open ('main_data_twitter.json','w',) as f:
        json.dump(dict,f)
# Fetch and cache tweets as soon as the module runs.
twitter_extract()
# creating json file for mongodb, cleaning data, creating text file for spark processing
def json_creation():
    """Clean the cached tweets and persist them for MongoDB and Spark.

    Reads 'main_data_twitter.json' (mapping search term -> list of
    [text, retweet_count, created_at, user_location, coordinates]),
    strips links and special characters from each tweet text, then:
      * appends every cleaned text to 'twitter_texts.txt' (pyspark input),
      * dumps the cleaned records as a list of dicts to 'main_data_news.json'.
    """
    main_dict = []
    with open('main_data_twitter.json', 'r') as f, \
            open('twitter_texts.txt', 'w') as f2:
        tweets_by_term = json.load(f)
        for tweet_list in tweets_by_term.values():
            for tweet in tweet_list:
                # Bug fix: remove hyperlinks BEFORE stripping punctuation.
                # The old order destroyed the URL's "://" first, after which
                # r'http\S+' could never match (a space followed "http"),
                # leaving "http t co ..." debris in every cleaned text.
                text = re.sub(r'http\S+', ' ', tweet[0])
                text = re.sub('[^A-Za-z0-9]+', ' ', text)
                text = text.lower().strip()
                record = {
                    'text': text,
                    'retweet': tweet[1],
                    'created at': tweet[2],
                    'user location': tweet[3],
                    'coordinates': tweet[4],
                }
                main_dict.append(record)  # adding to the main list
                # writing to a text file for pyspark processing
                f2.write(record['text'])
    with open('main_data_news.json', 'w') as f1:
        json.dump(main_dict, f1)
# Clean the cached tweets: produces main_data_news.json and twitter_texts.txt.
json_creation() # fetch data from the list ( which contains twitter data ) and saves it to a dictionary and a text file
# for further use.
#saving the dictionary to mongodb
def connect():
    """Load the cleaned records from 'main_data_news.json' into MongoDB.

    Inserts every record into the 'data' collection of the 'twitter'
    database on a local mongod (localhost:27017).
    """
    connection = MongoClient('localhost', 27017)
    database=connection["twitter"]
    data=database['data']
    f1=open('main_data_news.json','r') # opening main dict to save the data
    main_dict=json.load(f1)
    print(type(main_dict))
    print(connection.list_database_names())
    data.insert_many(main_dict)
    #data.insert_many(dict)
    f1.close()
connect() # fetches the created dictionary to store data in mongodb
|
[
"pymongo.MongoClient",
"json.dump",
"json.load",
"tweepy.API",
"tweepy.Cursor",
"tweepy.OAuthHandler",
"re.sub"
] |
[((148, 251), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['"""vXcbdBd5ajXqTVwdJEDknvPg1"""', '"""<KEY>vOUVCmJ2JuZ9iLy9AsDogqhyJM521qDMtVoL49ZO9H"""'], {}), "('vXcbdBd5ajXqTVwdJEDknvPg1',\n '<KEY>vOUVCmJ2JuZ9iLy9AsDogqhyJM521qDMtVoL49ZO9H')\n", (167, 251), False, 'import tweepy\n'), ((299, 340), 'tweepy.API', 'tweepy.API', (['auth'], {'wait_on_rate_limit': '(True)'}), '(auth, wait_on_rate_limit=True)\n', (309, 340), False, 'import tweepy\n'), ((2268, 2299), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (2279, 2299), False, 'from pymongo import MongoClient\n'), ((2454, 2467), 'json.load', 'json.load', (['f1'], {}), '(f1)\n', (2463, 2467), False, 'import json\n'), ((790, 808), 'json.dump', 'json.dump', (['dict', 'f'], {}), '(dict, f)\n', (799, 808), False, 'import json\n'), ((1058, 1070), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1067, 1070), False, 'import json\n'), ((1999, 2023), 'json.dump', 'json.dump', (['main_dict', 'f1'], {}), '(main_dict, f1)\n', (2008, 2023), False, 'import json\n'), ((469, 510), 'tweepy.Cursor', 'tweepy.Cursor', (['api.search'], {'q': 'i', 'lang': '"""en"""'}), "(api.search, q=i, lang='en')\n", (482, 510), False, 'import tweepy\n'), ((1304, 1338), 're.sub', 're.sub', (['"""[^A-Za-z0-9]+"""', '""" """', 'j[0]'], {}), "('[^A-Za-z0-9]+', ' ', j[0])\n", (1310, 1338), False, 'import re\n'), ((1360, 1389), 're.sub', 're.sub', (['"""http\\\\S+"""', '""" """', 'j[0]'], {}), "('http\\\\S+', ' ', j[0])\n", (1366, 1389), False, 'import re\n'), ((1504, 1533), 're.sub', 're.sub', (['"""http\\\\S+"""', '""" """', 'j[0]'], {}), "('http\\\\S+', ' ', j[0])\n", (1510, 1533), False, 'import re\n')]
|
from flask import Blueprint
# Blueprint grouping the site's web-facing routes; registered by the app factory.
web = Blueprint('web', __name__)
# Imported at the bottom deliberately: views/errors reference `web`, so
# importing them any earlier would create a circular import.
from . import views, errors
|
[
"flask.Blueprint"
] |
[((35, 61), 'flask.Blueprint', 'Blueprint', (['"""web"""', '__name__'], {}), "('web', __name__)\n", (44, 61), False, 'from flask import Blueprint\n')]
|
import cv2
import numpy as np
from PIL import Image
import os
# Batch foreground extraction: for every numbered JPEG under ./photo/myobject,
# run GrabCut once per (a, b) rectangle origin on a 100x100 grid and save each
# masked result under ./save/.
path = "./photo/myobject"
filelist = os.listdir(path)
total_num = len(filelist)
n = 6
for i in range(1,total_num):
    # zero-pad the index so file names are 6 characters wide (e.g. 000001.jpg)
    n = 6 - len(str(i))
    filepath = "./photo/myobject/"+str(0)*n + str(i)+".jpg"
    imgsize = Image.open(filepath)# open with PIL (only used for its .size below)
    print('開啟檔案:' + filepath)
    for a in range(1,101):
        for b in range(1,101):
            print('round %s_%s_%s' % (str(i),str(a),str(b)))
            img = cv2.imread(filepath)
            mask = np.zeros(img.shape[:2],np.uint8)
            # scratch models in the (1, 65) float64 layout grabCut requires
            bgdModel = np.zeros((1,65),np.float64)
            fgdModel = np.zeros((1,65),np.float64)
            # candidate rectangle starting at (a, b), spanning the image size
            rect = (a,b,imgsize.size[0],imgsize.size[1])
            cv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)
            # zero out definite/probable background pixels, keep the rest
            mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
            img = img*mask2[:,:,np.newaxis]
            print('輸出檔案:%s_%s_%s.jpg' %(str(0)*n + str(i),str(a),str(b)))
            cv2.imwrite('./save/%s_%s_%s.jpg' %(str(0)*n + str(i),str(a),str(b)),img)
|
[
"cv2.grabCut",
"numpy.zeros",
"PIL.Image.open",
"cv2.imread",
"numpy.where",
"os.listdir"
] |
[((101, 117), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (111, 117), False, 'import os\n'), ((278, 298), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (288, 298), False, 'from PIL import Image\n'), ((471, 491), 'cv2.imread', 'cv2.imread', (['filepath'], {}), '(filepath)\n', (481, 491), False, 'import cv2\n'), ((511, 544), 'numpy.zeros', 'np.zeros', (['img.shape[:2]', 'np.uint8'], {}), '(img.shape[:2], np.uint8)\n', (519, 544), True, 'import numpy as np\n'), ((567, 596), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (575, 596), True, 'import numpy as np\n'), ((618, 647), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (626, 647), True, 'import numpy as np\n'), ((715, 789), 'cv2.grabCut', 'cv2.grabCut', (['img', 'mask', 'rect', 'bgdModel', 'fgdModel', '(5)', 'cv2.GC_INIT_WITH_RECT'], {}), '(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)\n', (726, 789), False, 'import cv2\n'), ((804, 845), 'numpy.where', 'np.where', (['((mask == 2) | (mask == 0))', '(0)', '(1)'], {}), '((mask == 2) | (mask == 0), 0, 1)\n', (812, 845), True, 'import numpy as np\n')]
|
# factories.py
import factory
from factory.django import DjangoModelFactory
from test_data.models import Person, Thread, Comment, Club
# Defining a factory
class UserFactory(DjangoModelFactory):
    """Builds Person rows with a faked first name."""
    class Meta:
        model = Person
    name = factory.Faker("first_name")
class ThreadFactory(DjangoModelFactory):
    """Builds Thread rows with a short faked title and a generated creator."""
    class Meta:
        model = Thread
    title = factory.Faker("sentence", nb_words=5, variable_nb_words=True)
    creator = factory.SubFactory(UserFactory)
class CommentFactory(DjangoModelFactory):
    """Builds Comment rows with a faked body, poster and parent thread."""
    class Meta:
        model = Comment
    body = factory.Faker("sentence", nb_words=15, variable_nb_words=True)
    poster = factory.SubFactory(UserFactory)
    thread = factory.SubFactory(ThreadFactory)
class ClubFactory(DjangoModelFactory):
    """Builds Club rows with a short faked name (members not yet wired up)."""
    class Meta:
        model = Club
    name = factory.Faker("sentence", nb_words=5, variable_nb_words=True)
    # member = factory.SubFactory(UserFactory)
|
[
"factory.SubFactory",
"factory.Faker"
] |
[((248, 275), 'factory.Faker', 'factory.Faker', (['"""first_name"""'], {}), "('first_name')\n", (261, 275), False, 'import factory\n'), ((371, 432), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(5)', 'variable_nb_words': '(True)'}), "('sentence', nb_words=5, variable_nb_words=True)\n", (384, 432), False, 'import factory\n'), ((447, 478), 'factory.SubFactory', 'factory.SubFactory', (['UserFactory'], {}), '(UserFactory)\n', (465, 478), False, 'import factory\n'), ((574, 636), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(15)', 'variable_nb_words': '(True)'}), "('sentence', nb_words=15, variable_nb_words=True)\n", (587, 636), False, 'import factory\n'), ((650, 681), 'factory.SubFactory', 'factory.SubFactory', (['UserFactory'], {}), '(UserFactory)\n', (668, 681), False, 'import factory\n'), ((695, 728), 'factory.SubFactory', 'factory.SubFactory', (['ThreadFactory'], {}), '(ThreadFactory)\n', (713, 728), False, 'import factory\n'), ((818, 879), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(5)', 'variable_nb_words': '(True)'}), "('sentence', nb_words=5, variable_nb_words=True)\n", (831, 879), False, 'import factory\n')]
|
from service import formatter
from . import GeorefMockTest
class FormattingTest(GeorefMockTest):
    """Unit tests for the response-formatting helpers in service.formatter."""
    def test_fields_list_to_dict(self):
        """fields_list_to_dict should return a dictionary equivalent to the
        values of the list, un-flattened."""
        fields = (
            'id',
            'nombre',
            'provincia.id',
            'provincia.nombre',
            'ubicacion.lat',
            'ubicacion.lon',
            'prueba.foo.bar',
            'prueba.foo.baz'
        )
        fields_dict = formatter.fields_list_to_dict(fields)
        self.assertEqual(fields_dict, {
            'id': True,
            'nombre': True,
            'provincia': {
                'id': True,
                'nombre': True
            },
            'ubicacion': {
                'lat': True,
                'lon': True
            },
            'prueba': {
                'foo': {
                    'bar': True,
                    'baz': True
                }
            }
        })
    def test_flatten_dict(self):
        """A nested dictionary should be flattened correctly (in place)."""
        original = {
            'provincia': {
                'id': '06',
                'nombre': '<NAME>'
            },
            'foo': 'bar'
        }
        formatter.flatten_dict(original)
        self.assertEqual(original, {
            'provincia_id': '06',
            'provincia_nombre': '<NAME>',
            'foo': 'bar'
        })
    def test_flatten_dict_max_depth(self):
        """Flattening should fail on dictionaries that are too deep."""
        deep_dict = {
            'a': {
                'b': {
                    'c': {
                        'd': {}
                    }
                }
            }
        }
        with self.assertRaises(RuntimeError):
            formatter.flatten_dict(deep_dict)
    def test_flatten_dict_max_depth_circular(self):
        """Flattening should fail on dictionaries or lists containing
        circular references."""
        c_dict = {}
        c_dict['a'] = c_dict
        with self.assertRaises(RuntimeError):
            formatter.flatten_dict(c_dict)
    def test_filter_result_fields(self):
        """It should be possible to filter the fields of a dictionary, using
        another dictionary to specify which fields should be kept."""
        result = {
            'simple': 'foo',
            'removed': 'foo',
            'nested': {
                'field1': 'foo',
                'field2': 'foo',
                'removed': 'foo',
                'nested2': {
                    'field1': 'foo',
                    'removed': 'foo'
                }
            }
        }
        fields = (
            'simple',
            'nested.field1',
            'nested.field2',
            'nested.nested2.field1'
        )
        formatter.filter_result_fields(result,
                                       formatter.fields_list_to_dict(fields))
        self.assertEqual(result, {
            'simple': 'foo',
            'nested': {
                'field1': 'foo',
                'field2': 'foo',
                'nested2': {
                    'field1': 'foo'
                }
            }
        })
    def test_xml_max_depth(self):
        """XML conversion should fail on dictionaries that are too deep."""
        deep_dict = {
            'a': {
                'b': {
                    'c': {
                        'd': {}
                    }
                }
            }
        }
        with self.assertRaises(RuntimeError):
            formatter.value_to_xml('test', deep_dict, max_depth=3)
    def test_xml_max_depth_circular(self):
        """XML conversion should fail on dictionaries or lists containing
        circular references."""
        c_dict = {}
        c_dict['a'] = c_dict
        with self.assertRaises(RuntimeError):
            formatter.value_to_xml('test', c_dict)
    def test_xml_structure(self):
        """The root node of every XML response should be the
        'georef-ar-api' tag."""
        self.set_msearch_results([])
        resp = self.get_response(params={'formato': 'xml'},
                                 endpoint='/api/provincias',
                                 entity='provincias')
        self.assertEqual(resp.tag, 'georef-ar-api')
|
[
"service.formatter.flatten_dict",
"service.formatter.value_to_xml",
"service.formatter.fields_list_to_dict"
] |
[((547, 584), 'service.formatter.fields_list_to_dict', 'formatter.fields_list_to_dict', (['fields'], {}), '(fields)\n', (576, 584), False, 'from service import formatter\n'), ((1301, 1333), 'service.formatter.flatten_dict', 'formatter.flatten_dict', (['original'], {}), '(original)\n', (1323, 1333), False, 'from service import formatter\n'), ((1876, 1909), 'service.formatter.flatten_dict', 'formatter.flatten_dict', (['deep_dict'], {}), '(deep_dict)\n', (1898, 1909), False, 'from service import formatter\n'), ((2182, 2212), 'service.formatter.flatten_dict', 'formatter.flatten_dict', (['c_dict'], {}), '(c_dict)\n', (2204, 2212), False, 'from service import formatter\n'), ((3004, 3041), 'service.formatter.fields_list_to_dict', 'formatter.fields_list_to_dict', (['fields'], {}), '(fields)\n', (3033, 3041), False, 'from service import formatter\n'), ((3681, 3735), 'service.formatter.value_to_xml', 'formatter.value_to_xml', (['"""test"""', 'deep_dict'], {'max_depth': '(3)'}), "('test', deep_dict, max_depth=3)\n", (3703, 3735), False, 'from service import formatter\n'), ((3999, 4037), 'service.formatter.value_to_xml', 'formatter.value_to_xml', (['"""test"""', 'c_dict'], {}), "('test', c_dict)\n", (4021, 4037), False, 'from service import formatter\n')]
|
import random, math, flag
from functions import *
class Robot:
    def __init__(self, screen, color, name):
        """Create a robot at grid cell (0, 0) facing east.

        screen -- drawing surface (presumably pygame; comes from the caller — TODO confirm)
        color  -- fill color for the robot's triangle
        name   -- display name of this robot
        """
        self.screen = screen
        self.name = name
        self.row = 0
        self.col = 0
        self.color = color
        self.sides = 3  # drawn as a triangle (see getCorners)
        self.radius = 40*scaling  # `scaling` comes from `functions` (star import)
        self.direction = 'east'
        self.checkpoint = (0,0)  # respawn cell (row, col)
        self.next_action = ''
        # `font` and `yellow` come from `functions` (star import)
        self.text = font.render('', True,yellow)
        self.blink = False
        self.flags = []  # colors of flags collected so far
        self.hp = 8
        self.max_hp = 12
        self.laser_end = None  # (row, col) where the last fired laser stopped
def setStart(self, row, col):
self.row = row
self.col = col
self.checkpoint = (row,col)
def flagsINeed(self, flags):
#Returns a list of flags that this robot still needs
to_return = []
for f in flags:
if not(f.color in self.flags):
to_return.append(f)
return to_return
def turnTowardDirection(self, direction):
'''Pre: direction is a string, not an int.
This function has a bias toward turning left.
This function will request a turn even if the
bot is currently facing the desired direction.
Post: returns a string, left or right,
representing quickest rotation to turn to face
direction.'''
if direction == 'north':
if self.direction == 'west':
return 'right'
else:
return 'left'
elif direction == 'south':
if self.direction == 'east':
return 'right'
else:
return 'left'
elif direction == 'east':
if self.direction == 'north':
return 'right'
else:
return 'left'
else: #elif direction == 'west':
if self.direction == 'south':
return 'right'
else:
return 'left'
    def shootLaser(self, board, players):
        """Fire a laser in the facing direction; return (row, col) where it stops.

        The beam travels cell by cell until it reaches either the wall limit
        or the first robot in line. A hit robot takes 3 damage; at <= 0 hp it
        respawns at its checkpoint with 1 hp, and the beam's stop point is the
        cell the robot occupied when hit. Helpers blockedByWall /
        getRowColChange / getPlayerAt come from `functions` (star import).
        """
        #Get the limit of the laser in terms of walls before checking player collisions
        row_limit, col_limit = blockedByWall(board, self.row, self.col, self.direction)
        #Check for a robot that got hit
        robot_hit = None  # NOTE(review): never used below
        row_change,col_change = getRowColChange(self.direction)
        temp_row = self.row + row_change
        temp_col = self.col + col_change
        p = getPlayerAt(players, temp_row, temp_col)
        # advance one cell at a time until a robot or the wall limit is reached
        while p==None and not(temp_row==row_limit and temp_col==col_limit):
            temp_row = temp_row + row_change
            temp_col = temp_col + col_change
            p = getPlayerAt(players, temp_row, temp_col)
        #If a player got hit, deal 3 damage
        if p!=None:
            p.hp -= 3
            #Remember previous row and col so you know where to send the robot
            temp_row = p.row
            temp_col = p.col
            #Check for sending player back to checkpoint
            if p.hp <= 0:
                p.hp = 1
                p.row,p.col = p.checkpoint
            return temp_row,temp_col
        #Return the end location of the laser
        else:
            return row_limit,col_limit
    def doAction(self, board, players, flags, action):
        """Execute one queued action string, then clear the pending action/label.

        Recognized actions: 'left', 'right', 'move', 'zap', 'wait'; anything
        else only clears the queue.
        """
        if action == 'left':
            self.rotateLeft()
        elif action == 'right':
            self.rotateRight()
        elif action == 'move':
            self.move(board, players, flags)
        elif action == 'zap':
            self.laser_end = self.shootLaser(board, players)
        elif action == 'wait':
            # waiting in place still triggers tile effects (holes, checkpoints, flags)
            self.eventAtLocation(board, flags)
        self.next_action = ''
        self.text = font.render('', True,yellow)
    def isNorth(self, row):
        """True if *row* lies above (north of) this robot."""
        return row<self.row
    def isSouth(self, row):
        """True if *row* lies below (south of) this robot."""
        return row>self.row
    def isEast(self, col):
        """True if *col* lies to the right (east) of this robot."""
        return col>self.col
    def isWest(self, col):
        """True if *col* lies to the left (west) of this robot."""
        return col<self.col
def chooseAction(self, board, players, flags):
#Choose an action randomly
r = random.randint(0,4)
if r == 0:
self.next_action = 'left'
elif r == 1:
self.next_action = 'right'
elif r == 2:
self.next_action = 'move'
elif r == 3:
self.next_action = 'wait'
else:
self.next_action = 'zap'
return self.next_action
def rotateLeft(self):
index = direction_list.index(self.direction)
index -= 1
if index < 0:
index = len(direction_list)-1
self.direction = direction_list[index]
def rotateRight(self):
index = direction_list.index(self.direction)
index = (index+1)%len(direction_list)
self.direction = direction_list[index]
    def forceMove(self, board, players, direction, flags):
        """Shove this robot one cell in *direction*, recursively shoving any
        robot already occupying the destination, then apply tile effects.
        Unlike move(), this does not check walls — the shove always happens.
        """
        row_change,col_change = getRowColChange(direction)
        temp_row = self.row + row_change
        temp_col = self.col + col_change
        #Check for another player that got shoved and
        #pass the shove down the line.
        p = getPlayerAt(players, temp_row, temp_col)
        if p != None:
            p.forceMove(board, players, direction, flags)
        #Complete the move
        self.row = temp_row
        self.col = temp_col
        self.eventAtLocation(board, flags)
    def move(self, board, players, flags):
        """Move one cell in the facing direction if no wall blocks it,
        shoving any robot occupying the destination, then apply tile effects."""
        if canMove(board, players, self.row, self.col, self.direction):
            row_change,col_change = getRowColChange(self.direction)
            temp_row = self.row + row_change
            temp_col = self.col + col_change
            #If someone else is in this space, shove them.
            p = getPlayerAt(players, temp_row, temp_col)
            if p != None:
                p.forceMove(board, players, self.direction, flags)
            #Complete the move
            self.row = temp_row
            self.col = temp_col
            self.eventAtLocation(board, flags)
def eventAtLocation(self, board, flags):
'''Check for and activate any event at current location'''
#Check for off the board and reset to a checkpoint
if outOfBounds(board,self.row,self.col):
self.row, self.col = self.checkpoint
#Check for falling in a hole
elif board[self.row][self.col] == 'hole':
#print('Fell in a hole. Reset to last checkpoint')
self.row, self.col = self.checkpoint
#Lose half health rounded up
self.hp = int(self.hp/2)
if self.hp == 0:
self.hp = 1
#Update our checkpoint if we landed on a checkpoint
elif board[self.row][self.col] == 'chek':
self.checkpoint = (self.row, self.col)
#Also gain 4 health
self.hp = min(self.max_hp, self.hp+4)
#Check for getting a flag
for f in flags:
if f.row==self.row and f.col==self.col and not(f.color in self.flags):
self.flags.append(f.color)
def getCenter(self):
offset_x = self.col*tile_width + tile_width/2
offset_y = self.row*tile_height + tile_height/2
return offset_x, offset_y
def getCorners(self):
#Returns list of points to draw the robot
points = []
offset_x, offset_y = self.getCenter()
heading = 0
if self.direction == 'north':
heading = -math.pi/2
elif self.direction == 'south':
heading = math.pi/2
elif self.direction == 'west':
heading = math.pi
#Nose
angle = heading+math.pi*2
x = offset_x + math.cos(angle)*self.radius*1.5
y = offset_y + math.sin(angle)*self.radius*1.5
points.append([x, y])
#wing 1
angle = heading+math.pi*2*(1.2/self.sides)
x = offset_x + math.cos(angle)*self.radius
y = offset_y + math.sin(angle)*self.radius
points.append([x, y])
#rear
angle = heading+math.pi
x = offset_x + math.cos(angle)*self.radius*0.5
y = offset_y + math.sin(angle)*self.radius*0.5
points.append([x, y])
#wing 2
angle = heading+math.pi*2*(1.8/self.sides)
x = offset_x + math.cos(angle)*self.radius
y = offset_y + math.sin(angle)*self.radius
points.append([x, y])
return points
def draw(self):
#Draw all flags you are carrying
i = -1.5
for f in self.flags:
temp = flag.Flag(self.screen, self.row, self.col, f)
temp.drawSmall(int(0.25*i*tile_width), 0.5)
i = i+1
#Blink
c = self.color
if self.blink:
c = black
#Draw outline of ship.
points = self.getCorners()
pygame.draw.polygon(self.screen, c,
points,int(scaling*8))
#Draw current action
offset_x, offset_y = self.getCenter()
self.screen.blit(self.text, (offset_x, offset_y))
#Draw health bar
r = pygame.Rect(offset_x-tile_width/2.4,
offset_y+tile_height*0.3,
tile_width*0.8,
tile_height*0.1)
pygame.draw.rect(self.screen, red, r)
r = pygame.Rect(offset_x-tile_width/2.4,
offset_y+tile_height*0.3,
tile_width*0.8*(self.hp/self.max_hp),
tile_height*0.1)
pygame.draw.rect(self.screen, green, r)
#Draw laser beam
if self.laser_end!=None:
x = self.laser_end[1]*tile_width + tile_width/2
y = self.laser_end[0]*tile_height + tile_height/2
start = (offset_x, offset_y)
end = (x,y)
pygame.draw.line(self.screen, red, start, end, 3)
pygame.draw.line(self.screen, white, start, end, 1)
|
[
"flag.Flag",
"random.randint",
"math.sin",
"math.cos"
] |
[((3963, 3983), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (3977, 3983), False, 'import random, math, flag\n'), ((8352, 8397), 'flag.Flag', 'flag.Flag', (['self.screen', 'self.row', 'self.col', 'f'], {}), '(self.screen, self.row, self.col, f)\n', (8361, 8397), False, 'import random, math, flag\n'), ((7710, 7725), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (7718, 7725), False, 'import random, math, flag\n'), ((7761, 7776), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (7769, 7776), False, 'import random, math, flag\n'), ((8095, 8110), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (8103, 8110), False, 'import random, math, flag\n'), ((8146, 8161), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (8154, 8161), False, 'import random, math, flag\n'), ((7503, 7518), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (7511, 7518), False, 'import random, math, flag\n'), ((7558, 7573), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (7566, 7573), False, 'import random, math, flag\n'), ((7888, 7903), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (7896, 7903), False, 'import random, math, flag\n'), ((7943, 7958), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (7951, 7958), False, 'import random, math, flag\n')]
|
# https://leetcode.com/problems/pseudo-palindromic-paths-in-a-binary-tree/
# idea: DFS
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
from collections import Counter
def dfs(self, r, path):
#print(r.val)
a1 = 0
a2 = 0
if r.left == None and r.right == None:
path = path + str(r.val)
#print(r)
# check
#print(path)
#print('----')
cnt = Counter(path)
n_odd = 0
for k in cnt:
if cnt[k] % 2 == 1:
n_odd += 1
if n_odd > 1:
return 0
return 1
else:
if r.left:
a1 = self.dfs(r.left, path + str(r.val))
if r.right:
a2 = self.dfs(r.right, path + str(r.val))
return a1 + a2
def pseudoPalindromicPaths (self, root: TreeNode) -> int:
ans = self.dfs(root, '')
#print('#####')
return ans
|
[
"collections.Counter"
] |
[((608, 621), 'collections.Counter', 'Counter', (['path'], {}), '(path)\n', (615, 621), False, 'from collections import Counter\n')]
|
import logging
class Watcher:
"""
A basic Watcher
Users can register a Listener to this Watcher to get updates when the
object the Watcher watches is updated.
"""
def __init__(self):
"Initialize the watcher with an empty set of listeners"
# When Watcher is subclassed, this needs to be called explicitly
self.listeners = [] # set()
self.logger = logging.getLogger("bitey.watcher.Watcher")
def register(self, listener):
"""
Register to get messages when this component is updated.
listener is a Listener
"""
if ("listeners" in self.__dict__) and self.listeners:
self.listeners.append(listener)
else:
# TODO: Figure out why there's lookup issues with base class / subclass
self.listeners = [] # set()
self.listeners.append(listener)
def update(self):
"""
Update any registered listeners that the watched object was changed.
obj is the object
"""
# When called by the subclass, the subclass attributes aren't properly searched
if ("listeners" in self.__dict__) and self.listeners:
for listener in self.listeners:
listener.update(self)
|
[
"logging.getLogger"
] |
[((406, 448), 'logging.getLogger', 'logging.getLogger', (['"""bitey.watcher.Watcher"""'], {}), "('bitey.watcher.Watcher')\n", (423, 448), False, 'import logging\n')]
|
import clr, os.path
from sys import path
path.append(os.path.abspath(__file__ + "\\..\\..\\..\\bin"))
clr.AddReference("NationalInstruments.RFToolkits.Interop.Fx40")
clr.AddReference("NationalInstruments.ModularInstruments.Interop.Fx40")
import NationalInstruments.RFToolkits.Interop as Toolkits
import NationalInstruments.ModularInstruments.Interop as ModInst
import System.Runtime.InteropServices as InteropServices
# Instrument Parameters
rfsaResourceNames = ["VST_01", "VST_02"]
masterReferenceClockSource = ModInst.niRFSAConstants.PxiClkStr
carrierFrequency = 5.18e+9
referenceLevels = [0.0, 0.0]
externalAttenuations = [0.0, 0.0]
triggerLines = [2, 3, 4] # PXI trigger lines for routing synchronization lines on VST1 models
# LO Distribution Parameters
LOSource = Toolkits.niWLANAConstants.LOSourceExternal # sets LO source to LO_in for each analyzer
externalLOResourceName = "LO_01" # implies WLANA will control the external LO
externalLOReferenceClockSource = ModInst.niRFSGConstants.OnBoardClockStr
LOChannelName = "lo2"
rfsaLODaisyEnabled = True
LOExportToExternalDeviceEnabled = False # specifies whether to export the LO on last analyzer in the daisy chain
# Measurement Parameters
standard = Toolkits.niWLANAConstants.Standard80211axMimoOfdm
numRx = len(rfsaResourceNames)
chBW = 80.00e+6
ofdmNoOfAverages = 10
ofdmMaxSymbolsUsed = 16
optimizeReferenceLevelForEVMEnabled = False
optimizeReferenceLevelForEVMMargin = 0
noiseCompensationEnabled = False
autoLevelEnabled = False
# Configure Toolkit Session
wlana = Toolkits.niWLANA(Toolkits.niWLANAConstants.CompatibilityVersion050000)
wlana.SetStandard(None, standard)
wlana.SetNumberOfReceiveChannels(None, numRx)
wlana.SetChannelBandwidth(None, chBW)
wlana.SetCarrierFrequency(None, carrierFrequency)
wlana.SetIQPowerEdgeReferenceTriggerEnabled(None, True)
wlana.SetTClkSynchronizationEnabled(None, True)
wlana.SetOptimizeReferenceLevelForEVMEnabled(None, optimizeReferenceLevelForEVMEnabled)
wlana.SetOptimizeReferenceLevelForEVMMargin(None, optimizeReferenceLevelForEVMMargin)
wlana.SetNoiseCompensationEnabled(None, noiseCompensationEnabled)
wlana.SetLOOffsetMode(None, Toolkits.niWLANAConstants.LOFrequencyOffsetModeAuto)
wlana.SetOfdmDemodEnabled(None, True)
wlana.SetOfdmDemodNumberOfAverages(None, ofdmNoOfAverages)
wlana.SetOfdmDemodMaximumSymbolsUsed(None, ofdmMaxSymbolsUsed)
wlana.SetOFDMDemodAutoComputeMeasurementLengthEnabled(None, True)
# Configure External LO
externalLOHandle = InteropServices.HandleRef()
if externalLOResourceName is not None and LOSource == Toolkits.niWLANAConstants.LOSourceExternal:
externalLOSession = ModInst.niRFSG(externalLOResourceName, True, False)
externalLOSession.ConfigureRefClock(externalLOReferenceClockSource, 10e6)
externalLOSession.SetGenerationMode(None, ModInst.niRFSGConstants.Cw)
externalLOHandle = externalLOSession.Handle
# Configure Analyzer Sessions
rfsaSessions = [ModInst.niRFSA(resourceName, True, False) for resourceName in rfsaResourceNames]
rfsaHandles = [rfsaSession.Handle for rfsaSession in rfsaSessions]
wlana.RFSAConfigureMultipleDeviceSynchronization(rfsaHandles, numRx, masterReferenceClockSource, triggerLines, len(triggerLines))
wlana.RFSAConfigureFrequencySingleLO(rfsaHandles, LOSource, externalLOHandle, carrierFrequency, rfsaLODaisyEnabled, LOExportToExternalDeviceEnabled)
for i in range(numRx):
rfsaSessions[i].SetExternalGain(None, -externalAttenuations[i])
if autoLevelEnabled:
_, actualReferencePowerLevel = wlana.RFSAAutoLevelv2(rfsaHandles[i], None, 10e-3, 5)
else:
_, actualReferencePowerLevel = wlana.RFSAConfigureOptimalEVMReferenceLevel(rfsaHandles[i], None, referenceLevels[i], 0.0)
if i == 0:
rfsaSessions[i].ConfigureIQPowerEdgeRefTrigger('0', actualReferencePowerLevel - 20.00, ModInst.niRFSAConstants.RisingSlope, 0)
print(rfsaResourceNames[i] + " actual reference level (dBm): " + str(actualReferencePowerLevel))
# # Cascade LO Power Levels for Optimal Performance
# if rfsaLODaisyEnabled:
# if externalLOResourceName is not None and LOSource == Toolkits.niWLANAConstants.LOSourceExternal:
# _, loOutPower = externalLOSession.GetPowerLevel(None, 0.0)
# rfsaSessions[0].SetLoInPower(LOChannelName, loOutPower)
# rfsaSessions[0].SetLoOutPower(LOChannelName, loOutPower)
# for i in range(numRx - 1):
# _, loOutPower = rfsaSessions[i].GetLoOutPower(LOChannelName, 0.0)
# rfsaSessions[i + 1].SetLoInPower(LOChannelName, loOutPower)
# _, loInPower = rfsaSessions[i + 1].GetLoInPower(LOChannelName, 0.0)
# else:
# pass # loss through splitter will need to be considered
# Measure and Print Results
if externalLOResourceName is not None and LOSource == Toolkits.niWLANAConstants.LOSourceExternal:
externalLOSession.Initiate()
wlana.RFSAMIMOMeasure(rfsaHandles, None, numRx, 10)
_, detectedNumberOfSymbolsUsed = wlana.GetOFDMDemodNumberOfSymbolsUsed(None, 0)
print("Number of Symbols Used: " + str(detectedNumberOfSymbolsUsed))
_, numberOfSpaceTimeStreams = wlana.GetCurrentIterationOFDMDemodNumberOfSpaceTimeStreams(None, 0)
for i in range(numberOfSpaceTimeStreams):
channelString = "Stream" + str(i)
_, rmsEvm = wlana.GetResultOfdmDemodRmsEvmAverage(channelString, 0.0)
print(channelString + " RMS EVM: " + str(rmsEvm))
# Close Sessions
wlana.Close()
if externalLOResourceName is not None and LOSource == Toolkits.niWLANAConstants.LOSourceExternal:
externalLOSession.close()
for rfsaSession in rfsaSessions:
rfsaSession.Close()
|
[
"NationalInstruments.ModularInstruments.Interop.niRFSG",
"System.Runtime.InteropServices.HandleRef",
"clr.AddReference",
"NationalInstruments.RFToolkits.Interop.niWLANA",
"NationalInstruments.ModularInstruments.Interop.niRFSA"
] |
[((107, 170), 'clr.AddReference', 'clr.AddReference', (['"""NationalInstruments.RFToolkits.Interop.Fx40"""'], {}), "('NationalInstruments.RFToolkits.Interop.Fx40')\n", (123, 170), False, 'import clr, os.path\n'), ((172, 243), 'clr.AddReference', 'clr.AddReference', (['"""NationalInstruments.ModularInstruments.Interop.Fx40"""'], {}), "('NationalInstruments.ModularInstruments.Interop.Fx40')\n", (188, 243), False, 'import clr, os.path\n'), ((1570, 1640), 'NationalInstruments.RFToolkits.Interop.niWLANA', 'Toolkits.niWLANA', (['Toolkits.niWLANAConstants.CompatibilityVersion050000'], {}), '(Toolkits.niWLANAConstants.CompatibilityVersion050000)\n', (1586, 1640), True, 'import NationalInstruments.RFToolkits.Interop as Toolkits\n'), ((2521, 2548), 'System.Runtime.InteropServices.HandleRef', 'InteropServices.HandleRef', ([], {}), '()\n', (2546, 2548), True, 'import System.Runtime.InteropServices as InteropServices\n'), ((2673, 2724), 'NationalInstruments.ModularInstruments.Interop.niRFSG', 'ModInst.niRFSG', (['externalLOResourceName', '(True)', '(False)'], {}), '(externalLOResourceName, True, False)\n', (2687, 2724), True, 'import NationalInstruments.ModularInstruments.Interop as ModInst\n'), ((2978, 3019), 'NationalInstruments.ModularInstruments.Interop.niRFSA', 'ModInst.niRFSA', (['resourceName', '(True)', '(False)'], {}), '(resourceName, True, False)\n', (2992, 3019), True, 'import NationalInstruments.ModularInstruments.Interop as ModInst\n')]
|
"""
Math Utilities
"""
import collections
import numpy as np
from nnlib.utils.functional import not_none
__all__ = ['FNVHash', 'ceil_div', 'normalize', 'pow', 'prod', 'sum', 'random_subset', 'softmax']
class FNVHash:
hval = 0x811c9dc5
fnv_32_prime = 0x01000193
uint32_max = 2 ** 32
@staticmethod
def hash(s):
h = FNVHash.hval
for ch in s:
h = ((h ^ ord(ch)) * FNVHash.fnv_32_prime) % FNVHash.uint32_max
return h
def ceil_div(a, b):
r"""
Integer division that rounds up.
"""
return (a + b - 1) // b
def normalize(xs):
r"""
NumPy-based normalization.
"""
arr = np.asarray(xs, dtype=np.float)
return arr / np.sum(arr)
# noinspection PyShadowingBuiltins
def pow(a, b, fast=True):
r"""
Compute ``a ** b`` (``a`` raised to ``b``-th power). ``b`` has to be a positive integer.
**Note:** It is not required for ``type(a)`` to have an identity element.
:param a: The base. Can be any variable that supports multiplication ``*``.
:type b: int
:param b: The exponent.
:param fast: Whether to use fast exponent algorithm (that runs in logarithmic time).
"""
if type(b) is not int:
raise TypeError("Exponent should be a positive integer.")
if b < 1:
raise ValueError("Exponent should be a positive integer.")
result = a
b -= 1
if fast: # O(log b)
while b > 0:
if b % 2 == 1:
result *= a
b //= 2
a *= a
else: # O(b)
while b > 0:
result *= a
b -= 1
return result
def _reduce(fn, *args):
r"""
Recursively reduce over sequence of values, where values can be sequences. None values are ignored.
:type fn: (Any, Any) -> Any
:param fn: Function taking (accumulator, element) and returning new accumulator.
"""
result = None
for x in filter(not_none, args):
val = _reduce(fn, *x) if isinstance(x, collections.Iterable) else x
if result is None:
result = val
else:
result = fn(result, val)
return result
def _ireduce(fn, *args):
r"""
In-place version of ``_reduce``.
:type fn: (Any, Any) -> None
:param fn: Function taking (accumulator, element) and performing in-place operation on accumulator.
"""
return _reduce(lambda x, y: [fn(x, y), x][-1], *args)
def prod(*args):
r"""
Compute product of arguments, ignoring ``None`` values. Arguments could contain lists, or list of lists, etc.
**Note:** It is not required for list elements to have an identity element.
"""
return _reduce(lambda x, y: x.__rmul__(y), *args)
# noinspection PyShadowingBuiltins
def sum(*args):
r"""
Compute sum of arguments, ignoring ``None`` values. Arguments could contain lists, or list of lists, etc.
**Note:** It is not required for list elements to have an identity element.
"""
return _reduce(lambda x, y: x.__add__(y), *args)
def random_subset(total, size):
r"""
Select a random subset of size ``size`` from the larger set of size ``total``.
This method is implemented to replace :func:`numpy.random.choice` with :attr:`replacement=False`.
:param total: Size of the original set.
:param size: Size of the randomly selected subset.
:return: The 0-based indices of the subset elements.
"""
if isinstance(size, float):
size = int(total * size)
# Don't trust `np.random.choice` without replacement! It's using brute force!
if size * np.log(size) > total:
return np.random.permutation(np.arange(total))[:size]
else:
choices = set()
while len(choices) < size:
choices.add(np.random.choice(total, size - len(choices)).tolist())
return choices
def softmax(xs, t=1):
r"""
NumPy-based softmax with temperature. Returns a sequence with each element calculated as:
.. math::
s_i = \frac{ \exp(x_i / t) }{ \sum_x \exp(x / t) }
:param xs: The sequence of weights.
:param t: Temperature. Higher temperatures give a more uniform distribution, while lower temperatures give a more
peaked distribution.
"""
arr = np.exp(np.asarray(xs) / t)
return arr / np.sum(arr)
|
[
"numpy.arange",
"numpy.asarray",
"numpy.sum",
"numpy.log"
] |
[((657, 687), 'numpy.asarray', 'np.asarray', (['xs'], {'dtype': 'np.float'}), '(xs, dtype=np.float)\n', (667, 687), True, 'import numpy as np\n'), ((705, 716), 'numpy.sum', 'np.sum', (['arr'], {}), '(arr)\n', (711, 716), True, 'import numpy as np\n'), ((4282, 4293), 'numpy.sum', 'np.sum', (['arr'], {}), '(arr)\n', (4288, 4293), True, 'import numpy as np\n'), ((3576, 3588), 'numpy.log', 'np.log', (['size'], {}), '(size)\n', (3582, 3588), True, 'import numpy as np\n'), ((4245, 4259), 'numpy.asarray', 'np.asarray', (['xs'], {}), '(xs)\n', (4255, 4259), True, 'import numpy as np\n'), ((3635, 3651), 'numpy.arange', 'np.arange', (['total'], {}), '(total)\n', (3644, 3651), True, 'import numpy as np\n')]
|
"""Beat-synchronous chroma feature calculation with LabROSA.
<NAME> <EMAIL> 2016-04-08
"""
from __future__ import print_function
import cPickle as pickle
import getopt
import os
import sys
import time
import numpy as np
import scipy
import sklearn.mixture
import librosa
def read_iso_label_file(filename):
"""Read in an isophonics-format chord label file."""
times = []
labels = []
with open(filename, 'r') as f:
for line in f:
fields = line.strip().split(' ')
start_secs = float(fields[0])
end_secs = float(fields[1])
times.append((start_secs, end_secs))
labels.append(fields[2])
return np.array(times), labels
def calculate_overlap_durations(ranges_a, ranges_b):
"""Calculate duration of overlaps between all (start, end) intervals."""
max_starts_matrix = np.maximum.outer(ranges_a[:, 0], ranges_b[:, 0])
min_ends_matrix = np.minimum.outer(ranges_a[:, 1], ranges_b[:, 1])
overlap_durations = np.maximum(0, min_ends_matrix - max_starts_matrix)
return overlap_durations
def sample_label_sequence(sample_ranges, label_ranges, labels):
"""Find the most-overlapping label for a list of (start, end) intervals."""
overlaps = calculate_overlap_durations(sample_ranges, label_ranges)
best_label = np.argmax(overlaps, axis=1)
return [labels[i] for i in best_label]
def chord_name_to_index(labels):
"""Convert chord name strings into model indices (0..25)."""
indices = np.zeros(len(labels), dtype=int)
root_degrees = {'C': 0, 'D': 2, 'E': 4, 'F':5, 'G': 7, 'A':9, 'B': 11}
for label_index, label in enumerate(labels):
if label == 'N' or label == 'X':
# Leave at zero.
continue
root_degree = root_degrees[label[0].upper()]
minor = False
if len(label) > 1:
if label[1] == '#':
root_degree = (root_degree + 1) % 12
if label[1] == 'b':
root_degree = (root_degree - 1) % 12
if ':' in label:
modifier = label[label.index(':') + 1:]
if modifier[:3] == 'min':
minor = True
indices[label_index] = 1 + root_degree + 12 * minor
return indices
def calculate_beat_sync_chroma_of_file(wavfilename):
"""Read the audio, calculate beat-sync chroma."""
y, sr = librosa.load(wavfilename, sr=None)
hop_length = 128 # 8 ms at 16 kHz
tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr,
hop_length=hop_length,
start_bpm=240)
# Append a final beat time one beat beyond the end.
extended_beat_frames = np.hstack([beat_frames,
2*beat_frames[-1] - beat_frames[-2]])
frame_chroma = librosa.feature.chroma_cqt(y=y, sr=sr, hop_length=hop_length)
# Drop the first beat_chroma which is stuff before the first beat,
# and the final beat_chroma which is everything after the last beat time.
beat_chroma = librosa.feature.sync(frame_chroma,
extended_beat_frames).transpose()
# Drop first row if the beat_frames start after the beginning.
if beat_frames[0] > 0:
beat_chroma = beat_chroma[1:]
# Keep only as many frames as beat times.
beat_chroma = beat_chroma[:len(beat_frames)]
assert beat_chroma.shape[0] == beat_frames.shape[0]
frame_rate = sr / float(hop_length)
beat_times = beat_frames / frame_rate
return beat_times, beat_chroma
def calculate_label_indices(labfilename, beat_times):
"""Read a label file, sample at beat times, return 0..25 indices."""
# MP3s encoded with lame have a 68 ms delay
LAME_DELAY_SECONDS = 0.068
extended_beat_times = (np.hstack([beat_times,
2*beat_times[-1] - beat_times[-2]]) -
LAME_DELAY_SECONDS)
beat_ranges = np.hstack([extended_beat_times[:-1, np.newaxis],
extended_beat_times[1:, np.newaxis]])
label_time_ranges, labels = read_iso_label_file(labfilename)
beat_labels = sample_label_sequence(beat_ranges, label_time_ranges, labels)
label_indices = chord_name_to_index(beat_labels)
return label_indices
def write_beat_chroma_labels(filename, beat_times, chroma_features,
label_indices):
"""Write out the computed beat-synchronous chroma data."""
# Create the enclosing directory if needed.
directory = os.path.dirname(filename)
if directory and not os.path.exists(directory):
os.makedirs(directory)
with open(filename, "wb") as f:
pickle.dump((beat_times, chroma_features, label_indices),
f, pickle.HIGHEST_PROTOCOL)
def read_beat_chroma_labels(filename):
"""Read back a precomputed beat-synchronous chroma record."""
with open(filename, "rb") as f:
beat_times, chroma_features, label_indices = pickle.load(f)
return beat_times, chroma_features, label_indices
def read_file_list(filename):
"""Read a text file with one item per line."""
items = []
with open(filename, 'r') as f:
for line in f:
items.append(line.strip())
return items
def process_items(input_list_file, wav_base_dir, lab_base_dir, output_base_dir,
start_index, num_to_process):
"""Process files from a list."""
all_ids = read_file_list(input_list_file)
print("total ids in list:", len(all_ids))
if num_to_process > 0:
ids_to_process = all_ids[start_index : start_index + num_to_process]
else:
ids_to_process = all_ids[start_index:]
for number, file_id in enumerate(ids_to_process):
print(time.ctime(), "File {:d} of {:d}: {:s}".format(
number, len(ids_to_process), file_id))
wavfilename = os.path.join(wav_base_dir, file_id + '.mp3')
beat_times, beat_chroma = calculate_beat_sync_chroma_of_file(
wavfilename)
if lab_base_dir:
labfilename = os.path.join(lab_base_dir, file_id + '.txt')
label_indices = calculate_label_indices(labfilename, beat_times)
else:
label_indices = None
beatchromlab_filename = os.path.join(output_base_dir, file_id + '.pkl')
write_beat_chroma_labels(beatchromlab_filename, beat_times,
beat_chroma, label_indices)
#DATA_DIR = '/q/porkpie/porkpie-p9/hog-restored/hog-p9/drspeech/data/music/'
HELP_STRING = '-i <inputlistfile> -o <outputbasedir> -w <wavbasedir> -l <labbasedir> -s <startindex> -n <numtoprocess>'
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv[1:], "hi:o:s:n:w:l:",
["inputlistfile=", "outputbasedir=",
"startindex=", "numtoprocess=",
"wavbasedir=", "labbasedir="])
except getopt.GetoptError:
print(argv[0], HELP_STRING)
sys.exit(2)
input_list_file = 'mp3s-mp3s.txt'
output_base_dir = 'beatchromftrs'
wav_base_dir = 'mp3s-32k'
lab_base_dir = None
start_index = 0
num_to_process = -1
for opt, arg in opts:
if opt == '-h':
print(argv[0], HELP_STRING)
sys.exit()
elif opt in ("-i", "--inputlistfile"):
input_list_file = arg
elif opt in ("-o", "--outputbasedir"):
output_base_dir = arg
elif opt in ("-s", "--startindex"):
start_index = int(arg)
elif opt in ("-n", "--numtoprocess"):
num_to_process = int(arg)
elif opt in ("-w", "--wavbasedir"):
wav_base_dir = arg
elif opt in ("-l", "--labbasedir"):
lab_base_dir = arg
process_items(input_list_file, wav_base_dir, lab_base_dir,
output_base_dir, start_index, num_to_process)
if __name__ == "__main__":
main(sys.argv)
|
[
"numpy.maximum",
"getopt.getopt",
"numpy.argmax",
"time.ctime",
"cPickle.load",
"librosa.feature.sync",
"os.path.join",
"os.path.dirname",
"os.path.exists",
"numpy.maximum.outer",
"librosa.feature.chroma_cqt",
"numpy.hstack",
"librosa.load",
"sys.exit",
"os.makedirs",
"cPickle.dump",
"numpy.array",
"librosa.beat.beat_track",
"numpy.minimum.outer"
] |
[((864, 912), 'numpy.maximum.outer', 'np.maximum.outer', (['ranges_a[:, 0]', 'ranges_b[:, 0]'], {}), '(ranges_a[:, 0], ranges_b[:, 0])\n', (880, 912), True, 'import numpy as np\n'), ((935, 983), 'numpy.minimum.outer', 'np.minimum.outer', (['ranges_a[:, 1]', 'ranges_b[:, 1]'], {}), '(ranges_a[:, 1], ranges_b[:, 1])\n', (951, 983), True, 'import numpy as np\n'), ((1008, 1058), 'numpy.maximum', 'np.maximum', (['(0)', '(min_ends_matrix - max_starts_matrix)'], {}), '(0, min_ends_matrix - max_starts_matrix)\n', (1018, 1058), True, 'import numpy as np\n'), ((1323, 1350), 'numpy.argmax', 'np.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (1332, 1350), True, 'import numpy as np\n'), ((2388, 2422), 'librosa.load', 'librosa.load', (['wavfilename'], {'sr': 'None'}), '(wavfilename, sr=None)\n', (2400, 2422), False, 'import librosa\n'), ((2487, 2560), 'librosa.beat.beat_track', 'librosa.beat.beat_track', ([], {'y': 'y', 'sr': 'sr', 'hop_length': 'hop_length', 'start_bpm': '(240)'}), '(y=y, sr=sr, hop_length=hop_length, start_bpm=240)\n', (2510, 2560), False, 'import librosa\n'), ((2744, 2807), 'numpy.hstack', 'np.hstack', (['[beat_frames, 2 * beat_frames[-1] - beat_frames[-2]]'], {}), '([beat_frames, 2 * beat_frames[-1] - beat_frames[-2]])\n', (2753, 2807), True, 'import numpy as np\n'), ((2864, 2925), 'librosa.feature.chroma_cqt', 'librosa.feature.chroma_cqt', ([], {'y': 'y', 'sr': 'sr', 'hop_length': 'hop_length'}), '(y=y, sr=sr, hop_length=hop_length)\n', (2890, 2925), False, 'import librosa\n'), ((4003, 4094), 'numpy.hstack', 'np.hstack', (['[extended_beat_times[:-1, np.newaxis], extended_beat_times[1:, np.newaxis]]'], {}), '([extended_beat_times[:-1, np.newaxis], extended_beat_times[1:, np\n .newaxis]])\n', (4012, 4094), True, 'import numpy as np\n'), ((4586, 4611), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (4601, 4611), False, 'import os\n'), ((684, 699), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (692, 699), 
True, 'import numpy as np\n'), ((3838, 3898), 'numpy.hstack', 'np.hstack', (['[beat_times, 2 * beat_times[-1] - beat_times[-2]]'], {}), '([beat_times, 2 * beat_times[-1] - beat_times[-2]])\n', (3847, 3898), True, 'import numpy as np\n'), ((4672, 4694), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (4683, 4694), False, 'import os\n'), ((4739, 4829), 'cPickle.dump', 'pickle.dump', (['(beat_times, chroma_features, label_indices)', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '((beat_times, chroma_features, label_indices), f, pickle.\n HIGHEST_PROTOCOL)\n', (4750, 4829), True, 'import cPickle as pickle\n'), ((5042, 5056), 'cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5053, 5056), True, 'import cPickle as pickle\n'), ((5935, 5979), 'os.path.join', 'os.path.join', (['wav_base_dir', "(file_id + '.mp3')"], {}), "(wav_base_dir, file_id + '.mp3')\n", (5947, 5979), False, 'import os\n'), ((6327, 6374), 'os.path.join', 'os.path.join', (['output_base_dir', "(file_id + '.pkl')"], {}), "(output_base_dir, file_id + '.pkl')\n", (6339, 6374), False, 'import os\n'), ((6792, 6940), 'getopt.getopt', 'getopt.getopt', (['argv[1:]', '"""hi:o:s:n:w:l:"""', "['inputlistfile=', 'outputbasedir=', 'startindex=', 'numtoprocess=',\n 'wavbasedir=', 'labbasedir=']"], {}), "(argv[1:], 'hi:o:s:n:w:l:', ['inputlistfile=',\n 'outputbasedir=', 'startindex=', 'numtoprocess=', 'wavbasedir=',\n 'labbasedir='])\n", (6805, 6940), False, 'import getopt\n'), ((3094, 3150), 'librosa.feature.sync', 'librosa.feature.sync', (['frame_chroma', 'extended_beat_frames'], {}), '(frame_chroma, extended_beat_frames)\n', (3114, 3150), False, 'import librosa\n'), ((4637, 4662), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (4651, 4662), False, 'import os\n'), ((5814, 5826), 'time.ctime', 'time.ctime', ([], {}), '()\n', (5824, 5826), False, 'import time\n'), ((6126, 6170), 'os.path.join', 'os.path.join', (['lab_base_dir', "(file_id + '.txt')"], {}), "(lab_base_dir, file_id 
+ '.txt')\n", (6138, 6170), False, 'import os\n'), ((7115, 7126), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (7123, 7126), False, 'import sys\n'), ((7403, 7413), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7411, 7413), False, 'import sys\n')]
|
# Software Name: AIVC
# SPDX-FileCopyrightText: Copyright (c) 2021 Orange
# SPDX-License-Identifier: BSD 3-Clause "New"
#
# This software is distributed under the BSD-3-Clause license.
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
"""
This module contains all the methods needed to train a model:
- Training
- Test
- Load and save a model
- Setting a model to training of evaluation mode
"""
import os
import torch
import math
from func_util.console_display import print_log_msg
from func_util.img_processing import load_frames, cast_before_png_saving
from func_util.nn_util import crop_dic, get_value
from func_util.result_logging import generate_header_file, generate_log_metric_one_frame
from model_mngt.loss_function import compute_metrics_one_GOP, average_N_frame
from real_life.cat_binary_files import cat_one_video
from real_life.bitstream import ArithmeticCoder
def infer_one_sequence(param):
    """
    Infer N successive GOPs from one sequence.

    Relies on infer_one_GOP to generate the output data and metrics for each
    GOP, aggregates the per-frame results into a single dictionary, writes a
    detailed per-frame log file into <working_dir> and (optionally)
    concatenates the per-GOP bitstreams into one final bitstream file.

    Parameters
    ----------
    param : dict
        Accepted keys and their defaults are documented in DEFAULT_PARAM
        below; missing keys fall back to their default via get_value.

    Returns
    -------
    dict
        One entry per coded frame ('frame_<idx>': metrics dict) plus a
        'sequence' entry holding the metrics averaged over all frames.
    """
    DEFAULT_PARAM = {
        # The model to be trained. Must be a nn.Module
        'model': None,
        # The GOP structure defined as in func_util/GOP_structure.py
        'GOP_struct': None,
        # The name of the GOP structure, mainly for logging purpose
        'GOP_struct_name': None,
        # Absolute path of the folder containing the 3N PNG of the sequences
        # (if YUV) of the N PNG if RGB.
        'sequence_path': '',
        # What is the first frame we encode. The previous one are skipped. 0 means
        # we start at the first frame, 1 at the second etc.
        'idx_starting_frame': 0,
        # We want to compress frame from <idx_starting_frame> to <idx_end_frame> **included**
        'idx_end_frame': 8,
        # If True, there is only one PNG per frame, with the 3 color channels in it.
        # If False, each frame needs 3 PNGs to be described: Y, U, and V.
        'rgb': False,
        # How the (raw) video frames are organized. Available:
        #   <old> : used for the ICLR paper i.e. <sequence_path>/idx_<y,u,v>.png
        #   <clic>: <sequence_path>/<sequence_name>_<padded_idx>_<y,u,v>.png
        'loading_mode': 'old',
        # If true, we generate a bitstream at the end
        'generate_bitstream': False,
        # Path of the directory in which we output the bitstream
        'bitstream_dir': '',
        # For multi-rate
        'idx_rate': 0.,
        # Set to true to generate more stuff, useful for debug
        'flag_bitstream_debug': False,
        # All internal log files for the NN will be written in this directory
        'working_dir': '../logs/',
        # Path of the final bitstream file
        'final_bitstream_path': '',
    }
    # ========== RETRIEVE INPUTS ========== #
    model = get_value('model', param, DEFAULT_PARAM)
    GOP_struct = get_value('GOP_struct', param, DEFAULT_PARAM)
    GOP_struct_name = get_value('GOP_struct_name', param, DEFAULT_PARAM)
    sequence_path = get_value('sequence_path', param, DEFAULT_PARAM)
    # nb_GOP = get_value('nb_GOP', param, DEFAULT_PARAM)
    idx_starting_frame = get_value('idx_starting_frame', param, DEFAULT_PARAM)
    idx_end_frame = get_value('idx_end_frame', param, DEFAULT_PARAM)
    rgb = get_value('rgb', param, DEFAULT_PARAM)
    loading_mode = get_value('loading_mode', param, DEFAULT_PARAM)
    generate_bitstream = get_value('generate_bitstream', param, DEFAULT_PARAM)
    bitstream_dir = get_value('bitstream_dir', param, DEFAULT_PARAM)
    idx_rate = get_value('idx_rate', param, DEFAULT_PARAM)
    flag_bitstream_debug = get_value('flag_bitstream_debug', param, DEFAULT_PARAM)
    working_dir = get_value('working_dir', param, DEFAULT_PARAM)
    final_bitstream_path = get_value('final_bitstream_path', param, DEFAULT_PARAM)
    # ========== RETRIEVE INPUTS ========== #
    # ========== IT WAS IN THE TEST FUNCTION ============= #
    if not(working_dir.endswith('/')):
        working_dir += '/'
    # Retrieve lambda, useful to compute some losses
    lambda_tradeoff_list = model.model_param.get('lambda_tradeoff')
    # Construct working dir
    os.system('mkdir -p ' + working_dir)
    # # Clean pre-existing result files
    # result_file_name = working_dir + 'summary.txt'
    # # result_file_name = working_dir + 'results_' + str(GOP_struct_name) + '_rate_' + str(idx_rate).replace('.', '-') + '.txt'
    # os.system('rm ' + result_file_name)
    # # Create log file, this log file is here for all sequences
    # file_res = open(result_file_name, 'w')
    # file_res.write(generate_header_file())
    # Lambda trade-off is only useful to compute some losses in the metric
    # ! For now, round idx_rate to select the lambda trade_off
    rounded_idx_rate = int(round(idx_rate))
    lambda_tradeoff = lambda_tradeoff_list[rounded_idx_rate]
    # The same lambda is used for both the CodecNet and MOFNet rates.
    l_codec = lambda_tradeoff
    l_mof = lambda_tradeoff
    # Sequence name is the last directory component of <sequence_path>.
    sequence_name = sequence_path.split('/')[-2]
    print_log_msg('INFO', 'infer_one_sequence', 'sequence name', sequence_name)
    print_log_msg('INFO', 'infer_one_sequence', 'GOP_struct_name', GOP_struct_name)
    print_log_msg('INFO', 'infer_one_sequence', 'idx_rate', idx_rate)
    # ========== IT WAS IN THE TEST FUNCTION ============= #
    # ========== COMPUTE NUMBER OF FRAMES & GOP ========== #
    if generate_bitstream:
        if not(bitstream_dir.endswith('/')):
            bitstream_dir += '/'
        # Start from a clean (empty) bitstream directory.
        os.system('rm -r ' + bitstream_dir)
        os.system('mkdir -p ' + bitstream_dir)
        if flag_bitstream_debug:
            debug_dir = '.' + '/'.join(bitstream_dir.split('/')[:-3]) + '/debug/' + bitstream_dir.split('/')[-2] + '/'
            os.system('rm ' + debug_dir + '*.md5')
            os.system('mkdir -p ' + debug_dir)
    # Number of frames in the video sequence
    if not(sequence_path.endswith('/')):
        sequence_path += '/'
    # How many frames we have to compress
    nb_frames = idx_end_frame - idx_starting_frame + 1
    # How many frames in a GOP
    GOP_size = len(GOP_struct)
    # How many GOPs we have to process. If there is a uncomplete GOP
    # e.g. 5 frames in the sequences and a GOP size of 3, do 2 GOPs
    # and pad the last one.
    nb_GOP = math.ceil(nb_frames / GOP_size)
    # How many frames are required for our number of GOPs
    expected_nb_frames = nb_GOP * GOP_size
    # When loading the last GOP, we'll need to pad the last GOP by <nb_frame_to_pad>
    nb_frame_to_pad = expected_nb_frames - nb_frames
    # ========== COMPUTE NUMBER OF FRAMES & GOP ========== #
    # These dictionaries are here to log the result on the entire sequences.
    sequence_result = {}
    print_log_msg('DEBUG', 'infer_one_sequence', 'nb_GOP', nb_GOP)
    for i in range(nb_GOP):
        # Except for the last GOP, we don't need any padding
        if i == nb_GOP - 1:
            cur_nb_frame_to_pad = nb_frame_to_pad
        else:
            cur_nb_frame_to_pad = 0
        # Load the frames
        raw_frames = load_frames({
            'sequence_path': sequence_path,
            'idx_starting_frame': i * GOP_size + idx_starting_frame,
            'nb_frame_to_load': GOP_size,
            'nb_pad_frame': cur_nb_frame_to_pad,
            'rgb': rgb,
            'loading_mode': loading_mode,
        })
        # Perform forward for this GOP
        _, GOP_result = infer_one_GOP({
            'model': model,
            'GOP_struct': GOP_struct,
            'raw_frames': raw_frames,
            'l_codec': l_codec,
            'l_mof': l_mof,
            'index_GOP_in_video': i,
            'generate_bitstream': generate_bitstream,
            'bitstream_dir': bitstream_dir,
            # Same as idx_starting_frame in load_frames above
            'real_idx_first_frame': i * GOP_size + idx_starting_frame,
            'idx_rate': idx_rate,
            'flag_bitstream_debug': flag_bitstream_debug,
        })
        # Retrieve some results for this GOP. To spare some memory, we don't
        # keep trace of everything in net_out
        # Beware, we do not retrieve GOP average result i.e. 'GOP' entry.
        for f in range(GOP_size):
            # Name of the frame inside the GOP
            GOP_frame_name = 'frame_' + str(f)
            # Name of the frame in the entire sequence
            seq_frame_name = 'frame_' + str(i * GOP_size + f + idx_starting_frame)
            # Retrieve all results from the current frame
            sequence_result[seq_frame_name] = GOP_result.get(GOP_frame_name)
    # We're almost done, we just have to average the results from the N frame
    # to obtain the 'sequence' entry of the sequence_result dictionnary.
    # This is pretty much the same thing as what is done for a GOP averaging in the
    # loss function.
    # NOTE: cur_nb_frame_to_pad holds the value from the LAST loop iteration,
    # i.e. the padding of the final GOP -- the only GOP that can be padded.
    sequence_result['sequence'] = average_N_frame({
        'x': sequence_result,
        'nb_pad_frame': cur_nb_frame_to_pad,
    })
    if generate_bitstream:
        # input('Before cat one video')
        cat_one_video({
            'bitstream_dir': bitstream_dir,
            'idx_starting_frame': idx_starting_frame,
            'idx_end_frame': idx_end_frame,
            'final_bitstream_path': final_bitstream_path,
        })
        # input('After cat one video')
    # ========== IT WAS IN THE TEST FUNCTION ============= #
    # Log all metrics for all frames in a special log file
    # Special repositories for the sequences detailed log file
    file_res_sequence = open(working_dir + 'detailed.txt', 'w')
    file_res_sequence.write(generate_header_file())
    for f in sequence_result:
        sequence_result[f]['pic_name'] = sequence_name
        sequence_result[f]['frame_idx'] = f
        file_res_sequence.write(generate_log_metric_one_frame(sequence_result.get(f)))
    # # Add a final line in the logging for the entire sequence in the general file
    # file_res.write(generate_log_metric_one_frame(sequence_result.get('sequence')))
    file_res_sequence.close()
    # ========== IT WAS IN THE TEST FUNCTION ============= #
    # We're done we can return the sequence result
    return sequence_result
def infer_one_GOP(param):
    """
    Infer one GOP and return the network outputs plus a metrics dictionary.

    This is separate from the training function; it is basically a wrapper
    around the GOP_forward method of the model, run without gradients and
    followed by cropping/casting of the decoded frames and metric computation.

    Parameters
    ----------
    param : dict
        Accepted keys and their defaults are documented in DEFAULT_PARAM
        below; missing keys fall back to their default via get_value.

    Returns
    -------
    tuple
        (net_out, result): the (cropped, cast) network outputs and the
        per-frame metrics computed by compute_metrics_one_GOP.
    """
    DEFAULT_PARAM = {
        # The model to be trained. Must be a nn.Module
        'model': None,
        # The GOP structure defined as in func_util/GOP_structure.py
        'GOP_struct': None,
        # The uncompressed frames (i.e. the frames to code), defined as:
        #   frame_0: {'y': tensor, 'u': tensor, 'v': tensor}
        #   frame_1: {'y': tensor, 'u': tensor, 'v': tensor}
        'raw_frames': None,
        # Lambda for CodecNet rate
        'l_codec': 0.,
        # Lambda for MOFNet rate
        'l_mof': 0.,
        # Index of the GOP in the video. Scalar in [0, N]
        'index_GOP_in_video': 0,
        # If true, we generate a bitstream at the end
        'generate_bitstream': False,
        # Path of the directory in which we output the bitstream
        'bitstream_dir': '',
        # Frame index in the video of the first frame (I) of the
        # GOP.
        'real_idx_first_frame': 0,
        # For multi-rate
        'idx_rate': 0.,
        # Set to true to generate more stuff, useful for debug
        'flag_bitstream_debug': False,
    }
    # ========== RETRIEVE INPUTS ========== #
    model = get_value('model', param, DEFAULT_PARAM)
    GOP_struct = get_value('GOP_struct', param, DEFAULT_PARAM)
    raw_frames = get_value('raw_frames', param, DEFAULT_PARAM)
    l_codec = get_value('l_codec', param, DEFAULT_PARAM)
    l_mof = get_value('l_mof', param, DEFAULT_PARAM)
    index_GOP_in_video = get_value('index_GOP_in_video', param, DEFAULT_PARAM)
    generate_bitstream = get_value('generate_bitstream', param, DEFAULT_PARAM)
    bitstream_dir = get_value('bitstream_dir', param, DEFAULT_PARAM)
    real_idx_first_frame = get_value('real_idx_first_frame', param, DEFAULT_PARAM)
    idx_rate = get_value('idx_rate', param, DEFAULT_PARAM)
    flag_bitstream_debug = get_value('flag_bitstream_debug', param, DEFAULT_PARAM)
    # ========== RETRIEVE INPUTS ========== #
    # Set model to evaluation mode
    model = model.eval()
    # # Retrieve the device on which we're working
    # my_device = COMPUTE_PARAM.get('device')
    # # Data represent the raw frames (i.e. uncompressed)
    # raw_frames = push_gop_to_device(raw_frames, my_device)
    model_input = {
        'GOP_struct': GOP_struct,
        'raw_frames': raw_frames,
        'idx_rate': idx_rate,
        'index_GOP_in_video': index_GOP_in_video,
        'generate_bitstream': generate_bitstream,
        'real_idx_first_frame': real_idx_first_frame,
        'bitstream_dir': bitstream_dir,
        'flag_bitstream_debug': flag_bitstream_debug,
    }
    # Pure inference: no gradient tracking needed.
    with torch.no_grad():
        net_out = model.GOP_forward(model_input)
    for f in net_out:
        # Clamp and crop the output so the decoded frame matches the
        # original (pre-padding) spatial dimensions, then cast it for
        # PNG saving.
        net_out[f]['x_hat'] = crop_dic(net_out.get(f).get('x_hat'), raw_frames.get(f))
        net_out[f]['x_hat'] = cast_before_png_saving({
            'x': net_out.get(f).get('x_hat'), 'data_type': 'yuv_dic',
        })
    torch.cuda.empty_cache()
    _, result = compute_metrics_one_GOP({
        'net_out': net_out,
        'target': raw_frames,
        'l_mof': l_mof,
        'l_codec': l_codec,
    })
    # net_out should not be that useful; result contains a bunch of metrics
    # for the different frames.
    return net_out, result
def load_model(prefix='', on_cpu=False):
    """Load a serialized model from ./<prefix>model.pt.

    Parameters
    ----------
    prefix : str
        Optional prefix prepended to the checkpoint file name.
    on_cpu : bool
        When True, map all tensors to the CPU at load time.

    Returns
    -------
    The deserialized model, with an ArithmeticCoder attached to both the
    CodecNet and MOFNet sub-networks (class attributes are saved alongside
    the model, so the coders are also available in the decoder).
    """
    if on_cpu:
        target_device = torch.device('cpu')
    else:
        target_device = None
    checkpoint_path = './' + prefix + 'model.pt'
    model = torch.load(checkpoint_path, map_location=target_device)
    # Attach an arithmetic coder to each sub-network, built from that
    # sub-network's own latent PDF estimator.
    for sub_net in (model.codec_net.codec_net, model.mode_net.mode_net):
        sub_net.ac = ArithmeticCoder({
            'balle_pdf_estim_z': sub_net.pdf_z,
            'device': target_device,
        })
    return model
|
[
"func_util.nn_util.get_value",
"real_life.bitstream.ArithmeticCoder",
"func_util.console_display.print_log_msg",
"model_mngt.loss_function.compute_metrics_one_GOP",
"math.ceil",
"torch.load",
"os.system",
"func_util.result_logging.generate_header_file",
"torch.cuda.empty_cache",
"torch.device",
"func_util.img_processing.load_frames",
"torch.no_grad",
"model_mngt.loss_function.average_N_frame",
"real_life.cat_binary_files.cat_one_video"
] |
[((2891, 2931), 'func_util.nn_util.get_value', 'get_value', (['"""model"""', 'param', 'DEFAULT_PARAM'], {}), "('model', param, DEFAULT_PARAM)\n", (2900, 2931), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((2949, 2994), 'func_util.nn_util.get_value', 'get_value', (['"""GOP_struct"""', 'param', 'DEFAULT_PARAM'], {}), "('GOP_struct', param, DEFAULT_PARAM)\n", (2958, 2994), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3017, 3067), 'func_util.nn_util.get_value', 'get_value', (['"""GOP_struct_name"""', 'param', 'DEFAULT_PARAM'], {}), "('GOP_struct_name', param, DEFAULT_PARAM)\n", (3026, 3067), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3088, 3136), 'func_util.nn_util.get_value', 'get_value', (['"""sequence_path"""', 'param', 'DEFAULT_PARAM'], {}), "('sequence_path', param, DEFAULT_PARAM)\n", (3097, 3136), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3219, 3272), 'func_util.nn_util.get_value', 'get_value', (['"""idx_starting_frame"""', 'param', 'DEFAULT_PARAM'], {}), "('idx_starting_frame', param, DEFAULT_PARAM)\n", (3228, 3272), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3293, 3341), 'func_util.nn_util.get_value', 'get_value', (['"""idx_end_frame"""', 'param', 'DEFAULT_PARAM'], {}), "('idx_end_frame', param, DEFAULT_PARAM)\n", (3302, 3341), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3352, 3390), 'func_util.nn_util.get_value', 'get_value', (['"""rgb"""', 'param', 'DEFAULT_PARAM'], {}), "('rgb', param, DEFAULT_PARAM)\n", (3361, 3390), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3410, 3457), 'func_util.nn_util.get_value', 'get_value', (['"""loading_mode"""', 'param', 'DEFAULT_PARAM'], {}), "('loading_mode', param, DEFAULT_PARAM)\n", (3419, 3457), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3483, 3536), 'func_util.nn_util.get_value', 'get_value', (['"""generate_bitstream"""', 'param', 'DEFAULT_PARAM'], {}), 
"('generate_bitstream', param, DEFAULT_PARAM)\n", (3492, 3536), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3557, 3605), 'func_util.nn_util.get_value', 'get_value', (['"""bitstream_dir"""', 'param', 'DEFAULT_PARAM'], {}), "('bitstream_dir', param, DEFAULT_PARAM)\n", (3566, 3605), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3621, 3664), 'func_util.nn_util.get_value', 'get_value', (['"""idx_rate"""', 'param', 'DEFAULT_PARAM'], {}), "('idx_rate', param, DEFAULT_PARAM)\n", (3630, 3664), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3692, 3747), 'func_util.nn_util.get_value', 'get_value', (['"""flag_bitstream_debug"""', 'param', 'DEFAULT_PARAM'], {}), "('flag_bitstream_debug', param, DEFAULT_PARAM)\n", (3701, 3747), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3766, 3812), 'func_util.nn_util.get_value', 'get_value', (['"""working_dir"""', 'param', 'DEFAULT_PARAM'], {}), "('working_dir', param, DEFAULT_PARAM)\n", (3775, 3812), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((3840, 3895), 'func_util.nn_util.get_value', 'get_value', (['"""final_bitstream_path"""', 'param', 'DEFAULT_PARAM'], {}), "('final_bitstream_path', param, DEFAULT_PARAM)\n", (3849, 3895), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((4225, 4261), 'os.system', 'os.system', (["('mkdir -p ' + working_dir)"], {}), "('mkdir -p ' + working_dir)\n", (4234, 4261), False, 'import os\n'), ((5039, 5114), 'func_util.console_display.print_log_msg', 'print_log_msg', (['"""INFO"""', '"""infer_one_sequence"""', '"""sequence name"""', 'sequence_name'], {}), "('INFO', 'infer_one_sequence', 'sequence name', sequence_name)\n", (5052, 5114), False, 'from func_util.console_display import print_log_msg\n'), ((5119, 5198), 'func_util.console_display.print_log_msg', 'print_log_msg', (['"""INFO"""', '"""infer_one_sequence"""', '"""GOP_struct_name"""', 'GOP_struct_name'], {}), "('INFO', 'infer_one_sequence', 
'GOP_struct_name', GOP_struct_name)\n", (5132, 5198), False, 'from func_util.console_display import print_log_msg\n'), ((5203, 5268), 'func_util.console_display.print_log_msg', 'print_log_msg', (['"""INFO"""', '"""infer_one_sequence"""', '"""idx_rate"""', 'idx_rate'], {}), "('INFO', 'infer_one_sequence', 'idx_rate', idx_rate)\n", (5216, 5268), False, 'from func_util.console_display import print_log_msg\n'), ((6295, 6326), 'math.ceil', 'math.ceil', (['(nb_frames / GOP_size)'], {}), '(nb_frames / GOP_size)\n', (6304, 6326), False, 'import math\n'), ((6736, 6798), 'func_util.console_display.print_log_msg', 'print_log_msg', (['"""DEBUG"""', '"""infer_one_sequence"""', '"""nb_GOP"""', 'nb_GOP'], {}), "('DEBUG', 'infer_one_sequence', 'nb_GOP', nb_GOP)\n", (6749, 6798), False, 'from func_util.console_display import print_log_msg\n'), ((8865, 8941), 'model_mngt.loss_function.average_N_frame', 'average_N_frame', (["{'x': sequence_result, 'nb_pad_frame': cur_nb_frame_to_pad}"], {}), "({'x': sequence_result, 'nb_pad_frame': cur_nb_frame_to_pad})\n", (8880, 8941), False, 'from model_mngt.loss_function import compute_metrics_one_GOP, average_N_frame\n'), ((11611, 11651), 'func_util.nn_util.get_value', 'get_value', (['"""model"""', 'param', 'DEFAULT_PARAM'], {}), "('model', param, DEFAULT_PARAM)\n", (11620, 11651), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((11669, 11714), 'func_util.nn_util.get_value', 'get_value', (['"""GOP_struct"""', 'param', 'DEFAULT_PARAM'], {}), "('GOP_struct', param, DEFAULT_PARAM)\n", (11678, 11714), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((11732, 11777), 'func_util.nn_util.get_value', 'get_value', (['"""raw_frames"""', 'param', 'DEFAULT_PARAM'], {}), "('raw_frames', param, DEFAULT_PARAM)\n", (11741, 11777), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((11792, 11834), 'func_util.nn_util.get_value', 'get_value', (['"""l_codec"""', 'param', 'DEFAULT_PARAM'], {}), "('l_codec', param, 
DEFAULT_PARAM)\n", (11801, 11834), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((11847, 11887), 'func_util.nn_util.get_value', 'get_value', (['"""l_mof"""', 'param', 'DEFAULT_PARAM'], {}), "('l_mof', param, DEFAULT_PARAM)\n", (11856, 11887), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((11913, 11966), 'func_util.nn_util.get_value', 'get_value', (['"""index_GOP_in_video"""', 'param', 'DEFAULT_PARAM'], {}), "('index_GOP_in_video', param, DEFAULT_PARAM)\n", (11922, 11966), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((11992, 12045), 'func_util.nn_util.get_value', 'get_value', (['"""generate_bitstream"""', 'param', 'DEFAULT_PARAM'], {}), "('generate_bitstream', param, DEFAULT_PARAM)\n", (12001, 12045), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((12066, 12114), 'func_util.nn_util.get_value', 'get_value', (['"""bitstream_dir"""', 'param', 'DEFAULT_PARAM'], {}), "('bitstream_dir', param, DEFAULT_PARAM)\n", (12075, 12114), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((12142, 12197), 'func_util.nn_util.get_value', 'get_value', (['"""real_idx_first_frame"""', 'param', 'DEFAULT_PARAM'], {}), "('real_idx_first_frame', param, DEFAULT_PARAM)\n", (12151, 12197), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((12213, 12256), 'func_util.nn_util.get_value', 'get_value', (['"""idx_rate"""', 'param', 'DEFAULT_PARAM'], {}), "('idx_rate', param, DEFAULT_PARAM)\n", (12222, 12256), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((12284, 12339), 'func_util.nn_util.get_value', 'get_value', (['"""flag_bitstream_debug"""', 'param', 'DEFAULT_PARAM'], {}), "('flag_bitstream_debug', param, DEFAULT_PARAM)\n", (12293, 12339), False, 'from func_util.nn_util import crop_dic, get_value\n'), ((13422, 13446), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13444, 13446), False, 'import torch\n'), ((13463, 13570), 
'model_mngt.loss_function.compute_metrics_one_GOP', 'compute_metrics_one_GOP', (["{'net_out': net_out, 'target': raw_frames, 'l_mof': l_mof, 'l_codec': l_codec}"], {}), "({'net_out': net_out, 'target': raw_frames, 'l_mof':\n l_mof, 'l_codec': l_codec})\n", (13486, 13570), False, 'from model_mngt.loss_function import compute_metrics_one_GOP, average_N_frame\n'), ((13925, 13985), 'torch.load', 'torch.load', (["('./' + prefix + 'model.pt')"], {'map_location': 'map_loc'}), "('./' + prefix + 'model.pt', map_location=map_loc)\n", (13935, 13985), False, 'import torch\n'), ((14140, 14234), 'real_life.bitstream.ArithmeticCoder', 'ArithmeticCoder', (["{'balle_pdf_estim_z': model.codec_net.codec_net.pdf_z, 'device': map_loc}"], {}), "({'balle_pdf_estim_z': model.codec_net.codec_net.pdf_z,\n 'device': map_loc})\n", (14155, 14234), False, 'from real_life.bitstream import ArithmeticCoder\n'), ((14288, 14380), 'real_life.bitstream.ArithmeticCoder', 'ArithmeticCoder', (["{'balle_pdf_estim_z': model.mode_net.mode_net.pdf_z, 'device': map_loc}"], {}), "({'balle_pdf_estim_z': model.mode_net.mode_net.pdf_z,\n 'device': map_loc})\n", (14303, 14380), False, 'from real_life.bitstream import ArithmeticCoder\n'), ((5506, 5541), 'os.system', 'os.system', (["('rm -r ' + bitstream_dir)"], {}), "('rm -r ' + bitstream_dir)\n", (5515, 5541), False, 'import os\n'), ((5550, 5588), 'os.system', 'os.system', (["('mkdir -p ' + bitstream_dir)"], {}), "('mkdir -p ' + bitstream_dir)\n", (5559, 5588), False, 'import os\n'), ((7065, 7288), 'func_util.img_processing.load_frames', 'load_frames', (["{'sequence_path': sequence_path, 'idx_starting_frame': i * GOP_size +\n idx_starting_frame, 'nb_frame_to_load': GOP_size, 'nb_pad_frame':\n cur_nb_frame_to_pad, 'rgb': rgb, 'loading_mode': loading_mode}"], {}), "({'sequence_path': sequence_path, 'idx_starting_frame': i *\n GOP_size + idx_starting_frame, 'nb_frame_to_load': GOP_size,\n 'nb_pad_frame': cur_nb_frame_to_pad, 'rgb': rgb, 'loading_mode':\n 
loading_mode})\n", (7076, 7288), False, 'from func_util.img_processing import load_frames, cast_before_png_saving\n'), ((9041, 9216), 'real_life.cat_binary_files.cat_one_video', 'cat_one_video', (["{'bitstream_dir': bitstream_dir, 'idx_starting_frame': idx_starting_frame,\n 'idx_end_frame': idx_end_frame, 'final_bitstream_path':\n final_bitstream_path}"], {}), "({'bitstream_dir': bitstream_dir, 'idx_starting_frame':\n idx_starting_frame, 'idx_end_frame': idx_end_frame,\n 'final_bitstream_path': final_bitstream_path})\n", (9054, 9216), False, 'from real_life.cat_binary_files import cat_one_video\n'), ((9587, 9609), 'func_util.result_logging.generate_header_file', 'generate_header_file', ([], {}), '()\n', (9607, 9609), False, 'from func_util.result_logging import generate_header_file, generate_log_metric_one_frame\n'), ((13046, 13061), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13059, 13061), False, 'import torch\n'), ((13873, 13892), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (13885, 13892), False, 'import torch\n'), ((5754, 5792), 'os.system', 'os.system', (["('rm ' + debug_dir + '*.md5')"], {}), "('rm ' + debug_dir + '*.md5')\n", (5763, 5792), False, 'import os\n'), ((5805, 5839), 'os.system', 'os.system', (["('mkdir -p ' + debug_dir)"], {}), "('mkdir -p ' + debug_dir)\n", (5814, 5839), False, 'import os\n')]
|
import telepot
from telepot.loop import MessageLoop
import sys
from control_bot import control
from bot_command import *
from time import sleep
# The bot token is supplied as the first command-line argument.
TOKEN = sys.argv[1]
bot = telepot.Bot(TOKEN)
def handle(msg):
    """Dispatch a single Telegram update to the matching bot command.

    Routes callback-query payloads ('data') to unwarn, slash commands to
    the admin/user command tables, and membership events to the greeting
    handlers.
    """
    # Instantiated for their side effects / handler methods on this update.
    main = control(msg, bot)
    inst_commnand_user = command_user(msg=msg, bot=bot)
    inst_command_admin = command_admin(msg=msg, bot=bot)
    if msg.get('data'):
        # Callback-query payload: route straight to unwarn.
        text = 'None'
        ctext = 'None'
        inst_command_admin.unwarn(data=msg['data'])
    else:
        try:
            text = msg['text'].split(' ')
            ctext = text[0].lower()
        except:
            # No text in this update (e.g. stickers, joins).
            text = None
            ctext = None
    admin_commands = {
        '/ban': inst_command_admin.ban,
        '/warn': inst_command_admin.warn,
        '/unwarn': inst_command_admin.unwarn,
        '/deflink': inst_command_admin.deflink,
        '/defregras': inst_command_admin.defregras,
        '/welcome': inst_command_admin.defwelcome,
        '/addb': inst_command_admin.add,
    }
    user_command = {
        '/start': inst_commnand_user.start,
        '/info': inst_commnand_user.info,
        '/ajuda': inst_commnand_user.ajuda,
        '/link': inst_commnand_user.link,
        '/regras': inst_commnand_user.regras,
    }
    others = {
        'left_chat_member': inst_commnand_user.goodbye,
        'new_chat_member': inst_commnand_user.new_member,
    }
    admin_handler = admin_commands.get(ctext)
    user_handler = user_command.get(ctext)
    if admin_handler:
        # These commands consume the whole message text as their argument.
        if ctext in ('/deflink', '/defregras', '/welcome'):
            admin_handler(msg['text'])
        else:
            admin_handler()
    elif user_handler:
        user_handler()
    # Membership events are handled independently of slash commands.
    if msg.get('left_chat_member'):
        others['left_chat_member']()
    elif msg.get('new_chat_member'):
        others['new_chat_member']()
if __name__ == '__main__':
    # Dispatch incoming Telegram updates to handle() on a background thread.
    MessageLoop(bot, handle).run_as_thread()
    # Keep the main thread alive so the message-loop thread keeps running.
    while 1:
        sleep(100)
|
[
"telepot.Bot",
"time.sleep",
"telepot.loop.MessageLoop",
"control_bot.control"
] |
[((171, 189), 'telepot.Bot', 'telepot.Bot', (['TOKEN'], {}), '(TOKEN)\n', (182, 189), False, 'import telepot\n'), ((216, 233), 'control_bot.control', 'control', (['msg', 'bot'], {}), '(msg, bot)\n', (223, 233), False, 'from control_bot import control\n'), ((1834, 1844), 'time.sleep', 'sleep', (['(100)'], {}), '(100)\n', (1839, 1844), False, 'from time import sleep\n'), ((1780, 1804), 'telepot.loop.MessageLoop', 'MessageLoop', (['bot', 'handle'], {}), '(bot, handle)\n', (1791, 1804), False, 'from telepot.loop import MessageLoop\n')]
|
import logging
import numpy as np
from amset.constants import defaults
from amset.deformation.common import desymmetrize_deformation_potentials
from amset.deformation.io import load_deformation_potentials
from amset.electronic_structure.kpoints import get_mesh_from_kpoint_numbers
from amset.electronic_structure.symmetry import expand_kpoints
from amset.interpolation.periodic import PeriodicLinearInterpolator
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class DeformationPotentialInterpolator(PeriodicLinearInterpolator):
    """Periodic linear interpolator for deformation potentials on a k-point mesh.

    Provides constructors that build the interpolator either from a
    deformation-potential file or from in-memory data, desymmetrizing the
    potentials onto the full Brillouin zone when only the irreducible
    k-points are supplied.
    """

    @classmethod
    def from_file(cls, filename, scale=1.0):
        """Load deformation potentials from *filename* and build an interpolator.

        Parameters
        ----------
        filename : str
            Path of the deformation-potential file.
        scale : float
            Uniform scaling factor applied to every deformation potential.

        Returns
        -------
        DeformationPotentialInterpolator
        """
        deform_potentials, kpoints, structure = load_deformation_potentials(filename)
        deform_potentials = {s: d * scale for s, d in deform_potentials.items()}
        return cls.from_deformation_potentials(deform_potentials, kpoints, structure)

    @classmethod
    def from_deformation_potentials(
        cls, deformation_potentials, kpoints, structure, symprec=defaults["symprec"]
    ):
        """Build an interpolator from in-memory deformation potentials.

        If *kpoints* already covers the full mesh, the data is used directly;
        otherwise the potentials are expanded from the irreducible wedge onto
        the full Brillouin zone using the structure's symmetry operations.

        Parameters
        ----------
        deformation_potentials : dict
            Mapping of spin to deformation-potential arrays on *kpoints*.
        kpoints : numpy.ndarray
            Fractional k-point coordinates.
        structure : pymatgen structure
            Crystal structure used to determine symmetry operations.
        symprec : float
            Symmetry-detection tolerance.

        Returns
        -------
        DeformationPotentialInterpolator
        """
        logger.info("Initializing deformation potential interpolator")
        mesh_dim = get_mesh_from_kpoint_numbers(kpoints)
        # np.prod replaces the deprecated np.product alias (removed in NumPy 2.0).
        if np.prod(mesh_dim) == len(kpoints):
            # Already a full mesh -- no desymmetrization needed.
            return cls.from_data(kpoints, deformation_potentials)

        full_kpoints, rotations, _, _, op_mapping, kp_mapping = expand_kpoints(
            structure, kpoints, time_reversal=True, return_mapping=True, symprec=symprec
        )
        logger.warning("Desymmetrizing deformation potentials, this could go wrong.")
        deformation_potentials = desymmetrize_deformation_potentials(
            deformation_potentials, structure, rotations, op_mapping, kp_mapping
        )
        return cls.from_data(full_kpoints, deformation_potentials)
|
[
"amset.deformation.common.desymmetrize_deformation_potentials",
"amset.electronic_structure.kpoints.get_mesh_from_kpoint_numbers",
"numpy.product",
"amset.electronic_structure.symmetry.expand_kpoints",
"logging.getLogger",
"amset.deformation.io.load_deformation_potentials"
] |
[((495, 522), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (512, 522), False, 'import logging\n'), ((703, 740), 'amset.deformation.io.load_deformation_potentials', 'load_deformation_potentials', (['filename'], {}), '(filename)\n', (730, 740), False, 'from amset.deformation.io import load_deformation_potentials\n'), ((1146, 1183), 'amset.electronic_structure.kpoints.get_mesh_from_kpoint_numbers', 'get_mesh_from_kpoint_numbers', (['kpoints'], {}), '(kpoints)\n', (1174, 1183), False, 'from amset.electronic_structure.kpoints import get_mesh_from_kpoint_numbers\n'), ((1364, 1460), 'amset.electronic_structure.symmetry.expand_kpoints', 'expand_kpoints', (['structure', 'kpoints'], {'time_reversal': '(True)', 'return_mapping': '(True)', 'symprec': 'symprec'}), '(structure, kpoints, time_reversal=True, return_mapping=True,\n symprec=symprec)\n', (1378, 1460), False, 'from amset.electronic_structure.symmetry import expand_kpoints\n'), ((1598, 1707), 'amset.deformation.common.desymmetrize_deformation_potentials', 'desymmetrize_deformation_potentials', (['deformation_potentials', 'structure', 'rotations', 'op_mapping', 'kp_mapping'], {}), '(deformation_potentials, structure,\n rotations, op_mapping, kp_mapping)\n', (1633, 1707), False, 'from amset.deformation.common import desymmetrize_deformation_potentials\n'), ((1195, 1215), 'numpy.product', 'np.product', (['mesh_dim'], {}), '(mesh_dim)\n', (1205, 1215), True, 'import numpy as np\n')]
|
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import logging
import re
import warnings
from collections import Counter
from functools import partial
from math import ceil
from copy import copy
import six
from cobra import DictList
from sympy import Add, Mul, RealNumber
from cobra import Model, Metabolite, Reaction
from cobra.util import SolverNotFound
from cobra.exceptions import OptimizationError
from cameo import fba
from cameo import models, phenotypic_phase_plane
from cameo.config import non_zero_flux_threshold
from cameo.core.pathway import Pathway
from cameo.core.result import Result, MetaInformation
from cameo.core.strain_design import StrainDesignMethodResult, StrainDesign, StrainDesignMethod
from cameo.core.target import ReactionKnockinTarget
from cameo.data import metanetx
from cameo.strain_design.pathway_prediction import util
from cameo.util import TimeMachine
from cameo.visualization.plotting import plotter
__all__ = ['PathwayPredictor']
logger = logging.getLogger(__name__)
# Short aliases for sympy's raw (non-evaluating) expression constructors.
add = Add._from_args
mul = Mul._from_args
class PathwayResult(Pathway, Result, StrainDesign):
    """A predicted heterologous pathway.

    Bundles the predicted pathway reactions together with the adapter and
    exchange reactions needed to connect them to a host model, and exposes
    the whole pathway as a strain design (a list of knock-in targets).
    """

    def __init__(self, reactions, exchanges, adapters, product, *args, **kwargs):
        self._meta_information = MetaInformation()
        self.reactions = reactions
        self.exchanges = exchanges
        self.adapters = adapters
        self.product = product
        self.targets = self._build_targets()

    def _replace_adapted_metabolites(self, reaction):
        """
        Replace adapted metabolites by model metabolites

        Parameters
        ----------
        reaction: cameo.core.reaction.Reaction

        Returns
        -------
        cameo.core.reaction.Reaction
            A copy of *reaction* in which every metabolite that is the
            product of an adapter reaction is replaced by that adapter's
            reactant (the native model metabolite).
        """
        stoichiometry = {}

        for metabolite, coefficient in six.iteritems(reaction.metabolites):
            # Substitute the metabolite if an adapter maps it to a model
            # metabolite. (A previous version carried a `found` flag that was
            # assigned False on a match and then used only in a no-op branch;
            # that dead code has been removed -- behavior is unchanged.)
            for adapter in self.adapters:
                if metabolite == adapter.products[0]:
                    metabolite = adapter.reactants[0]
                    break
            stoichiometry[metabolite] = coefficient

        # Rebuild the reaction so the original object is left untouched.
        reaction = Reaction(id=reaction.id,
                            name=reaction.name,
                            lower_bound=reaction.lower_bound,
                            upper_bound=reaction.upper_bound)
        reaction.add_metabolites(stoichiometry)
        return reaction

    def _build_targets(self):
        """Build the knock-in targets for all pathway and exchange reactions."""
        targets = DictList()
        for reaction in self.reactions:
            reaction = self._replace_adapted_metabolites(reaction)
            if reaction.id in metanetx.mnx2all:
                # Known MetaNetX reaction: record the accession for traceability.
                target = ReactionKnockinTarget(reaction.id, reaction, accession_id=reaction.id, accession_db='metanetx')
            else:
                target = ReactionKnockinTarget(reaction.id, reaction)
            targets.append(target)

        for reaction in self.exchanges:
            reaction = self._replace_adapted_metabolites(reaction)
            targets.append(ReactionKnockinTarget(reaction.id, reaction))

        # The product exchange only exports (no uptake of the product).
        product = self._replace_adapted_metabolites(self.product)
        product.lower_bound = 0
        targets.append(ReactionKnockinTarget(product.id, product))

        return targets

    def plot(self, **kwargs):
        # Plotting is not implemented for a single pathway result.
        pass

    def needs_optimization(self, model, objective=None):
        """Return True when the production envelope has a non-negligible area,
        i.e. when production trades off against the objective and strain
        optimization could improve the design."""
        area = self.production_envelope(model, objective).area
        return area > 1e-5

    def production_envelope(self, model, objective=None):
        """Compute the phenotypic phase plane of the product in *model* with
        the pathway temporarily applied (reverted on exit)."""
        with model:
            self.apply(model)
            return phenotypic_phase_plane(model, variables=[objective], objective=self.product.id)

    def plug_model(self, model, adapters=True, exchanges=True):
        """Deprecated: add the pathway's reactions to *model* in place.

        Use :meth:`apply` instead.
        """
        warnings.warn("The 'plug_model' method has been deprecated. Use apply instead.", DeprecationWarning)
        model.add_reactions(self.reactions)
        if adapters:
            model.add_reactions(self.adapters)
        if exchanges:
            model.add_reactions(self.exchanges)
        try:
            model.add_reaction(self.product)
        except Exception:
            # Best effort: the product exchange may already exist in the model.
            logger.warning("Exchange %s already in model" % self.product.id)
            pass
        self.product.lower_bound = 0
class PathwayPredictions(StrainDesignMethodResult):
__method_name__ = "PathwayPredictor"
def __init__(self, pathways, *args, **kwargs):
super(PathwayPredictions, self).__init__(pathways, *args, **kwargs)
@property
def pathways(self):
return self._designs
def plug_model(self, model, index):
warnings.warn("The 'plug_model' method as been deprecated. You can use result[i].apply instead",
DeprecationWarning)
self.pathways[index].plug_model(model)
def __getitem__(self, item):
return self.pathways[item]
def __str__(self):
string = str()
for i, pathway in enumerate(self.pathways):
string += 'Pathway No. {}'.format(i + 1)
for reaction in pathway.reactions:
string += '{}, {}:'.format(reaction.id, reaction.name,
reaction.build_reaction_string(use_metabolite_names=True))
return string
def plot(self, grid=None, width=None, height=None, title=None):
# TODO: small pathway visualizations would be great.
raise NotImplementedError
def plot_production_envelopes(self, model, objective=None, title=None):
rows = int(ceil(len(self.pathways) / 2.0))
title = "Production envelops for %s" % self.pathways[0].product.name if title is None else title
grid = plotter.grid(n_rows=rows, title=title)
with grid:
for i, pathway in enumerate(self.pathways):
ppp = pathway.production_envelope(model, objective=objective)
ppp.plot(grid=grid, width=450, title="Pathway %i" % (i + 1))
class PathwayPredictor(StrainDesignMethod):
"""Pathway predictions from a universal set of reaction.
Parameters
----------
model : cobra.Model
The model that represents the host organism.
universal_model : cobra.Model, optional
The model that represents the universal set of reactions.
A default model will be used if omitted.
mapping : dict, optional
A dictionary that contains a mapping between metabolite
identifiers in `model` and `universal_model`
compartment_regexp : str, optional
A regular expression that matches host metabolites' compartments
that should be connected to the universal reaction model. If not
provided, the compartment containing most metabolites will be
chosen.
Attributes
----------
model : cobra.Model
The provided model + universal_model + adapter reactions
Examples
--------
Determine production pathways for propane-1,3-diol (MNXM2861 in the metanetx namespace)
>>> from cameo.api import hosts
>>> pathway_predictor = PathwayPredictor(hosts.ecoli.iJO1366)
>>> pathway_predictor.run(product=pathway_predictor.model.metabolites.MNXM2861)
"""
def __init__(self, model, universal_model=None, mapping=None, compartment_regexp=None):
""""""
self.original_model = model
if compartment_regexp is None:
compartments_tally = Counter(metabolite.compartment for metabolite in self.original_model.metabolites)
most_common_compartment = compartments_tally.most_common(n=1)[0][0]
compartment_regexp = re.compile('^' + most_common_compartment + '$')
else:
compartment_regexp = re.compile(compartment_regexp)
if universal_model is None:
logger.debug("Loading default universal model.")
self.universal_model = models.universal.metanetx_universal_model_bigg
elif isinstance(universal_model, Model):
self.universal_model = universal_model
else:
raise ValueError('Provided universal_model %s is not a model.' % universal_model)
self.products = self.universal_model.metabolites
if mapping is None:
self.mapping = metanetx.all2mnx
else:
self.mapping = mapping
self.model = model.copy()
try:
logger.info('Trying to set solver to cplex to speed up pathway predictions.')
self.model.solver = 'cplex'
except SolverNotFound:
logger.info('cplex not available for pathway predictions.')
self.new_reactions = self._extend_model(model.exchanges)
logger.debug("Adding adapter reactions to connect model with universal model.")
self.adpater_reactions = util.create_adapter_reactions(model.metabolites, self.universal_model,
self.mapping, compartment_regexp)
self.model.add_reactions(self.adpater_reactions)
self._add_switches(self.new_reactions)
def run(self, product=None, max_predictions=float("inf"), min_production=.1,
timeout=None, callback=None, silent=False, allow_native_exchanges=False):
"""Run pathway prediction for a desired product.
Parameters
----------
product : Metabolite, str
Metabolite or id or name of metabolite to find production pathways for.
max_predictions : int, optional
The maximum number of predictions to compute.
min_production : float
The minimum acceptable production flux to product.
timeout : int
The time limit [seconds] per attempted prediction.
callback : function
A function that takes a successfully predicted pathway.
silent : bool
If True will print the pathways and max flux values.
allow_native_exchanges: bool
If True, exchange reactions for native metabolites will be allowed.
Returns
-------
PathwayPredictions
The predicted pathways.
"""
product = self._find_product(product)
pathways = list()
with TimeMachine() as tm, self.model:
tm(do=partial(setattr, self.model.solver.configuration, 'timeout', timeout),
undo=partial(setattr, self.model.solver.configuration, 'timeout',
self.model.solver.configuration.timeout))
try:
product_reaction = self.model.reactions.get_by_id('DM_' + product.id)
except KeyError:
product_reaction = self.model.add_boundary(product, type='demand')
product_reaction.lower_bound = min_production
counter = 1
while counter <= max_predictions:
logger.debug('Predicting pathway No. %d' % counter)
try:
self.model.slim_optimize(error_value=None)
except OptimizationError as e:
logger.error('No pathway could be predicted. Terminating pathway predictions.')
logger.error(e)
break
vars_to_cut = list()
for i, y_var_id in enumerate(self._y_vars_ids):
y_var = self.model.solver.variables[y_var_id]
if y_var.primal == 1.0:
vars_to_cut.append(y_var)
logger.info(vars_to_cut)
if len(vars_to_cut) == 0:
# no pathway found:
logger.info("It seems %s is a native product in model %s. "
"Let's see if we can find better heterologous pathways." % (product, self.model))
# knockout adapter with native product
for adapter in self.adpater_reactions:
if product in adapter.metabolites:
logger.info('Knocking out adapter reaction %s containing native product.' % adapter)
adapter.knock_out()
continue
pathway = [self.model.reactions.get_by_id(y_var.name[2:]) for y_var in vars_to_cut]
pathway_metabolites = set([m for pathway_reaction in pathway for m in pathway_reaction.metabolites])
logger.info('Pathway predicted: %s' % '\t'.join(
[r.build_reaction_string(use_metabolite_names=True) for r in pathway]))
pathway_metabolites.add(product)
# Figure out adapter reactions to include
adapters = [adapter for adapter in self.adpater_reactions if adapter.products[0] in pathway_metabolites]
# Figure out exchange reactions to include
exchanges = [exchange for exchange in self._exchanges
if abs(exchange.flux) > non_zero_flux_threshold and exchange.id != product_reaction.id]
if allow_native_exchanges:
exchanges = [exchange for exchange in exchanges
if list(exchange.metabolites)[0] in pathway_metabolites]
pathway = PathwayResult(pathway, exchanges, adapters, product_reaction)
if not silent:
util.display_pathway(pathway, counter)
integer_cut = self.model.solver.interface.Constraint(Add(*vars_to_cut),
name="integer_cut_" + str(counter),
ub=len(vars_to_cut) - 1)
logger.debug('Adding integer cut.')
tm(
do=partial(self.model.solver.add, integer_cut),
undo=partial(self.model.solver.remove, integer_cut))
# Test pathway in the original model
with self.original_model:
pathway.apply(self.original_model)
try:
solution = fba(self.original_model, objective=pathway.product.id)
except OptimizationError as e:
logger.error(e)
logger.error(
"Addition of pathway {} made the model unsolvable. "
"Skipping pathway.".format(pathway))
continue
else:
if solution[pathway.product.id] > non_zero_flux_threshold:
pathways.append(pathway)
if not silent:
print("Max flux: %.5f" % solution[pathway.product.id])
else:
logger.error(
"Pathway {} couldn't be verified. Production flux {}"
"is below requirement {}. Skipping pathway.".format(
pathway, solution[pathway.product.id], non_zero_flux_threshold))
finally:
counter += 1
if callback is not None:
callback(pathway)
return PathwayPredictions(pathways)
def _add_switches(self, reactions):
logger.info("Adding switches.")
y_vars = list()
switches = list()
self._exchanges = list()
for reaction in reactions:
if reaction.id.startswith('DM_'):
# demand reactions don't need integer switches
self._exchanges.append(reaction)
continue
y = self.model.solver.interface.Variable('y_' + reaction.id, lb=0, ub=1, type='binary')
y_vars.append(y)
# The following is a complicated but efficient way to write the following constraints
# switch_lb = self.model.solver.interface.Constraint(y * reaction.lower_bound - reaction.flux_expression,
# name='switch_lb_' + reaction.id, ub=0)
# switch_ub = self.model.solver.interface.Constraint(y * reaction.upper_bound - reaction.flux_expression,
# name='switch_ub_' + reaction.id, lb=0)
forward_term = mul((RealNumber(-1), reaction.forward_variable))
reverse_term = mul((RealNumber(-1), reaction.reverse_variable))
switch_lb_term = mul((RealNumber(reaction.lower_bound), y))
switch_ub_term = mul((RealNumber(reaction.upper_bound), y))
switch_lb = self.model.solver.interface.Constraint(add((switch_lb_term, forward_term, reverse_term)),
name='switch_lb_' + reaction.id, ub=0, sloppy=True)
switch_ub = self.model.solver.interface.Constraint(add((switch_ub_term, forward_term, reverse_term)),
name='switch_ub_' + reaction.id, lb=0, sloppy=True)
switches.extend([switch_lb, switch_ub])
self.model.solver.add(y_vars)
self.model.solver.add(switches, sloppy=True)
logger.info("Setting minimization of switch variables as objective.")
self.model.objective = self.model.solver.interface.Objective(Add(*y_vars), direction='min')
self._y_vars_ids = [var.name for var in y_vars]
def _extend_model(self, original_exchanges):
for exchange in self.model.exchanges:
if len(exchange.reactants) > 0 >= exchange.lower_bound:
exchange.upper_bound = 999999.
logger.info("Adding reactions from universal model to host model.")
new_reactions = list()
original_model_metabolites = [self.mapping.get('bigg:' + m.id[0:-2], m.id) for
r in original_exchanges for m, coeff in six.iteritems(r.metabolites)
if len(r.metabolites) == 1 and coeff < 0 < r.upper_bound]
universal_exchanges = self.universal_model.exchanges
for reaction in self.universal_model.reactions:
if reaction in self.model.reactions:
continue
if reaction in universal_exchanges:
metabolite = list(reaction.metabolites.keys())[0]
if metabolite.id in original_model_metabolites:
continue
new_reactions.append(copy(reaction))
self.model.add_reactions(new_reactions)
return new_reactions
def _find_product(self, product):
if isinstance(product, six.string_types):
for metabolite in self.model.metabolites:
if metabolite.id == product:
return metabolite
if metabolite.name == product:
return metabolite
raise ValueError(
"Specified product '{product}' could not be found. "
"Try searching pathway_predictor_obj.universal_model.metabolites".format(product=product))
elif isinstance(product, Metabolite):
try:
return self.model.metabolites.get_by_id(product.id)
except KeyError:
raise ValueError('Provided product %s cannot be found in universal reaction database.' % product)
else:
raise ValueError('Provided product %s is neither a metabolite nor an ID or name.' % product)
if __name__ == '__main__':
from cameo.api import hosts
pathway_predictor = PathwayPredictor(hosts.ecoli.models.EcoliCore)
print(pathway_predictor.run(product=pathway_predictor.model.metabolites.MNXM53)) # MNXM53 = L-serine
|
[
"six.iteritems",
"cameo.core.result.MetaInformation",
"sympy.RealNumber",
"collections.Counter",
"cobra.DictList",
"cameo.phenotypic_phase_plane",
"functools.partial",
"cameo.strain_design.pathway_prediction.util.display_pathway",
"cameo.strain_design.pathway_prediction.util.create_adapter_reactions",
"cameo.visualization.plotting.plotter.grid",
"cameo.fba",
"re.compile",
"sympy.Add",
"copy.copy",
"cameo.util.TimeMachine",
"cobra.Reaction",
"warnings.warn",
"cameo.core.target.ReactionKnockinTarget",
"logging.getLogger"
] |
[((1612, 1639), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1629, 1639), False, 'import logging\n'), ((1852, 1869), 'cameo.core.result.MetaInformation', 'MetaInformation', ([], {}), '()\n', (1867, 1869), False, 'from cameo.core.result import Result, MetaInformation\n'), ((2408, 2443), 'six.iteritems', 'six.iteritems', (['reaction.metabolites'], {}), '(reaction.metabolites)\n', (2421, 2443), False, 'import six\n'), ((2820, 2937), 'cobra.Reaction', 'Reaction', ([], {'id': 'reaction.id', 'name': 'reaction.name', 'lower_bound': 'reaction.lower_bound', 'upper_bound': 'reaction.upper_bound'}), '(id=reaction.id, name=reaction.name, lower_bound=reaction.\n lower_bound, upper_bound=reaction.upper_bound)\n', (2828, 2937), False, 'from cobra import Model, Metabolite, Reaction\n'), ((3139, 3149), 'cobra.DictList', 'DictList', ([], {}), '()\n', (3147, 3149), False, 'from cobra import DictList\n'), ((4393, 4496), 'warnings.warn', 'warnings.warn', (['"""The \'plug_model\' method as been deprecated. Use apply instead."""', 'DeprecationWarning'], {}), '("The \'plug_model\' method as been deprecated. Use apply instead.",\n DeprecationWarning)\n', (4406, 4496), False, 'import warnings\n'), ((5230, 5356), 'warnings.warn', 'warnings.warn', (['"""The \'plug_model\' method as been deprecated. You can use result[i].apply instead"""', 'DeprecationWarning'], {}), '(\n "The \'plug_model\' method as been deprecated. 
You can use result[i].apply instead"\n , DeprecationWarning)\n', (5243, 5356), False, 'import warnings\n'), ((6291, 6329), 'cameo.visualization.plotting.plotter.grid', 'plotter.grid', ([], {'n_rows': 'rows', 'title': 'title'}), '(n_rows=rows, title=title)\n', (6303, 6329), False, 'from cameo.visualization.plotting import plotter\n'), ((9359, 9468), 'cameo.strain_design.pathway_prediction.util.create_adapter_reactions', 'util.create_adapter_reactions', (['model.metabolites', 'self.universal_model', 'self.mapping', 'compartment_regexp'], {}), '(model.metabolites, self.universal_model, self\n .mapping, compartment_regexp)\n', (9388, 9468), False, 'from cameo.strain_design.pathway_prediction import util\n'), ((3852, 3894), 'cameo.core.target.ReactionKnockinTarget', 'ReactionKnockinTarget', (['product.id', 'product'], {}), '(product.id, product)\n', (3873, 3894), False, 'from cameo.core.target import ReactionKnockinTarget\n'), ((4240, 4319), 'cameo.phenotypic_phase_plane', 'phenotypic_phase_plane', (['model'], {'variables': '[objective]', 'objective': 'self.product.id'}), '(model, variables=[objective], objective=self.product.id)\n', (4262, 4319), False, 'from cameo import models, phenotypic_phase_plane\n'), ((8001, 8087), 'collections.Counter', 'Counter', (['(metabolite.compartment for metabolite in self.original_model.metabolites)'], {}), '(metabolite.compartment for metabolite in self.original_model.\n metabolites)\n', (8008, 8087), False, 'from collections import Counter\n'), ((8196, 8243), 're.compile', 're.compile', (["('^' + most_common_compartment + '$')"], {}), "('^' + most_common_compartment + '$')\n", (8206, 8243), False, 'import re\n'), ((8291, 8321), 're.compile', 're.compile', (['compartment_regexp'], {}), '(compartment_regexp)\n', (8301, 8321), False, 'import re\n'), ((10785, 10798), 'cameo.util.TimeMachine', 'TimeMachine', ([], {}), '()\n', (10796, 10798), False, 'from cameo.util import TimeMachine\n'), ((17965, 17977), 'sympy.Add', 'Add', (['*y_vars'], 
{}), '(*y_vars)\n', (17968, 17977), False, 'from sympy import Add, Mul, RealNumber\n'), ((3330, 3429), 'cameo.core.target.ReactionKnockinTarget', 'ReactionKnockinTarget', (['reaction.id', 'reaction'], {'accession_id': 'reaction.id', 'accession_db': '"""metanetx"""'}), "(reaction.id, reaction, accession_id=reaction.id,\n accession_db='metanetx')\n", (3351, 3429), False, 'from cameo.core.target import ReactionKnockinTarget\n'), ((3469, 3513), 'cameo.core.target.ReactionKnockinTarget', 'ReactionKnockinTarget', (['reaction.id', 'reaction'], {}), '(reaction.id, reaction)\n', (3490, 3513), False, 'from cameo.core.target import ReactionKnockinTarget\n'), ((3684, 3728), 'cameo.core.target.ReactionKnockinTarget', 'ReactionKnockinTarget', (['reaction.id', 'reaction'], {}), '(reaction.id, reaction)\n', (3705, 3728), False, 'from cameo.core.target import ReactionKnockinTarget\n'), ((18536, 18564), 'six.iteritems', 'six.iteritems', (['r.metabolites'], {}), '(r.metabolites)\n', (18549, 18564), False, 'import six\n'), ((19094, 19108), 'copy.copy', 'copy', (['reaction'], {}), '(reaction)\n', (19098, 19108), False, 'from copy import copy\n'), ((10836, 10905), 'functools.partial', 'partial', (['setattr', 'self.model.solver.configuration', '"""timeout"""', 'timeout'], {}), "(setattr, self.model.solver.configuration, 'timeout', timeout)\n", (10843, 10905), False, 'from functools import partial\n'), ((10927, 11033), 'functools.partial', 'partial', (['setattr', 'self.model.solver.configuration', '"""timeout"""', 'self.model.solver.configuration.timeout'], {}), "(setattr, self.model.solver.configuration, 'timeout', self.model.\n solver.configuration.timeout)\n", (10934, 11033), False, 'from functools import partial\n'), ((13904, 13942), 'cameo.strain_design.pathway_prediction.util.display_pathway', 'util.display_pathway', (['pathway', 'counter'], {}), '(pathway, counter)\n', (13924, 13942), False, 'from cameo.strain_design.pathway_prediction import util\n'), ((14013, 14030), 'sympy.Add', 
'Add', (['*vars_to_cut'], {}), '(*vars_to_cut)\n', (14016, 14030), False, 'from sympy import Add, Mul, RealNumber\n'), ((16951, 16965), 'sympy.RealNumber', 'RealNumber', (['(-1)'], {}), '(-1)\n', (16961, 16965), False, 'from sympy import Add, Mul, RealNumber\n'), ((17027, 17041), 'sympy.RealNumber', 'RealNumber', (['(-1)'], {}), '(-1)\n', (17037, 17041), False, 'from sympy import Add, Mul, RealNumber\n'), ((17105, 17137), 'sympy.RealNumber', 'RealNumber', (['reaction.lower_bound'], {}), '(reaction.lower_bound)\n', (17115, 17137), False, 'from sympy import Add, Mul, RealNumber\n'), ((17177, 17209), 'sympy.RealNumber', 'RealNumber', (['reaction.upper_bound'], {}), '(reaction.upper_bound)\n', (17187, 17209), False, 'from sympy import Add, Mul, RealNumber\n'), ((14326, 14369), 'functools.partial', 'partial', (['self.model.solver.add', 'integer_cut'], {}), '(self.model.solver.add, integer_cut)\n', (14333, 14369), False, 'from functools import partial\n'), ((14396, 14442), 'functools.partial', 'partial', (['self.model.solver.remove', 'integer_cut'], {}), '(self.model.solver.remove, integer_cut)\n', (14403, 14442), False, 'from functools import partial\n'), ((14655, 14709), 'cameo.fba', 'fba', (['self.original_model'], {'objective': 'pathway.product.id'}), '(self.original_model, objective=pathway.product.id)\n', (14658, 14709), False, 'from cameo import fba\n')]
|
from machine import Pin
from neopixel import NeoPixel
from Blocky.Pin import getPin
from Blocky.Timer import *
from time import sleep_ms
class Indicator :
def __init__ (self):
self.animation = ''
self.color = (0,0,0) # color that user set
self.fcolor = [0,0,0] # color that the handler use
self.rgb = NeoPixel(Pin(5) , 1 , timing = True )
def animate (self , type = 'None', color = (0,100,100), speed =10):
self.rgb[0] = color
self.rgb.write()
sleep_ms(1)
self.rgb[0] = (0,0,0)
self.rgb.write()
# Timer has been penalty because of its unstable behaviour
# DEPRECATED
"""
print('set to ' , color , type );self.rgb[0] = (0,0,0)
if type == 'heartbeat':
self.animation = type
self.color = color
def _handler (self):
r,g,b = self.rgb[0]
if (r,g,b) == (0,0,0): self.fcolor = self.color
if (r,g,b) == self.fcolor:self.fcolor = (0,0,0)
d,e,f = self.fcolor
if (r<d): r = r + 1
if (r>d): r = r - 1 if r > 1 else 0
if (g<e): g = g + 1
if (g>e): g = g - 1 if g > 1 else 0
if (b<f): b = b + 1
if (b>f): b = b - 1 if b > 1 else 0
self.rgb[0] = (r,g,b)
self.rgb.write()
AddTask(name = 'sys_led' , function = _handler , mode = 'repeat',time = speed,arg = (self))
elif type == 'pulse':
self.animation = type
self.color = (color[0]//5*5,color[1]//5*5,color[2]//5*5)
self.fcolor = (color[0]//5*5,color[1]//5*5,color[2]//5*5)
self.rgb[0] = (0,0,0)
def _handler (self):
global r,g,b
r,g,b = self.rgb[0]
if (r,g,b) == self.color:self.fcolor = (0,0,0)
d,e,f = self.fcolor
if (r<d): r = r + 5
if (r>d): r = r - 5 if r > 5 else 0
if (g<e): g = g + 5
if (g>e): g = g - 5 if g > 5 else 0
if (b<f): b = b + 5
if (b>f): b = b - 5 if b >5 else 0
self.rgb[0] = (r,g,b)
self.rgb.write()
if self.rgb[0] == (0,0,0):
DeleteTask('sys_led')
AddTask(name = 'sys_led' , function = _handler , mode = 'repeat',time = speed,arg = (self))
else :
DeleteTask('sys_led')
self.rgb[0] = (0,0,0) ; self.rgb.write()
"""
indicator = Indicator()
|
[
"time.sleep_ms",
"machine.Pin"
] |
[((461, 472), 'time.sleep_ms', 'sleep_ms', (['(1)'], {}), '(1)\n', (469, 472), False, 'from time import sleep_ms\n'), ((320, 326), 'machine.Pin', 'Pin', (['(5)'], {}), '(5)\n', (323, 326), False, 'from machine import Pin\n')]
|
"""
Utilities for working with videos, pulling out patches, etc.
"""
import numpy
from pylearn2.compat import OrderedDict
from pylearn2.utils.rng import make_np_rng
__author__ = "<NAME>"
__copyright__ = "Copyright 2011, <NAME> / Universite de Montreal"
__license__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__all__ = ["get_video_dims", "spatiotemporal_cubes"]
def get_video_dims(fname):
"""
Pull out the frame length, spatial height and spatial width of
a video file using ffmpeg.
Parameters
----------
fname : str
Path to video file to be inspected.
Returns
-------
shape : tuple
The spatiotemporal dimensions of the video
(length, height, width).
"""
try:
import pyffmpeg
except ImportError:
raise ImportError("This function requires pyffmpeg "
"<http://code.google.com/p/pyffmpeg/>")
mp = pyffmpeg.FFMpegReader()
try:
mp.open(fname)
tracks = mp.get_tracks()
for track in tracks:
if isinstance(track, pyffmpeg.VideoTrack):
break
else:
raise ValueError('no video track found')
return (track.duration(),) + track.get_orig_size()
finally:
mp.close()
class FrameLookup(object):
"""
Class encapsulating the logic of turning a frame index into a
collection of files into the frame index of a specific video file.
Item-indexing on this object will yield a (filename, nframes, frame_no)
tuple, where nframes is the number of frames in the given file
(mainly for checking that we're far enough from the end so that we
can sample a big enough chunk).
Parameters
----------
names_ang_lengths : WRITEME
"""
def __init__(self, names_and_lengths):
self.files, self.lengths = zip(*names_and_lengths)
self.terminals = numpy.cumsum([s[1] for s in names_and_lengths])
def __getitem__(self, i):
idx = (i < self.terminals).nonzero()[0][0]
frame_no = i
if idx > 0:
frame_no -= self.terminals[idx - 1]
return self.files[idx], self.lengths[idx], frame_no
def __len__(self):
return self.terminals[-1]
def __iter__(self):
raise TypeError('iteration not supported')
def spatiotemporal_cubes(file_tuples, shape, n_patches=numpy.inf, rng=None):
"""
Generator function that yields a stream of (filename, slicetuple)
representing a spatiotemporal patch of that file.
Parameters
----------
file_tuples : list of tuples
Each element should be a 2-tuple consisting of a filename
(or arbitrary identifier) and a (length, height, width)
shape tuple of the dimensions (number of frames in the video,
height and width of each frame).
shape : tuple
A shape tuple consisting of the desired (length, height, width)
of each spatiotemporal patch.
n_patches : int, optional
The number of patches to generate. By default, generates patches
infinitely.
rng : RandomState object or seed, optional
The random number generator (or seed) to use. Defaults to None,
meaning it will be seeded from /dev/urandom or the clock.
Returns
-------
generator : generator object
A generator that yields a stream of (filename, slicetuple) tuples.
The slice tuple is such that it indexes into a 3D array containing
the entire clip with frames indexed along the first axis, rows
along the second and columns along the third.
"""
frame_lookup = FrameLookup([(a, b[0]) for a, b in file_tuples])
file_lookup = OrderedDict(file_tuples)
patch_length, patch_height, patch_width = shape
done = 0
rng = make_np_rng(rng, which_method="random_integers")
while done < n_patches:
frame = numpy.random.random_integers(0, len(frame_lookup) - 1)
filename, file_length, frame_no = frame_lookup[frame]
# Check that there is a contiguous block of frames starting at
# frame_no that is at least as long as our desired cube length.
if file_length - frame_no < patch_length:
continue
_, video_height, video_width = file_lookup[filename][:3]
# The last row and column in which a patch could "start" to still
# fall within frame.
last_row = video_height - patch_height
last_col = video_width - patch_width
row = numpy.random.random_integers(0, last_row)
col = numpy.random.random_integers(0, last_col)
patch_slice = (slice(frame_no, frame_no + patch_length),
slice(row, row + patch_height),
slice(col, col + patch_width))
done += 1
yield filename, patch_slice
|
[
"pylearn2.utils.rng.make_np_rng",
"pylearn2.compat.OrderedDict",
"pyffmpeg.FFMpegReader",
"numpy.cumsum",
"numpy.random.random_integers"
] |
[((930, 953), 'pyffmpeg.FFMpegReader', 'pyffmpeg.FFMpegReader', ([], {}), '()\n', (951, 953), False, 'import pyffmpeg\n'), ((3701, 3725), 'pylearn2.compat.OrderedDict', 'OrderedDict', (['file_tuples'], {}), '(file_tuples)\n', (3712, 3725), False, 'from pylearn2.compat import OrderedDict\n'), ((3801, 3849), 'pylearn2.utils.rng.make_np_rng', 'make_np_rng', (['rng'], {'which_method': '"""random_integers"""'}), "(rng, which_method='random_integers')\n", (3812, 3849), False, 'from pylearn2.utils.rng import make_np_rng\n'), ((1906, 1953), 'numpy.cumsum', 'numpy.cumsum', (['[s[1] for s in names_and_lengths]'], {}), '([s[1] for s in names_and_lengths])\n', (1918, 1953), False, 'import numpy\n'), ((4499, 4540), 'numpy.random.random_integers', 'numpy.random.random_integers', (['(0)', 'last_row'], {}), '(0, last_row)\n', (4527, 4540), False, 'import numpy\n'), ((4555, 4596), 'numpy.random.random_integers', 'numpy.random.random_integers', (['(0)', 'last_col'], {}), '(0, last_col)\n', (4583, 4596), False, 'import numpy\n')]
|
import colorful as cf
from dbmanagement.manager.category_manager import CategoryManager
from dbmanagement.manager.favorite_manager import favorite_manager
from dbmanagement.manager.product_manager import product_manager
from .state import State
class StartMenu(State):
"""The first State with the main menu. Two options are possibles."""
def __init__(self):
self.menu = {
1: CategoryMenu,
2: FavMenu,
}
def show(self):
print("\n \n"
"1. Search for food you which to substitute\n"
"2. Find my saved favorites healthy food\n")
def on_event(self, event):
if event in ("strtmnu", "bck", "r", "add_fav"):
return StartMenu()
else:
return self.menu.get(event, lambda: "")()
class CategoryMenu(State):
"""The follow up state after selecting the first menu. Now selecting which
category to choose"""
def __init__(self):
self.menu = self.get_random_cat()
def show(self):
print("\n \n")
for item in self.menu:
print(f"{item}. {self.menu[item]}")
def on_event(self, event):
if event == "bck":
return self
elif event in self.menu.keys():
return ProductMenu(self.menu[event])
return self
def get_random_cat(self):
cat_list = CategoryManager.get_cat()
return cat_list
class ProductMenu(State):
"""State where the user select an unhealthy product from selected category
in precedent state"""
def __init__(self, selected_cat):
self.uh_barcode = {}
self.temp = {}
self.selected_cat = selected_cat
self.menu = self.get_product_by_category()
def show(self):
print("\n \n")
for item in self.menu:
print(f"{item}. {self.menu[item]}")
def on_event(self, event):
if event == "bck":
return self
elif event in self.menu.keys():
return SubProductMenu(
self.menu[event], self.selected_cat, self.uh_barcode)
return self
def get_product_by_category(self):
prod_list = product_manager.get_unhealthy_prod_by_category(
self.selected_cat)
for enum, (barcode, name) in enumerate(prod_list.items()):
enum += 1
self.uh_barcode[enum] = barcode
self.temp[enum] = name
return self.temp
class SubProductMenu(State):
"""State where the substitute product list is displayed in regards of
selected category and unhealthy product"""
def __init__(self, selected_prod, selected_cat, uh_barcode):
self.uh_barcode = uh_barcode
self.selected_prod = selected_prod
self.selected_cat = selected_cat
self.barcode = {}
self.temp = {}
self.menu = self.get_healthier_product()
def show(self):
print("\n \n \n"
"Here is a list of much better food than the one you selected")
# for enum, (barcode, name) in enumerate(self.menu.items()):
# print(f"{enum}. {self.menu[barcode]}")
for item in self.menu:
print(f"{item}. {self.menu[item]}")
print("\n "
"Write a number to see the detail of the product")
def on_event(self, event):
if event in self.menu.keys():
return ShowProduct(event, self.barcode[event], self.selected_prod,
self.selected_cat, self.uh_barcode)
else:
return self
def get_healthier_product(self):
prod_list = product_manager.get_healthier_product_by_category(
self.selected_cat)
for enum, (barcode, name) in enumerate(prod_list.items()):
enum += 1
self.barcode[enum] = barcode
self.temp[enum] = name
return self.temp
class ShowProduct(State):
    """State that presents one substitute product in detail and lets
    the user save it as a favourite."""

    def __init__(self, event, barcode, prod, cat, uh_barcode):
        self.uh_barcode = uh_barcode
        self.barcode = barcode
        self.event = event
        self.product_name = prod
        self.category = cat
        self.full_product = self.get_product()

    def show(self):
        """Print the substitute's details plus the save instructions."""
        details = self.full_product
        print(f"\n \n \n"
              f"{details['product_name']} is a better food as it has"
              f" a nutriscore graded {details['nutriscore']}."
              f"\n You can buy it in these stores : "
              f"{details['store_name']}"
              f"\n For more information visit this url : "
              f"{details['url']}")
        save_word = cf.red('save', nested=True)
        key_hint = cf.red('press "s" ', nested=True)
        print(cf.white("\n If you want to " + save_word +
                       " this food into your favorite, "
                       + key_hint + "\n \n \n"))

    def on_event(self, event):
        """Save on 'add_fav', go back on 'bck'; anything else falls
        through (returns None)."""
        if event == "add_fav":
            self.save_product_into_fav()
            return StartMenu()
        if event == "bck":
            return SubProductMenu(
                self.product_name, self.category, self.uh_barcode)

    def get_product(self):
        """Look up the full record for this state's barcode."""
        return product_manager.get_product_by_barcode(self.barcode)

    def save_product_into_fav(self):
        """Persist the (unhealthy product, substitute) pair."""
        favorite_manager.save_healthy_product_to_favorite(
            self.event, self.uh_barcode, self.full_product)
        print("Your favorite substitute have been saved")
class FavMenu(State):
    """State listing every saved favourite and the product it replaces."""

    def __init__(self):
        self.fav_name = {}
        self.fav_barcode = {}
        self.menu = self.get_fav_by_barcode()

    def show(self):
        """Print each favourite with the product it substitutes."""
        print("\n \n")
        for key, entry in self.menu.items():
            print(f"{key}. {entry[0]} remplace {entry[3]}")

    def on_event(self, event):
        """'bck' returns to the start menu; a known favourite number
        opens its detail view; anything else also returns to start."""
        if event == "bck":
            return StartMenu()
        try:
            return ShowFavProduct(event, self.fav_barcode[event][0])
        except KeyError:
            # Not a valid favourite number.
            return StartMenu()

    def get_fav_by_barcode(self):
        """Load the favourites; caches both name and barcode maps."""
        self.fav_name, self.fav_barcode = favorite_manager.get_all_favorite()
        return self.fav_name

    def get_product(self):
        # Not applicable for the list view.
        return None
class ShowFavProduct(State):
    """State displaying the details of one favourite product."""

    def __init__(self, event, barcode):
        self.event = event
        self.barcode = barcode
        self.full_product = self.get_product()

    def show(self):
        """Print the favourite product's details."""
        print(f"\n \n \n"
              f"{self.full_product['product_name']} is a better food as it has"
              f" a nutriscore graded {self.full_product['nutriscore']}."
              f"\n You can buy it in these stores : "
              f"{self.full_product['store_name']}"
              f"\n For more information visit this url : "
              f"{self.full_product['url']}")

    def on_event(self, event):
        """Return to the favourites list on 'bck', otherwise reload
        this view for the new event.

        BUG FIX: this handler was named ``event``, which broke the
        State dispatch convention (every sibling state implements
        ``on_event``) and was shadowed by the ``self.event`` attribute
        assigned in ``__init__`` — making it unreachable on instances.
        """
        if event == "bck":
            return FavMenu()
        return ShowFavProduct(event, self.barcode)

    def get_product(self):
        """Look up the full record for this state's barcode."""
        return product_manager.get_product_by_barcode(self.barcode)
|
[
"dbmanagement.manager.favorite_manager.favorite_manager.get_all_favorite",
"dbmanagement.manager.product_manager.product_manager.get_healthier_product_by_category",
"dbmanagement.manager.favorite_manager.favorite_manager.save_healthy_product_to_favorite",
"colorful.red",
"dbmanagement.manager.category_manager.CategoryManager.get_cat",
"dbmanagement.manager.product_manager.product_manager.get_unhealthy_prod_by_category",
"dbmanagement.manager.product_manager.product_manager.get_product_by_barcode"
] |
[((1371, 1396), 'dbmanagement.manager.category_manager.CategoryManager.get_cat', 'CategoryManager.get_cat', ([], {}), '()\n', (1394, 1396), False, 'from dbmanagement.manager.category_manager import CategoryManager\n'), ((2168, 2233), 'dbmanagement.manager.product_manager.product_manager.get_unhealthy_prod_by_category', 'product_manager.get_unhealthy_prod_by_category', (['self.selected_cat'], {}), '(self.selected_cat)\n', (2214, 2233), False, 'from dbmanagement.manager.product_manager import product_manager\n'), ((3600, 3668), 'dbmanagement.manager.product_manager.product_manager.get_healthier_product_by_category', 'product_manager.get_healthier_product_by_category', (['self.selected_cat'], {}), '(self.selected_cat)\n', (3649, 3668), False, 'from dbmanagement.manager.product_manager import product_manager\n'), ((5212, 5264), 'dbmanagement.manager.product_manager.product_manager.get_product_by_barcode', 'product_manager.get_product_by_barcode', (['self.barcode'], {}), '(self.barcode)\n', (5250, 5264), False, 'from dbmanagement.manager.product_manager import product_manager\n'), ((5334, 5436), 'dbmanagement.manager.favorite_manager.favorite_manager.save_healthy_product_to_favorite', 'favorite_manager.save_healthy_product_to_favorite', (['self.event', 'self.uh_barcode', 'self.full_product'], {}), '(self.event, self.\n uh_barcode, self.full_product)\n', (5383, 5436), False, 'from dbmanagement.manager.favorite_manager import favorite_manager\n'), ((6335, 6370), 'dbmanagement.manager.favorite_manager.favorite_manager.get_all_favorite', 'favorite_manager.get_all_favorite', ([], {}), '()\n', (6368, 6370), False, 'from dbmanagement.manager.favorite_manager import favorite_manager\n'), ((7239, 7291), 'dbmanagement.manager.product_manager.product_manager.get_product_by_barcode', 'product_manager.get_product_by_barcode', (['self.barcode'], {}), '(self.barcode)\n', (7277, 7291), False, 'from dbmanagement.manager.product_manager import product_manager\n'), ((4851, 4884), 
'colorful.red', 'cf.red', (['"""press "s" """'], {'nested': '(True)'}), '(\'press "s" \', nested=True)\n', (4857, 4884), True, 'import colorful as cf\n'), ((4739, 4766), 'colorful.red', 'cf.red', (['"""save"""'], {'nested': '(True)'}), "('save', nested=True)\n", (4745, 4766), True, 'import colorful as cf\n')]
|
#/u/GoldenSights
import sys
import traceback
import time
import datetime
import sqlite3
import json
import praw
'''USER CONFIGURATION'''
"""GENERAL"""
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = "/r/Excel Clippy Office Assistant all-in-one moderator."
# This is a short description of what the bot does.
# For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "Goldtesting"
# This is the sub or list of subs to scan for new posts.
# For a single sub, use "sub1".
# For multiple subreddits, use "sub1+sub2+sub3+..."
PLAY_BOOT_SOUND = True
#Play boot.wav
MAXPOSTS = 100
# How many posts to get from the /new queue at once
WAIT = 30
# The number of seconds between cycles. The bot is completely inactive during
# this time
"""**************"""
"""CLIPPYPOINTS™ """
"""**************"""
POINT_STRING_USR = ["Solution Verified"]
# OP can use this string to award points in his thread.
POINT_STRING_MOD = ["+1 Point"]
# Moderators can use this to give points at any time.
POINT_FLAIR_CSS = "points"
# The CSS class associated with point flair
# Set to "" for none
POINT_REPLY = "You have awarded one point to _parent_"
# This is the phrase that User will receive
# _parent_ will be replaced by the username of the Parent.
POINT_EXEMPT = []
# Any usernames in this list will not receive points.
# Perhaps they have special flair.
POINT_OP_ONLY = True
# Is OP the only person who can give points?
# I recommend setting this to False. Other users might have the same question
# and would like to reward a good answer.
POINT_PER_THREAD = 200
# How many points can be distributed in a single thread?
POINT_DO_EXPLAIN = True
# If the max-per-thread is reached and someone tries to give a point, reply to
# them saying that the max has already been reached
POINT_EXPLAIN = """
Sorry, but %d point(s) have already been distributed in this thread.
This is the maximum allowed at this time.
"""%POINT_PER_THREAD
# If EXPLAINMAX is True, this will be said to someone who tries to give a
# point after max is reached
POINT_EXPLAIN_OP_ONLY = """
Hi!
It looks like you are trying to award a point and you are not the OP!
I am here to assist you!
What would you like help with?
[ClippyPoints^(TM)?](/r/excel/wiki/clippy)
[Flair Descriptions](http://www.reddit.com/r/excel/wiki/index)
"""
"""**************"""
"""FLAIR REMINDER"""
"""**************"""
FLAIR_WARN_DELAY = 86400
# This is the time, IN SECONDS, the user has to reply to the first comment.
# If he does not respond by this time, post is removed
NCDELAY = 172800
FLAIR_WARN_MESSAGE = """
Hi!
It looks like you are trying to ask a question!
Since you have not responded in the last 24 hours, I am here to assist you!
If your questions has been solved, please be sure to update the flair.
Would you like help?
[Help Changing Your
Flair?](https://www.reddit.com/r/excel/wiki/flair)
[Asking Question and Sharing
Data](https://www.reddit.com/r/excel/wiki/sharingquestions)
"""
# This is what the bot tells you when you dont meet the DELAY. Uses reddit's
# usual Markdown formatting
FLAIR_IGNORE_MODS = False
# Do you want the bot to ignore posts made by moderators?
# Use True or False (With capitals! No quotations!)
FLAIR_IGNORE_SELF = False
#Do you want the bot to ignore selfposts?
FLAIR_SOLVED = "solved"
FLAIR_UNSOLVED = "unsolved"
FLAIR_CHAL = "challenge"
FLAIR_MANN = "Mod Announcement"
FLAIR_MNEWS = "Mod News"
FLAIR_WAITING = "Waiting on OP"
FLAIR_DISCUSS = "discussion"
FLAIR_ADVERTISEMENT = "advertisement"
FLAIR_TEMPLATE = "User Template"
FLAIR_PROTIP = "pro tip"
FLAIR_TRIGGERS = ["that works", "perfect", "thank you so much", "huge help",
"figured it out", "got it", "thanks for your help"]
#These encourage OP to change flair / award point
FLAIR_REMINDER = """
Hi!
It looks like you received an answer to your question! Since the top is
still marked as unsolved, I am here to assist you!
If your questions has been solved, please be sure to update the flair.
Would you like help?
[Help Changing Your Flair?](http://www.reddit.com/r/excel/wiki/index)
[Flair Descriptions](http://www.reddit.com/r/excel/wiki/index)
"""
"""******************"""
"""FUNCTION REFERENCE"""
"""******************"""
DICT_TRIGGER = "clippy: "
# The trigger phrase for perfoming a lookup
DICT_FILE = 'reference.txt'
# The file with the Keys/Values
DICT_RESULT_FORM = "_value_"
# This is the form that the result will take
# You may use _key_ and _value_ to inject the key/value from the dict.
# You may delete one or both of these injectors.
DICT_LEVENSHTEIN = False
# If this is True it will use a function that is slow but can find
# misspelled keys
# If this is False it will use a simple function that is very fast but can
# only find keys which are spelled exactly
DICT_FAIL = """
Hi! It looks like you're looking for help with an Excel function!
Unfortunately I have not learned that function yet. If you'd like to
change that, [message the
moderators](http://www.reddit.com/message/compose?to=%2Fr%2Fexcel)!
"""
# The comment which is created when a function is requested
# but not in the file
"""***************"""
"""WELCOME MESSAGE"""
"""***************"""
WELCOME_SUBJECT = """Welcome to /r/Excel, I am here to help!"""
WELCOME_MESSAGE = """
Hi %s!
It looks like you are new to posting in /r/Excel.
Did you know we have a few ways to help you receive better help?
How can I help you?
[How to Share Your Questions](/r/excel/wiki/sharingquestions)
[Changing Link Flair](/r/excel/wiki/flair)
[ClippyPoints^TM](/r/excel/wiki/clippy)
^This ^message ^is ^auto-generated ^and ^is ^not ^monitored ^on ^a
^regular ^basis, ^replies ^to ^this ^message ^may ^not ^go ^answered.
^Remember ^to [^contact ^the
^moderators](http://www.reddit.com/message/compose?to=%2Fr%2Fexcel)
^to ^guarantee ^a ^response
"""
# Sent to the user if he has created his first post in the subreddit
'''All done!'''
class ClippyPoints:
    """Awards ClippyPoints(TM): watches comments for award phrases and
    increments the recipient's numeric flair.

    Relies on module globals: ``r`` (praw session), ``cur``/``sql``
    (sqlite cursor/connection) and the POINT_* configuration constants.
    """

    def incrementflair(self, subreddit, username):
        """Add one point to *username*'s numeric flair.

        Returns True when the flair was updated, False when the
        existing flair text could not be parsed as a number.
        """
        if isinstance(subreddit, str):
            subreddit = r.get_subreddit(subreddit)
        success = False
        print('\t\tChecking flair for ' + username)
        flairs = subreddit.get_flair(username)
        flairs = flairs['flair_text']
        if flairs is not None and flairs != '':
            print('\t\t:' + flairs)
            try:
                flairs = int(flairs)
                flairs += 1
                flairs = str(flairs)
                success = True
            except ValueError:
                # Flair exists but is not a point counter; leave it alone.
                print('\t\tCould not convert flair to a number.')
        else:
            # No flair yet: this is the user's first point.
            print('\t\tNo current flair. 1 point')
            flairs = '1'
            success = True
        if success:
            print('\t\tAssigning Flair: ' + flairs)
            subreddit.set_flair(username, flair_text=flairs,
                                flair_css_class=POINT_FLAIR_CSS)
        return success

    def receive(self, comments):
        """Scan *comments* for award phrases and hand out points.

        BUG FIX: the max-per-thread branch referenced the undefined name
        ``EXPLAINMAX`` (NameError at runtime); the configuration
        constant defined above is ``POINT_DO_EXPLAIN``.
        """
        print('\tClippyPoints received comments.')
        subreddit = r.get_subreddit(SUBREDDIT)
        for comment in comments:
            cid = comment.id
            cur.execute('SELECT * FROM clippy_points WHERE ID=?', [cid])
            if not cur.fetchone():
                print(cid)
                cbody = comment.body.lower()
                try:
                    if not comment.is_root:
                        cauthor = comment.author.name
                        print('\tChecking subreddit moderators')
                        moderators = [user.name for user in subreddit.get_moderators()]
                        # An award "by user" (OP or another redditor) uses the
                        # user phrases; moderators may use the mod phrases.
                        byuser = False
                        if cauthor not in moderators and any(
                                flag.lower() in cbody for flag in POINT_STRING_USR):
                            byuser = True
                        if byuser or (cauthor in moderators and any(
                                flag.lower() in cbody for flag in POINT_STRING_MOD)):
                            print('\tFlagged %s.' % cid)
                            print('\t\tFetching parent and Submission data.')
                            parentcom = r.get_info(thing_id=comment.parent_id)
                            pauthor = parentcom.author.name
                            op = comment.submission.author.name
                            opid = comment.submission.id
                            if pauthor != cauthor:
                                if not any(exempt.lower() == pauthor.lower()
                                           for exempt in POINT_EXEMPT):
                                    if POINT_OP_ONLY is False or cauthor == op or cauthor in moderators:
                                        # Per-thread point budget lives in clippy_points_s.
                                        cur.execute('SELECT * FROM clippy_points_s WHERE ID=?', [opid])
                                        fetched = cur.fetchone()
                                        if not fetched:
                                            cur.execute('INSERT INTO clippy_points_s VALUES(?, ?)', [opid, 0])
                                            fetched = 0
                                        else:
                                            fetched = fetched[1]
                                        if fetched < POINT_PER_THREAD:
                                            if self.incrementflair(subreddit, pauthor):
                                                print('\t\tWriting reply')
                                                comment_confirm = comment.reply(
                                                    POINT_REPLY.replace('_parent_', pauthor))
                                                comment_confirm.distinguish()
                                                cur.execute('UPDATE clippy_points_s SET count=? WHERE ID=?', [fetched + 1, opid])
                                                if byuser:
                                                    comment.submission.set_flair(
                                                        flair_text=FLAIR_SOLVED,
                                                        flair_css_class="solvedcase")
                                        else:
                                            print('\t\tMaxPerThread has been reached')
                                            # BUG FIX: was `EXPLAINMAX` (undefined).
                                            if POINT_DO_EXPLAIN is True:
                                                print('\t\tWriting reply')
                                                comment.reply(POINT_EXPLAIN)
                                    else:
                                        print('\tOther users cannot give points.')
                                        #comment_confirm = comment.reply(EXPLAINOPONLY)
                                        #comment_confirm.distinguish()
                                else:
                                    print('\t\tParent is on the exempt list.')
                            else:
                                print('\t\tCannot give points to self.')
                    else:
                        print('\t\tRoot comment. Ignoring.')
                except AttributeError:
                    # A deleted account leaves .author as None.
                    print('\t\tCould not fetch usernames. Cannot proceed.')
                cur.execute('INSERT INTO clippy_points VALUES(?)', [cid])
                sql.commit()
        print('\tClippyPoints finished')
class ClippyFlairReminder:
    """Keeps submission link flair in sync with thread activity and
    nudges OPs who stop responding.

    Relies on module globals: ``r``, ``cur``/``sql``, ``getTime`` and
    the FLAIR_* configuration constants.
    """

    def receive(self, posts):
        """Process new submissions and update/remind as needed.

        BUG FIX: the flair-reminder reply was never captured, so the
        following ``newcomment.distinguish()`` raised NameError; the
        reply is now assigned to ``newcomment`` before distinguishing.
        Also removed the unused locals ``now``, ``ptime`` and the dead
        initial ``ctime`` assignment.
        """
        print('\tClippyFlair received submissions')
        subreddit = r.get_subreddit(SUBREDDIT)
        print('\tChecking subreddit moderators')
        moderators = [user.name for user in subreddit.get_moderators()]
        for post in posts:
            found = False
            ctimes = []
            pid = post.id
            try:
                pauthor = post.author.name
            except AttributeError:
                pauthor = '[deleted]'  # submitter's account is gone
            curtime = getTime(True)
            cur.execute('SELECT * FROM clippy_flair WHERE id=?', [pid])
            if not cur.fetchone():
                # Honour the ignore switches first; an ignored post is
                # recorded so it is never scanned again.
                skipped = False
                if pauthor in moderators and FLAIR_IGNORE_MODS is True:
                    print(pid + ', ' + pauthor + ': Ignoring Moderator')
                    cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
                    skipped = True
                if post.is_self is True and FLAIR_IGNORE_SELF is True:
                    print(pid + ', ' + pauthor + ': Ignoring Selfpost')
                    cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
                    skipped = True
                if not skipped:
                    comments = praw.helpers.flatten_tree(post.comments)
                    try:
                        flair = post.link_flair_text.lower()
                    except AttributeError:
                        flair = ''  # no flair assigned yet
                    if flair == FLAIR_UNSOLVED.lower():
                        print(pid + ': Unsolved')
                        # Any comment by someone other than OP moves the
                        # post to "Waiting on OP".
                        for comment in comments:
                            try:
                                cauthor = comment.author.name
                            except AttributeError:
                                cauthor = '[deleted]'
                            if cauthor != pauthor:
                                found = True
                                break
                        if not found:
                            print('\tNo comments by another user. No action taken.')
                        else:
                            print('\tFound comment by other user. Marking as Waiting.')
                            post.set_flair(flair_text=FLAIR_WAITING,
                                           flair_css_class="waitingonop")
                    elif flair == FLAIR_WAITING.lower():
                        print(pid + ': Waiting')
                        for comment in comments:
                            try:
                                cauthor = comment.author.name
                            except AttributeError:
                                cauthor = '[deleted]'
                            if cauthor == pauthor:
                                found = True
                                pbody = comment.body.lower()
                            else:
                                ctimes.append(comment.created_utc)
                        if found is True:
                            if not any(trigger in pbody for trigger in POINT_STRING_USR):
                                print('\tFound comment by OP. All clear, changing flair back to unsolved.')
                                post.set_flair(flair_text=FLAIR_UNSOLVED,
                                               flair_css_class="notsolvedcase")
                                cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
                                if any(key.lower() in pbody for key in FLAIR_TRIGGERS):
                                    print('Replying to ' + pid + ' by ' + pauthor)
                                    # BUG FIX: capture the reply before
                                    # distinguishing it.
                                    newcomment = comment.reply(FLAIR_REMINDER)
                                    newcomment.distinguish()
                        elif found is False and len(ctimes) > 0:
                            print('\tNo comments by OP. Checking time limit.')
                            ctime = min(ctimes)
                            difference = curtime - ctime
                            if difference > FLAIR_WARN_DELAY:
                                print('\tTime is up.')
                                print('\tLeaving Comment')
                                newcomment = post.add_comment(FLAIR_WARN_MESSAGE)
                                print('\tDistinguishing Comment')
                                newcomment.distinguish()
                                cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
                            else:
                                differences = str('%.0f' % (FLAIR_WARN_DELAY - difference))
                                print('\tStill has ' + differences + 's.')
                        elif found is False and len(ctimes) == 0:
                            print('\tNo comments by OP, but no other comments are available.')
                    else:
                        # Any other flair is terminal: record it so the
                        # post is skipped on future passes.
                        print(pid + ': Neither flair')
                        if flair == FLAIR_DISCUSS.lower():
                            print(pid + ': is a discussion post, adding to ignore list...')
                            cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
                        if flair == FLAIR_ADVERTISEMENT.lower():
                            print(pid + ': is an advertisement post, adding to ignore list...')
                            cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
                        if flair == FLAIR_TEMPLATE.lower():
                            print(pid + ': is a User Template post, adding to ignore list...')
                            cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
                        if flair == FLAIR_PROTIP.lower():
                            print(pid + ': is a ProTip post, adding to ignore list...')
                            cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
                        if flair == FLAIR_SOLVED.lower():
                            print(pid + ': is a SOLVED post, adding to ignore list...')
                            cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
                        if flair == FLAIR_MANN.lower():
                            print(pid + ': is a Mod Annoucement post, adding to ignore list...')
                            cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
                        if flair == FLAIR_MNEWS.lower():
                            print(pid + ': is a Mod News post, adding to ignore list...')
                            cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
                        else:
                            # Unknown flair: default the post to unsolved.
                            cur.execute('SELECT * FROM clippy_flair WHERE id=?', [pid])
                            if not cur.fetchone():
                                print('\tAssigning Flair')
                                post.set_flair(flair_text=FLAIR_UNSOLVED,
                                               flair_css_class="notsolvedcase")
            sql.commit()
        print('\tClippyFlair finished')
class ClippyReference:
    """Replies to comments that mention a known reference key (an Excel
    function name) with the matching documentation snippet."""

    def __init__(self):
        # Key/value reference data is loaded once from DICT_FILE (JSON).
        with open(DICT_FILE, 'r') as handle:
            self.DICT = json.loads(handle.read())

    def levenshtein(self, s1, s2):
        """Return the Levenshtein edit distance between *s1* and *s2*.

        Classic dynamic-programming formulation; see
        http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
        """
        if len(s1) < len(s2):
            return self.levenshtein(s2, s1)
        if len(s2) == 0:
            return len(s1)
        previous = list(range(len(s2) + 1))
        for row, ch1 in enumerate(s1):
            current = [row + 1]
            for col, ch2 in enumerate(s2):
                inserted = previous[col + 1] + 1
                deleted = current[col] + 1
                substituted = previous[col] + (ch1 != ch2)
                current.append(min(inserted, deleted, substituted))
            previous = current
        return previous[-1]

    def findsuper(self, comment, tolerance=1):
        """Fuzzy lookup: slide a window over *comment* and collect keys
        within *tolerance* edits of the window text (slow path)."""
        results = []
        used = []
        for key in self.DICT:
            width = len(key.split())
            words = comment.split()
            for start in range(len(words) + 1):
                window = ' '.join(words[start:start + width])
                if self.levenshtein(key, window) <= tolerance and key not in used:
                    used.append(key)
                    entry = DICT_RESULT_FORM.replace('_key_', key)
                    results.append(entry.replace('_value_', self.DICT[key]))
        return results

    def findsimple(self, comment):
        """Exact (case-insensitive) substring lookup (fast path)."""
        hits = []
        for key in self.DICT:
            if key.lower() in comment.lower():
                entry = DICT_RESULT_FORM.replace('_key_', key)
                hits.append(entry.replace('_value_', self.DICT[key]))
        return hits

    def receive(self, comments):
        """Scan *comments*, reply with any reference matches, and mark
        each comment as handled in the database."""
        lev = "True" if DICT_LEVENSHTEIN else "False"
        print('\tClippyReference received comments (Lev: %s)' % lev)
        for comment in comments:
            cid = comment.id
            try:
                cauthor = comment.author.name
                cur.execute('SELECT * FROM clippy_reference WHERE ID=?', [cid])
                if not cur.fetchone():
                    print('\t' + cid)
                    if cauthor.lower() != r.user.name.lower():
                        cbody = comment.body.lower()
                        if DICT_LEVENSHTEIN is True:
                            results = self.findsuper(cbody)
                        else:
                            results = self.findsimple(cbody)
                        if DICT_TRIGGER.lower() in cbody.lower() and len(results) == 0:
                            # Explicit request, but nothing matched.
                            results.append(DICT_FAIL)
                        if len(results) > 0:
                            newcomment = '\n\n'.join(results)
                            print('\t\tReplying to %s with %d items...' %
                                  (cauthor, len(results)), end="")
                            sys.stdout.flush()
                            comment.reply(newcomment)
                            print('done.')
                    cur.execute('INSERT INTO clippy_reference VALUES(?)', [cid])
                    sql.commit()
            except AttributeError:
                # Comment author was deleted; skip.
                pass
        print('\tClippyReference finished')
class ClippyWelcome:
    """Sends a one-time welcome to users making their first submission
    (the actual PM call is currently commented out)."""

    def receive(self, posts):
        """Record first-time submitters and greet them."""
        print('\tClippyWelcome received submissions')
        for post in posts:
            try:
                author = post.author.name
                pid = post.id
                cur.execute('SELECT * FROM clippy_welcome WHERE NAME=?', [author])
                if not cur.fetchone():
                    print('\t' + pid)
                    print('\t\tFound new user: ' + author)
                    print('\t\tSending message...', end="")
                    sys.stdout.flush()
                    #r.send_message(author, WELCOME_SUBJECT, WELCOME_MESSAGE%author, captcha=None)
                    cur.execute('INSERT INTO clippy_welcome VALUES(?, ?)', (author, pid))
                    print('done.')
                    sql.commit()
            except AttributeError:
                # Post author was deleted; nothing to greet.
                pass
        print('\tClippyWelcome finished')
def getTime(as_unix):
    """Return the current UTC time.

    Args:
        as_unix: when ``False`` (identity-checked, matching the original
            behaviour) return an aware ``datetime``; for ANY other value
            return the Unix timestamp as a float.

    Note: the parameter was previously named ``bool``, shadowing the
    builtin; callers pass it positionally, so the rename is safe.
    """
    now = datetime.datetime.now(datetime.timezone.utc)
    if as_unix is False:
        return now
    return now.timestamp()
def clippy_manager():
    """Run one full moderation pass: feed new comments to the reference
    and points components, then new submissions to the welcome and
    flair components. Any error is printed and swallowed so the main
    loop keeps running."""
    try:
        sub = r.get_subreddit(SUBREDDIT)
        print('Getting new comments')
        comments = list(sub.get_comments(limit=MAXPOSTS))
        clippyreference.receive(comments)
        clippypoints.receive(comments)
        print('Getting new submissions')
        submissions = list(sub.get_new(limit=MAXPOSTS))
        clippywelcome.receive(submissions)
        clippyflair.receive(submissions)
    except Exception:
        traceback.print_exc()
if __name__ == "__main__":
    # Open (or create) the bot's SQLite state database and ensure every
    # table the components rely on exists.
    sql = sqlite3.connect('superclippy.db')
    cur = sql.cursor()
    cur.execute('CREATE TABLE IF NOT EXISTS clippy_welcome(NAME TEXT, ID TEXT)')
    cur.execute('CREATE TABLE IF NOT EXISTS clippy_reference(ID TEXT)')
    cur.execute('CREATE TABLE IF NOT EXISTS clippy_points(ID TEXT)')
    cur.execute('CREATE TABLE IF NOT EXISTS clippy_points_s(ID TEXT, count INT)')
    cur.execute('CREATE TABLE IF NOT EXISTS clippy_flair(id TEXT)')
    print('Loaded SQL Database')
    sql.commit()
    if PLAY_BOOT_SOUND:
        # Best-effort startup chime; winsound is Windows-only, so any
        # failure here is deliberately ignored.
        try:
            import winsound
            import threading
            def bootsound():
                winsound.PlaySound('boot.wav', winsound.SND_FILENAME)
            soundthread = threading.Thread(target=bootsound)
            soundthread.daemon = True
            soundthread.start()
        except Exception:
            pass
    print('Logging in...', end="")
    # A local `bot` module, if present, overrides the default useragent.
    try:
        import bot
        USERAGENT = bot.aG
    except ImportError:
        pass
    sys.stdout.flush()
    # Authenticate against reddit via OAuth2.
    r = praw.Reddit(USERAGENT)
    r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
    r.refresh_access_information(APP_REFRESH)
    print('done.')
    # Instantiate each bot component once; they share the r/cur/sql globals.
    print('Starting Points...', end="")
    clippypoints = ClippyPoints()
    print('done.')
    print('Starting Welcome...', end="")
    clippywelcome = ClippyWelcome()
    print('done.')
    print('Starting Flair...', end="")
    clippyflair = ClippyFlairReminder()
    print('done.')
    print('Starting Reference...', end="")
    clippyreference = ClippyReference()
    print('done.')
    # Main loop: one full pass, then sleep WAIT seconds, forever.
    while True:
        clippy_manager()
        print('Sleeping %d seconds.\n\n' % WAIT)
        time.sleep(WAIT)
|
[
"threading.Thread",
"traceback.print_exc",
"praw.helpers.flatten_tree",
"winsound.PlaySound",
"time.sleep",
"sqlite3.connect",
"sys.stdout.flush",
"praw.Reddit",
"datetime.datetime.now"
] |
[((18652, 18696), 'datetime.datetime.now', 'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (18673, 18696), False, 'import datetime\n'), ((19260, 19293), 'sqlite3.connect', 'sqlite3.connect', (['"""superclippy.db"""'], {}), "('superclippy.db')\n", (19275, 19293), False, 'import sqlite3\n'), ((20095, 20113), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (20111, 20113), False, 'import sys\n'), ((20119, 20141), 'praw.Reddit', 'praw.Reddit', (['USERAGENT'], {}), '(USERAGENT)\n', (20130, 20141), False, 'import praw\n'), ((9619, 9642), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9640, 9642), False, 'import datetime\n'), ((20680, 20696), 'time.sleep', 'time.sleep', (['WAIT'], {}), '(WAIT)\n', (20690, 20696), False, 'import time\n'), ((19203, 19224), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (19222, 19224), False, 'import traceback\n'), ((19879, 19913), 'threading.Thread', 'threading.Thread', ([], {'target': 'bootsound'}), '(target=bootsound)\n', (19895, 19913), False, 'import threading\n'), ((19808, 19861), 'winsound.PlaySound', 'winsound.PlaySound', (['"""boot.wav"""', 'winsound.SND_FILENAME'], {}), "('boot.wav', winsound.SND_FILENAME)\n", (19826, 19861), False, 'import winsound\n'), ((18302, 18320), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (18318, 18320), False, 'import sys\n'), ((10268, 10308), 'praw.helpers.flatten_tree', 'praw.helpers.flatten_tree', (['post.comments'], {}), '(post.comments)\n', (10293, 10308), False, 'import praw\n'), ((17594, 17612), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (17610, 17612), False, 'import sys\n')]
|
from code.BestBuyScraper import BestBuyScraper
import re
# Unit tests for BestBuyScraper.py
# Tests the object construction, In stock and out stock cases
# @author qchen59
# Product page expected to be purchasable at the time of writing.
InStockUrl = "https://www.bestbuy.com/site/dell-s2721dgf-27-gaming-ips-qhd-freesync-and-g-sync-compatible-monitor-with-hdr-displayport-hdmi/6421624.p?skuId=6421624"
# Product page expected to be sold out (PS5 console).
OutOfStockUrl = (
    "https://www.bestbuy.com/site/sony-playstation-5-console/6426149.p?skuId=6426149"
)
# Shared scraper instance exercised by test_init below.
bestBuyScraper = BestBuyScraper(InStockUrl)
# Tests for object creation, and url variable initiation
def test_init():
    """
    The module-level scraper must construct successfully and remember
    the URL it was handed.
    """
    assert bestBuyScraper is not None
    assert bestBuyScraper.url == InStockUrl
def test_InStock():
    """
    job() must report "In Stock" for a product page that is currently
    available on www.bestbuy.com.
    """
    scraper = BestBuyScraper(InStockUrl)
    status, price = scraper.job()
    assert status == "In Stock", "Should be In Stock"
def test_OutOfStock():
    """
    job() must report "Out of Stock" for a product page that is sold
    out on www.bestbuy.com.
    """
    scraper = BestBuyScraper(OutOfStockUrl)
    status, price = scraper.job()
    assert status == "Out of Stock", "Should be Out of Stock"
# if __name__ == "__main__":
# test_init()
# test_InStock()
# test_OutOfStock()
|
[
"code.BestBuyScraper.BestBuyScraper"
] |
[((462, 488), 'code.BestBuyScraper.BestBuyScraper', 'BestBuyScraper', (['InStockUrl'], {}), '(InStockUrl)\n', (476, 488), False, 'from code.BestBuyScraper import BestBuyScraper\n'), ((922, 948), 'code.BestBuyScraper.BestBuyScraper', 'BestBuyScraper', (['InStockUrl'], {}), '(InStockUrl)\n', (936, 948), False, 'from code.BestBuyScraper import BestBuyScraper\n'), ((1270, 1299), 'code.BestBuyScraper.BestBuyScraper', 'BestBuyScraper', (['OutOfStockUrl'], {}), '(OutOfStockUrl)\n', (1284, 1299), False, 'from code.BestBuyScraper import BestBuyScraper\n')]
|
import json
from rest_framework.request import Request
from django.template.loader import get_template
from rest_framework import generics
from api.pdf import render as render_pdf
from django.http import (
HttpResponse
)
class SurveyPdfView(generics.GenericAPIView):
    """Render a named survey's responses into a downloadable PDF."""

    # FIXME - restore authentication?
    permission_classes = ()  # permissions.IsAuthenticated,)

    def post(self, request: Request, name=None):
        """Build the PDF for survey *name* from the JSON in POST['data']."""
        template_name = "survey-{}.html".format(name)
        answers = json.loads(request.POST["data"])
        body_html = get_template(template_name).render(answers)
        if name == "primary":
            # Primary survey: one instruction sheet followed by four
            # copies of the rendered survey.
            instructions_html = get_template("instructions-primary.html").render(answers)
            pdf_content = render_pdf(instructions_html, body_html,
                                      body_html, body_html, body_html)
        else:
            pdf_content = render_pdf(body_html)
        response = HttpResponse(content_type="application/pdf")
        response["Content-Disposition"] = 'attachment; filename="report.pdf"'
        response.write(pdf_content)
        return response
|
[
"api.pdf.render",
"django.http.HttpResponse",
"json.loads",
"django.template.loader.get_template"
] |
[((555, 587), 'json.loads', 'json.loads', (["request.POST['data']"], {}), "(request.POST['data'])\n", (565, 587), False, 'import json\n'), ((658, 680), 'django.template.loader.get_template', 'get_template', (['tpl_name'], {}), '(tpl_name)\n', (670, 680), False, 'from django.template.loader import get_template\n'), ((1088, 1132), 'django.http.HttpResponse', 'HttpResponse', ([], {'content_type': '"""application/pdf"""'}), "(content_type='application/pdf')\n", (1100, 1132), False, 'from django.http import HttpResponse\n'), ((794, 835), 'django.template.loader.get_template', 'get_template', (['"""instructions-primary.html"""'], {}), "('instructions-primary.html')\n", (806, 835), False, 'from django.template.loader import get_template\n'), ((984, 1001), 'api.pdf.render', 'render_pdf', (['*docs'], {}), '(*docs)\n', (994, 1001), True, 'from api.pdf import render as render_pdf\n'), ((1043, 1067), 'api.pdf.render', 'render_pdf', (['html_content'], {}), '(html_content)\n', (1053, 1067), True, 'from api.pdf import render as render_pdf\n')]
|
# flake8: noqa
import os
from os.path import join
from rastervision2.core.pipeline import *
from rastervision2.core.backend import *
from rastervision2.core.data import *
from rastervision2.core.analyzer import *
from rastervision2.pytorch_backend import *
from rastervision2.pytorch_learner import *
from rastervision2.examples.utils import get_scene_info, save_image_crop
def get_config(runner, test=False):
    """Build the SemanticSegmentationConfig for the ISPRS Potsdam example.

    Args:
        runner: Raster Vision runner name; 'inprocess' selects local
            filesystem paths, anything else selects the S3 buckets.
        test: when True, shrink the job to one train/validation scene
            and work on small image crops for a quick smoke test.
    """
    # Local vs. remote storage locations, chosen by runner.
    if runner in ['inprocess']:
        raw_uri = '/opt/data/raw-data/isprs-potsdam/'
        processed_uri = '/opt/data/examples/potsdam/processed-data'
        root_uri = '/opt/data/examples/potsdam/local-output/'
    else:
        raw_uri = 's3://raster-vision-raw-data/isprs-potsdam'
        processed_uri = 's3://raster-vision-lf-dev/examples/potsdam/processed-data'
        root_uri = 's3://raster-vision-lf-dev/examples/potsdam/remote-output'

    # Potsdam tile ids to use for training and validation.
    train_ids = [
        '2-10', '2-11', '3-10', '3-11', '4-10', '4-11', '4-12', '5-10', '5-11',
        '5-12', '6-10', '6-11', '6-7', '6-9', '7-10', '7-11', '7-12', '7-7',
        '7-8', '7-9'
    ]
    val_ids = ['2-12', '3-12', '6-12']

    if test:
        # Smoke-test mode: a single scene on each side.
        train_ids = train_ids[0:1]
        val_ids = val_ids[0:1]

    class_config = ClassConfig(
        names=[
            'Car', 'Building', 'Low Vegetation', 'Tree', 'Impervious',
            'Clutter'
        ],
        colors=[
            '#ffff00', '#0000ff', '#00ffff', '#00ff00', '#ffffff', '#ff0000'
        ])

    def make_scene(id):
        # Build one SceneConfig for a tile id like '2-10'.
        # Raw file names use underscores, not hyphens.
        id = id.replace('-', '_')
        raster_uri = '{}/4_Ortho_RGBIR/top_potsdam_{}_RGBIR.tif'.format(
            raw_uri, id)
        label_uri = '{}/5_Labels_for_participants/top_potsdam_{}_label.tif'.format(
            raw_uri, id)

        if test:
            # In test mode, crop a small 600px window of both the image
            # and its labels and point the scene at the crops instead.
            crop_uri = join(processed_uri, 'crops',
                            os.path.basename(raster_uri))
            label_crop_uri = join(processed_uri, 'crops',
                                  os.path.basename(label_uri))
            save_image_crop(
                raster_uri,
                crop_uri,
                label_uri=label_uri,
                label_crop_uri=label_crop_uri,
                size=600,
                vector_labels=False)
            raster_uri = crop_uri
            label_uri = label_crop_uri

        # infrared, red, green
        channel_order = [3, 0, 1]
        raster_source = RasterioSourceConfig(
            uris=[raster_uri], channel_order=channel_order)

        # Using with_rgb_class_map because label TIFFs have classes encoded as
        # RGB colors.
        label_source = SemanticSegmentationLabelSourceConfig(
            rgb_class_config=class_config,
            raster_source=RasterioSourceConfig(uris=[label_uri]))

        # URI will be injected by scene config.
        # Using rgb=True because we want prediction TIFFs to be in
        # RGB format.
        label_store = SemanticSegmentationLabelStoreConfig(
            rgb=True, vector_output=[PolygonVectorOutputConfig(class_id=0)])

        scene = SceneConfig(
            id=id,
            raster_source=raster_source,
            label_source=label_source,
            label_store=label_store)
        return scene

    dataset = DatasetConfig(
        class_config=class_config,
        train_scenes=[make_scene(id) for id in train_ids],
        validation_scenes=[make_scene(id) for id in val_ids])

    # Sliding-window chipping with non-overlapping 300px chips.
    train_chip_sz = 300
    chip_options = SemanticSegmentationChipOptions(
        window_method='sliding', stride=300)

    backend = PyTorchSemanticSegmentationConfig(
        model=SemanticSegmentationModelConfig(backbone='resnet50'),
        solver=SolverConfig(
            lr=1e-4,
            num_epochs=10,
            test_num_epochs=2,
            batch_sz=8,
            one_cycle=True),
        log_tensorboard=True,
        run_tensorboard=False)

    return SemanticSegmentationConfig(
        root_uri=root_uri,
        dataset=dataset,
        backend=backend,
        train_chip_sz=train_chip_sz,
        chip_options=chip_options,
        debug=test)
|
[
"os.path.basename",
"rastervision2.examples.utils.save_image_crop"
] |
[((1971, 2096), 'rastervision2.examples.utils.save_image_crop', 'save_image_crop', (['raster_uri', 'crop_uri'], {'label_uri': 'label_uri', 'label_crop_uri': 'label_crop_uri', 'size': '(600)', 'vector_labels': '(False)'}), '(raster_uri, crop_uri, label_uri=label_uri, label_crop_uri=\n label_crop_uri, size=600, vector_labels=False)\n', (1986, 2096), False, 'from rastervision2.examples.utils import get_scene_info, save_image_crop\n'), ((1808, 1836), 'os.path.basename', 'os.path.basename', (['raster_uri'], {}), '(raster_uri)\n', (1824, 1836), False, 'import os\n'), ((1930, 1957), 'os.path.basename', 'os.path.basename', (['label_uri'], {}), '(label_uri)\n', (1946, 1957), False, 'import os\n')]
|
import numpy as np
import pickle, os, ntpath, cv2
tot_list = []
img_list = []
label_list = []
pickle_dict = {}
load_path = ''
def _load_data(path):
count = 0
for root, dirs, files in os.walk(path):
for file in files:
imgpath = os.path.join(root, file)
tot_list.append(imgpath)
np.random.shuffle(tot_list)
for img in tot_list:
image = cv2.imread(img)
image = cv2.resize(image, (192, 192), cv2.INTER_CUBIC)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
label = [1, 0] if ntpath.basename(img)[0] == '1' else [0, 1]
img_list.append(image)
label_list.append(label)
count += 1
print('>> 총 {} 데이터 loading !'.format(count))
tot_img = np.asarray(img_list)
pickle_dict[b'labels'] = label_list
pickle_dict[b'data'] = tot_img
with open('database/rgb_data/gelontoxon_data', 'wb') as data:
pickle.dump(pickle_dict, data)
print('>> pickle dumpling 완료 ! ')
if __name__ == '__main__':
path = '/home/kyh/dataset/gelontoxon/'
_load_data(path)
|
[
"pickle.dump",
"numpy.random.shuffle",
"ntpath.basename",
"cv2.cvtColor",
"numpy.asarray",
"os.walk",
"cv2.imread",
"os.path.join",
"cv2.resize"
] |
[((193, 206), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (200, 206), False, 'import pickle, os, ntpath, cv2\n'), ((324, 351), 'numpy.random.shuffle', 'np.random.shuffle', (['tot_list'], {}), '(tot_list)\n', (341, 351), True, 'import numpy as np\n'), ((747, 767), 'numpy.asarray', 'np.asarray', (['img_list'], {}), '(img_list)\n', (757, 767), True, 'import numpy as np\n'), ((394, 409), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (404, 409), False, 'import pickle, os, ntpath, cv2\n'), ((426, 472), 'cv2.resize', 'cv2.resize', (['image', '(192, 192)', 'cv2.INTER_CUBIC'], {}), '(image, (192, 192), cv2.INTER_CUBIC)\n', (436, 472), False, 'import pickle, os, ntpath, cv2\n'), ((489, 527), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (501, 527), False, 'import pickle, os, ntpath, cv2\n'), ((920, 950), 'pickle.dump', 'pickle.dump', (['pickle_dict', 'data'], {}), '(pickle_dict, data)\n', (931, 950), False, 'import pickle, os, ntpath, cv2\n'), ((257, 281), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (269, 281), False, 'import pickle, os, ntpath, cv2\n'), ((555, 575), 'ntpath.basename', 'ntpath.basename', (['img'], {}), '(img)\n', (570, 575), False, 'import pickle, os, ntpath, cv2\n')]
|
from pathlib import Path
import decouple
import django_heroku
import psycopg2
import dj_database_url
from corsheaders.defaults import default_headers
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = decouple.config('DJANGO_DEBUG')
# set SECRET_KEY based on value of DEBUG
if DEBUG:
SECRET_KEY = decouple.config('DJANGO_SECRET_KEY_DEVELOPMENT')
else:
SECRET_KEY = decouple.config('DJANGO_SECRET_KEY_PRODUCTION')
STATIC_ROOT = BASE_DIR / 'static'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'whitenoise',
'users',
'contacts',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
},
}
db_from_env = dj_database_url.config(conn_max_age=600)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Secret for encoding User refresh tokens
REFRESH_TOKEN_SECRET = decouple.config('DJANGO_REFRESH_TOKEN_SECRET')
# Use custom user model for authentication
AUTH_USER_MODEL = 'users.User'
# define default authentication method in DRF
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'users.authentication.SafeJWTAuthentication'
],
'DEFAULT_RENDERER_CLASSES': (
'djangorestframework_camel_case.render.CamelCaseJSONRenderer',
'djangorestframework_camel_case.render.CamelCaseBrowsableAPIRenderer',
# Any other renders
),
'DEFAULT_PARSER_CLASSES': (
# If you use MultiPartFormParser or FormParser, we also have a camel case version
'djangorestframework_camel_case.parser.CamelCaseFormParser',
'djangorestframework_camel_case.parser.CamelCaseMultiPartParser',
'djangorestframework_camel_case.parser.CamelCaseJSONParser',
# Any other parsers
),
}
# define refresh token lifetime
REFRESH_TOKEN_EXPIRY = {
'days': 7,
'hours': 0,
'minutes': 0,
'seconds': 0
}
# define refresh token lifetime
ACCESS_TOKEN_EXPIRY = {
'days': 0,
'hours': 10,
'minutes': 0,
'seconds': 10
}
# to accept cookies via axios
CORS_ALLOW_CREDENTIALS = True
CSRF_COOKIE_HTTPONLY = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SAMESITE='None'
CSRF_COOKIE_SECURE = True
CORS_ALLOWED_ORIGINS = [
'http://localhost:3000',
'http://localhost:8000',
'https://contaxapp.herokuapp.com',
'https://contaxapp.netlify.app',
# other allowed origins...
]
CSRF_TRUSTED_ORIGINS = [
'http://localhost:3000',
'http://localhost:8000',
'https://contaxapp.herokuapp.com',
'contaxapp.netlify.app',
# other allowed origins...
]
ALLOWED_HOSTS = [
'127.0.0.1',
'localhost',
'https://contaxapp.herokuapp.com',
'.netlify.app',
# other allowed hosts...
]
CORS_ALLOW_HEADERS = list(default_headers) + [
'refresh_token',
'withcredentials',
'access-control-allow-origin',
]
django_heroku.settings(locals())
|
[
"decouple.config",
"dj_database_url.config",
"pathlib.Path"
] |
[((343, 374), 'decouple.config', 'decouple.config', (['"""DJANGO_DEBUG"""'], {}), "('DJANGO_DEBUG')\n", (358, 374), False, 'import decouple\n'), ((2216, 2256), 'dj_database_url.config', 'dj_database_url.config', ([], {'conn_max_age': '(600)'}), '(conn_max_age=600)\n', (2238, 2256), False, 'import dj_database_url\n'), ((3351, 3397), 'decouple.config', 'decouple.config', (['"""DJANGO_REFRESH_TOKEN_SECRET"""'], {}), "('DJANGO_REFRESH_TOKEN_SECRET')\n", (3366, 3397), False, 'import decouple\n'), ((444, 492), 'decouple.config', 'decouple.config', (['"""DJANGO_SECRET_KEY_DEVELOPMENT"""'], {}), "('DJANGO_SECRET_KEY_DEVELOPMENT')\n", (459, 492), False, 'import decouple\n'), ((516, 563), 'decouple.config', 'decouple.config', (['"""DJANGO_SECRET_KEY_PRODUCTION"""'], {}), "('DJANGO_SECRET_KEY_PRODUCTION')\n", (531, 563), False, 'import decouple\n'), ((228, 242), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (232, 242), False, 'from pathlib import Path\n')]
|
from flask import Flask, json
companies = [{"id": 1, "name": "Company One"}, {"id": 2, "name": "Company Two"}]
# initialize Flask
# create a Flask object, and assign it to the variable name api.
api = Flask(__name__)
# declare a route for endpoint.
# When a consumer visits /companies using a GET request, the list of two companies will be returned.
@api.route('/companies', methods=['GET'])
def get_companies():
return json.dumps(companies)
# status code wasn’t required because 200 is Flask’s default.
@api.route('/companies', methods=['POST'])
def post_companies():
return json.dumps({"success": True}), 201
if __name__ == '__main__':
api.run()
|
[
"flask.json.dumps",
"flask.Flask"
] |
[((203, 218), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (208, 218), False, 'from flask import Flask, json\n'), ((426, 447), 'flask.json.dumps', 'json.dumps', (['companies'], {}), '(companies)\n', (436, 447), False, 'from flask import Flask, json\n'), ((587, 616), 'flask.json.dumps', 'json.dumps', (["{'success': True}"], {}), "({'success': True})\n", (597, 616), False, 'from flask import Flask, json\n')]
|
import numpy as np
from . import _colormixer
from . import _histograms
import threading
from ...util import img_as_ubyte
# utilities to make life easier for plugin writers.
import multiprocessing
CPU_COUNT = multiprocessing.cpu_count()
class GuiLockError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class WindowManager(object):
''' A class to keep track of spawned windows,
and make any needed callback once all the windows,
are closed.'''
def __init__(self):
self._windows = []
self._callback = None
self._callback_args = ()
self._callback_kwargs = {}
self._gui_lock = False
self._guikit = ''
def _check_locked(self):
if not self._gui_lock:
raise GuiLockError(\
'Must first acquire the gui lock before using this image manager')
def _exec_callback(self):
if self._callback:
self._callback(*self._callback_args, **self._callback_kwargs)
def acquire(self, kit):
if self._gui_lock:
raise GuiLockError(\
'The gui lock can only be acquired by one toolkit per session. \
The lock is already acquired by %s' % self._guikit)
else:
self._gui_lock = True
self._guikit = str(kit)
def _release(self, kit):
# releaseing the lock will lose all references to currently
# tracked images and the callback.
# this function is private for reason!
self._check_locked()
if str(kit) == self._guikit:
self._windows = []
self._callback = None
self._callback_args = ()
self._callback_kwargs = {}
self._gui_lock = False
self._guikit = ''
else:
raise RuntimeError('Only the toolkit that owns the lock may '
'release it')
def add_window(self, win):
self._check_locked()
self._windows.append(win)
def remove_window(self, win):
self._check_locked()
try:
self._windows.remove(win)
except ValueError:
print('Unable to find referenced window in tracked windows.')
print('Ignoring...')
else:
if len(self._windows) == 0:
self._exec_callback()
def register_callback(self, cb, *cbargs, **cbkwargs):
self._check_locked()
self._callback = cb
self._callback_args = cbargs
self._callback_kwargs = cbkwargs
def has_windows(self):
if len(self._windows) > 0:
return True
else:
return False
window_manager = WindowManager()
def prepare_for_display(npy_img):
'''Convert a 2D or 3D numpy array of any dtype into a
3D numpy array with dtype uint8. This array will
be suitable for use in passing to gui toolkits for
image display purposes.
Parameters
----------
npy_img : ndarray, 2D or 3D
The image to convert for display
Returns
-------
out : ndarray, 3D dtype=np.uint8
The converted image. This is guaranteed to be a contiguous array.
Notes
-----
If the input image is floating point, it is assumed that the data
is in the range of 0.0 - 1.0. No check is made to assert this
condition. The image is then scaled to be in the range 0 - 255
and then cast to np.uint8
For all other dtypes, the array is simply cast to np.uint8
If a 2D array is passed, the single channel is replicated
to the 2nd and 3rd channels.
If the array contains an alpha channel, this channel is
ignored.
'''
if npy_img.ndim < 2:
raise ValueError('Image must be 2D or 3D array')
height = npy_img.shape[0]
width = npy_img.shape[1]
out = np.empty((height, width, 3), dtype=np.uint8)
npy_img = img_as_ubyte(npy_img)
if npy_img.ndim == 2 or \
(npy_img.ndim == 3 and npy_img.shape[2] == 1):
npy_plane = npy_img.reshape((height, width))
out[:, :, 0] = npy_plane
out[:, :, 1] = npy_plane
out[:, :, 2] = npy_plane
elif npy_img.ndim == 3:
if npy_img.shape[2] == 3 or npy_img.shape[2] == 4:
out[:, :, :3] = npy_img[:, :, :3]
else:
raise ValueError('Image must have 1, 3, or 4 channels')
else:
raise ValueError('Image must have 2 or 3 dimensions')
return out
def histograms(image, nbins):
'''Calculate the channel histograms of the current image.
Parameters
----------
image : ndarray, ndim=3, dtype=np.uint8
Input image.
nbins : int
The number of bins.
Returns
-------
out : (rcounts, gcounts, bcounts, vcounts)
The binned histograms of the RGB channels and intensity values.
This is a NAIVE histogram routine, meant primarily for fast display.
'''
return _histograms.histograms(image, nbins)
class ImgThread(threading.Thread):
def __init__(self, func, *args):
super(ImgThread, self).__init__()
self.func = func
self.args = args
def run(self):
self.func(*self.args)
class ThreadDispatch(object):
def __init__(self, img, stateimg, func, *args):
height = img.shape[0]
self.cores = CPU_COUNT
self.threads = []
self.chunks = []
if self.cores == 1:
self.chunks.append((img, stateimg))
elif self.cores >= 4:
self.chunks.append((img[:(height // 4), :, :],
stateimg[:(height // 4), :, :]))
self.chunks.append((img[(height // 4):(height // 2), :, :],
stateimg[(height // 4):(height // 2), :, :]))
self.chunks.append((img[(height // 2):(3 * height // 4), :, :],
stateimg[(height // 2):(3 * height // 4), :, :]
))
self.chunks.append((img[(3 * height // 4):, :, :],
stateimg[(3 * height // 4):, :, :]))
# if they don't have 1, or 4 or more, 2 is good.
else:
self.chunks.append((img[:(height // 2), :, :],
stateimg[:(height // 2), :, :]))
self.chunks.append((img[(height // 2):, :, :],
stateimg[(height // 2):, :, :]))
for i in range(len(self.chunks)):
self.threads.append(ImgThread(func, self.chunks[i][0],
self.chunks[i][1], *args))
def run(self):
for t in self.threads:
t.start()
for t in self.threads:
t.join()
class ColorMixer(object):
''' a class to manage mixing colors in an image.
The input array must be an RGB uint8 image.
The mixer maintains an original copy of the image,
and uses this copy to query the pixel data for operations.
It also makes a copy for sharing state across operations.
That is, if you add to a channel, and multiply to same channel,
the two operations are carried separately and the results
averaged together.
it modifies your array in place. This ensures that if you
bust over a threshold, you can always come back down.
The passed values to a function are always considered
absolute. Thus to threshold a channel completely you
can do mixer.add(RED, 255). Or to double the intensity
of the blue channel: mixer.multiply(BLUE, 2.)
To reverse these operations, respectively:
mixer.add(RED, 0), mixer.multiply(BLUE, 1.)
The majority of the backend is implemented in Cython,
so it should be quite quick.
'''
RED = 0
GREEN = 1
BLUE = 2
valid_channels = [RED, GREEN, BLUE]
def __init__(self, img):
if type(img) != np.ndarray:
raise ValueError('Image must be a numpy array')
if img.dtype != np.uint8:
raise ValueError('Image must have dtype uint8')
if img.ndim != 3 or img.shape[2] != 3:
raise ValueError('Image must be 3 channel MxNx3')
self.img = img
self.origimg = img.copy()
self.stateimg = img.copy()
def get_stateimage(self):
return self.stateimg
def commit_changes(self):
self.stateimg[:] = self.img[:]
def revert(self):
self.stateimg[:] = self.origimg[:]
self.img[:] = self.stateimg[:]
def set_to_stateimg(self):
self.img[:] = self.stateimg[:]
def add(self, channel, ammount):
'''Add the specified ammount to the specified channel.
Parameters
----------
channel : flag
the color channel to operate on
RED, GREED, or BLUE
ammount : integer
the ammount of color to add to the channel,
can be positive or negative.
'''
if channel not in self.valid_channels:
raise ValueError('assert_channel is not a valid channel.')
pool = ThreadDispatch(self.img, self.stateimg,
_colormixer.add, channel, ammount)
pool.run()
def multiply(self, channel, ammount):
'''Mutliply the indicated channel by the specified value.
Parameters
----------
channel : flag
the color channel to operate on
RED, GREED, or BLUE
ammount : integer
the ammount of color to add to the channel,
can be positive or negative.
'''
if channel not in self.valid_channels:
raise ValueError('assert_channel is not a valid channel.')
pool = ThreadDispatch(self.img, self.stateimg,
_colormixer.multiply, channel, ammount)
pool.run()
def brightness(self, factor, offset):
'''Adjust the brightness off an image with an offset and factor.
Parameters
----------
offset : integer
The ammount to add to each channel.
factor : float
The factor to multiply each channel by.
result = clip((pixel + offset)*factor)
'''
pool = ThreadDispatch(self.img, self.stateimg,
_colormixer.brightness, factor, offset)
pool.run()
def sigmoid_gamma(self, alpha, beta):
pool = ThreadDispatch(self.img, self.stateimg,
_colormixer.sigmoid_gamma, alpha, beta)
pool.run()
def gamma(self, gamma):
pool = ThreadDispatch(self.img, self.stateimg,
_colormixer.gamma, gamma)
pool.run()
def hsv_add(self, h_amt, s_amt, v_amt):
'''Adjust the H, S, V channels of an image by a constant ammount.
This is similar to the add() mixer function, but operates over the
entire image at once. Thus all three additive values, H, S, V, must
be supplied simultaneously.
Parameters
----------
h_amt : float
The ammount to add to the hue (-180..180)
s_amt : float
The ammount to add to the saturation (-1..1)
v_amt : float
The ammount to add to the value (-1..1)
'''
pool = ThreadDispatch(self.img, self.stateimg,
_colormixer.hsv_add, h_amt, s_amt, v_amt)
pool.run()
def hsv_multiply(self, h_amt, s_amt, v_amt):
'''Adjust the H, S, V channels of an image by a constant ammount.
This is similar to the add() mixer function, but operates over the
entire image at once. Thus all three additive values, H, S, V, must
be supplied simultaneously.
Note that since hue is in degrees, it makes no sense to multiply
that channel, thus an add operation is performed on the hue. And the
values given for h_amt, should be the same as for hsv_add
Parameters
----------
h_amt : float
The ammount to to add to the hue (-180..180)
s_amt : float
The ammount to multiply to the saturation (0..1)
v_amt : float
The ammount to multiply to the value (0..1)
'''
pool = ThreadDispatch(self.img, self.stateimg,
_colormixer.hsv_multiply, h_amt, s_amt, v_amt)
pool.run()
def rgb_2_hsv_pixel(self, R, G, B):
'''Convert an RGB value to HSV
Parameters
----------
R : int
Red value
G : int
Green value
B : int
Blue value
Returns
-------
out : (H, S, V) Floats
The HSV values
'''
H, S, V = _colormixer.py_rgb_2_hsv(R, G, B)
return (H, S, V)
def hsv_2_rgb_pixel(self, H, S, V):
'''Convert an HSV value to RGB
Parameters
----------
H : float
Hue value
S : float
Saturation value
V : float
Intensity value
Returns
-------
out : (R, G, B) ints
The RGB values
'''
R, G, B = _colormixer.py_hsv_2_rgb(H, S, V)
return (R, G, B)
|
[
"numpy.empty",
"multiprocessing.cpu_count"
] |
[((210, 237), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (235, 237), False, 'import multiprocessing\n'), ((3854, 3898), 'numpy.empty', 'np.empty', (['(height, width, 3)'], {'dtype': 'np.uint8'}), '((height, width, 3), dtype=np.uint8)\n', (3862, 3898), True, 'import numpy as np\n')]
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for dealing with the python unittest module."""
import fnmatch
import re
import sys
import unittest
class _TextTestResult(unittest._TextTestResult):
"""A test result class that can print formatted text results to a stream.
Results printed in conformance with gtest output format, like:
[ RUN ] autofill.AutofillTest.testAutofillInvalid: "test desc."
[ OK ] autofill.AutofillTest.testAutofillInvalid
[ RUN ] autofill.AutofillTest.testFillProfile: "test desc."
[ OK ] autofill.AutofillTest.testFillProfile
[ RUN ] autofill.AutofillTest.testFillProfileCrazyCharacters: "Test."
[ OK ] autofill.AutofillTest.testFillProfileCrazyCharacters
"""
def __init__(self, stream, descriptions, verbosity):
unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
self._fails = set()
def _GetTestURI(self, test):
return '%s.%s.%s' % (test.__class__.__module__,
test.__class__.__name__,
test._testMethodName)
def getDescription(self, test):
return '%s: "%s"' % (self._GetTestURI(test), test.shortDescription())
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self.stream.writeln('[ RUN ] %s' % self.getDescription(test))
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
self.stream.writeln('[ OK ] %s' % self._GetTestURI(test))
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
self.stream.writeln('[ ERROR ] %s' % self._GetTestURI(test))
self._fails.add(self._GetTestURI(test))
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
self.stream.writeln('[ FAILED ] %s' % self._GetTestURI(test))
self._fails.add(self._GetTestURI(test))
def getRetestFilter(self):
return ':'.join(self._fails)
class TextTestRunner(unittest.TextTestRunner):
"""Test Runner for displaying test results in textual format.
Results are displayed in conformance with google test output.
"""
def __init__(self, verbosity=1):
unittest.TextTestRunner.__init__(self, stream=sys.stderr,
verbosity=verbosity)
def _makeResult(self):
return _TextTestResult(self.stream, self.descriptions, self.verbosity)
def GetTestsFromSuite(suite):
"""Returns all the tests from a given test suite."""
tests = []
for x in suite:
if isinstance(x, unittest.TestSuite):
tests += GetTestsFromSuite(x)
else:
tests += [x]
return tests
def GetTestNamesFromSuite(suite):
"""Returns a list of every test name in the given suite."""
return map(lambda x: GetTestName(x), GetTestsFromSuite(suite))
def GetTestName(test):
"""Gets the test name of the given unittest test."""
return '.'.join([test.__class__.__module__,
test.__class__.__name__,
test._testMethodName])
def FilterTestSuite(suite, gtest_filter):
"""Returns a new filtered tests suite based on the given gtest filter.
See https://github.com/google/googletest/blob/main/docs/advanced.md
for gtest_filter specification.
"""
return unittest.TestSuite(FilterTests(GetTestsFromSuite(suite), gtest_filter))
def FilterTests(all_tests, gtest_filter):
"""Filter a list of tests based on the given gtest filter.
Args:
all_tests: List of tests (unittest.TestSuite)
gtest_filter: Filter to apply.
Returns:
Filtered subset of the given list of tests.
"""
test_names = [GetTestName(test) for test in all_tests]
filtered_names = FilterTestNames(test_names, gtest_filter)
return [test for test in all_tests if GetTestName(test) in filtered_names]
def FilterTestNames(all_tests, gtest_filter):
"""Filter a list of test names based on the given gtest filter.
See https://github.com/google/googletest/blob/main/docs/advanced.md
for gtest_filter specification.
Args:
all_tests: List of test names.
gtest_filter: Filter to apply.
Returns:
Filtered subset of the given list of test names.
"""
pattern_groups = gtest_filter.split('-')
positive_patterns = ['*']
if pattern_groups[0]:
positive_patterns = pattern_groups[0].split(':')
negative_patterns = []
if len(pattern_groups) > 1:
negative_patterns = pattern_groups[1].split(':')
neg_pats = None
if negative_patterns:
neg_pats = re.compile('|'.join(fnmatch.translate(p) for p in
negative_patterns))
tests = []
test_set = set()
for pattern in positive_patterns:
pattern_tests = [
test for test in all_tests
if (fnmatch.fnmatch(test, pattern)
and not (neg_pats and neg_pats.match(test))
and test not in test_set)]
tests.extend(pattern_tests)
test_set.update(pattern_tests)
return tests
|
[
"unittest.TestResult.startTest",
"unittest.TestResult.addSuccess",
"unittest.TextTestRunner.__init__",
"unittest.TestResult.addError",
"unittest._TextTestResult.__init__",
"fnmatch.translate",
"unittest.TestResult.addFailure",
"fnmatch.fnmatch"
] |
[((940, 1012), 'unittest._TextTestResult.__init__', 'unittest._TextTestResult.__init__', (['self', 'stream', 'descriptions', 'verbosity'], {}), '(self, stream, descriptions, verbosity)\n', (973, 1012), False, 'import unittest\n'), ((1361, 1402), 'unittest.TestResult.startTest', 'unittest.TestResult.startTest', (['self', 'test'], {}), '(self, test)\n', (1390, 1402), False, 'import unittest\n'), ((1511, 1553), 'unittest.TestResult.addSuccess', 'unittest.TestResult.addSuccess', (['self', 'test'], {}), '(self, test)\n', (1541, 1553), False, 'import unittest\n'), ((1662, 1707), 'unittest.TestResult.addError', 'unittest.TestResult.addError', (['self', 'test', 'err'], {}), '(self, test, err)\n', (1690, 1707), False, 'import unittest\n'), ((1862, 1909), 'unittest.TestResult.addFailure', 'unittest.TestResult.addFailure', (['self', 'test', 'err'], {}), '(self, test, err)\n', (1892, 1909), False, 'import unittest\n'), ((2311, 2389), 'unittest.TextTestRunner.__init__', 'unittest.TextTestRunner.__init__', (['self'], {'stream': 'sys.stderr', 'verbosity': 'verbosity'}), '(self, stream=sys.stderr, verbosity=verbosity)\n', (2343, 2389), False, 'import unittest\n'), ((4615, 4635), 'fnmatch.translate', 'fnmatch.translate', (['p'], {}), '(p)\n', (4632, 4635), False, 'import fnmatch\n'), ((4838, 4868), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['test', 'pattern'], {}), '(test, pattern)\n', (4853, 4868), False, 'import fnmatch\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
feign blocks module
zs. elter, a. cserkaszky 2019
"""
import os
import math
import re
import numpy as np
import matplotlib.pyplot as plt
from feign.geometry import *
def isFloat(s):
try:
float(s)
return True
except ValueError:
return False
def readMu(path,column,energy):
"""The function to read attenuaton coefficients from XCOM datafiles.
Parameters
----------
path : str
path to the file (str)
column : int
column which contains the total attenuation coefficients in case more
columns are present in the file
energy : float or list of floats
energy or energies where the attenuation coefficient is needed.
Returns
-------
float or list of floats
the interpolated value(s) of the attenuaton coefficient.
"""
try:
inputfile=open(path,'r').readlines()
except FileNotFoundError:
inputfile=open(os.getcwd()+path,'r').readlines()
en=[]
mu=[]
for line in inputfile:
x=line.strip().split()
if len(x)>=1 and isFloat(x[0]):
en.append(float(x[0]))
mu.append(float(x[column]))
return np.interp(energy,en,mu)
def is_hex_color(input_string):
"""The function to assess whether a string is hex color description.
Taken from https://stackoverflow.com/questions/42876366/check-if-a-string-defines-a-color
Parameters
----------
input_string : str
String which may contain hex color description
Returns
-------
bool
True if the string is a hex format definition, False otherwise.
Examples
--------
>>> is_hex_color('notahex')
False
>>> is_hex_color('#FF0000')
True
"""
HEX_COLOR_REGEX = r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$'
regexp = re.compile(HEX_COLOR_REGEX)
if regexp.search(input_string):
return True
return False
def getID(nameKeyword, argList, keywordDict):
"""The function returns 1 string argument from a general list or arguments
The argument can be named with nameKeyword
Parameters
----------
nameKeyword : str
The keyword of the desired argument if named
argList : list
*args passed to this function
keywordDict : dict
**kwargs passed to this function
Returns
-------
str
The found string argument
"""
foundArg = None
if len(argList) == 1 and len(keywordDict) == 0:
foundArg = argList[0]
elif len(argList) == 0 and len(keywordDict) == 1 and nameKeyword in keywordDict:
foundArg = keywordDict[nameKeyword]
if foundArg is not None and isinstance(foundArg, str):
return foundArg
raise ValueError('expected 1 argument: '+nameKeyword+' string')
class Material(object):
"""A class used to represent a Material.
Parameters
----------
matID : str
ID of the material
Attributes
----------
matID : str
ID of the material
density : float
density of material in (g/cm3)
path : str
path to attenuation coefficient file
color : str
color of material when plotting
"""
_idName = 'matID'
def __init__(self, *args, **kwargs):
self._density = None
self._path = None
self._color = None
self._id = getID(self._idName, args, kwargs)
def __repr__(self):
return "Material(matID=%s)" % (self._id)
@property
def density(self):
return self._density
@property
def path(self):
return self._path
@property
def color(self):
return self._color
def set_density(self, density=None):
"""The function to set the density of the Material
Parameters
----------
density : float
density of material in g/cm2
"""
if isFloat(density):
self._density=density
else:
raise ValueError('density has to be float for Material ID="{}"'.format(self._id))
def set_path(self, path=None):
"""The function to set the path to the attenuation data of the Material.
Parameters
----------
path : tuple (str,int)
the path of the file, and the column which contains the data.
"""
if isinstance(path, tuple) and len(path)==2 and isinstance(path[0], str) and isinstance(path[1], int):
self._path=path
else:
raise ValueError(('Path has to be (str,int) tuple for Material ID="{}"'.format(self._id)))
def set_color(self, color=None):
"""The function to set the color of Material in case the geometry is plotted.
Parameters
----------
color : str
color of the material in hex format
"""
if isinstance(color, str) and is_hex_color(color):
self._color=color
else:
raise ValueError(('Color has to be hex str for Material ID="{}"'.format(self._id)))
class Pin(object):
"""A class used to represent a Pin.
With :meth:`Pin.add_region()` coaxial circles can be added to describe
the content (eg. fuel pellet, helium gap, clad).
In case no region is added, the Pin() object will behave as an empty channel
filled with the coolant material.
Parameters
----------
pinID : str
ID of the pin
Attributes
----------
pinID : str
ID of the pin
regions : list of tuples
(Material, radius) pairs to describe coaxial regions within pin, radius in cm.
materials : list of str
list of :attr:`Material.matID` identifiers within the pin
radii : list of floats
list of radii of regions within the pin, radii in cm
"""
_idName = 'pinID'
def __init__(self, *args, **kwargs):
self._regions=[]
self._materials=[]
self._radii=[]
self._id = getID(self._idName, args, kwargs)
def __repr__(self):
return "Pin(pinID=%s)" % (self._id)
@property
def regions(self):
return self._regions
@property
def materials(self):
return self._materials
@property
def radii(self):
return self._radii
def add_region(self,material=None,radius=None):
"""The function to add coaxial circles and rings to a pin.
In case of consecutive calls (ie. more regions added), the radii has to
increase.
Parameters
----------
material : Material
material filled into new region
radius :
radius of new region
Examples
--------
>>> uo2 = Material('1')
>>> he = Material('2')
>>> zr = Material('3')
>>> fuel = Pin('1')
>>> fuel.add_region(uo2,0.41)
>>> fuel.add_region(he,0.42)
>>> fuel.add_region(zr,0.48)
>>> fuel.regions
[(Material(matID=1), 0.41),
(Material(matID=2), 0.42),
(Material(matID=3), 0.48)]
"""
if isinstance(material,Material):
self._materials.append(material._id)
else:
raise TypeError('Material() object is expected')
if isFloat(radius):
if len(self._radii)>0 and self._radii[-1]>=radius:
raise ValueError('Radii are not increasing in pin #{}'.format(self._id))
else:
self._radii.append(radius)
self._regions.append((material,radius))
def checkArgvConsistency(*argv):
    """Verify that all positional arguments share the type of the first one.

    With fewer than two arguments there is nothing to compare, so the
    function returns silently.

    Raises
    ------
    TypeError
        if any argument is not an instance of the first argument's type
    """
    if len(argv) < 2:
        return
    expected = type(argv[0])
    for candidate in argv[1:]:
        if not isinstance(candidate, expected):
            raise TypeError('Inconsistent input objects: ' + str(type(candidate)) + ' != ' + str(expected))
def addIDsToDict(objectDict, *argv):
    """Register objects in *objectDict*, keyed by their ``_id`` attribute.

    All objects must be of the same type (checked via
    :func:`checkArgvConsistency`).

    Raises
    ------
    TypeError
        if the arguments are of mixed types
    ValueError
        if an object's ``_id`` is already present in *objectDict*
    """
    checkArgvConsistency(*argv)
    for obj in argv:
        if obj._id in objectDict:
            raise ValueError('ID {} is duplicated'.format(obj._id))
        objectDict[obj._id] = obj
def delIDsFromDict(objectDict, *argv):
    """Remove objects from *objectDict* by their ``_id`` attribute.

    Unknown identifiers are reported with a printed message (best-effort
    removal, no exception).

    Raises
    ------
    TypeError
        if the arguments are of mixed types, or *objectDict* is None
    """
    checkArgvConsistency(*argv)
    for obj in argv:
        if objectDict is None:
            raise TypeError('No objects added yet.')
        if obj._id in objectDict:
            del objectDict[obj._id]
        else:
            print('ID {} is not in dict yet'.format(obj._id))
class Assembly(object):
    """A class used to represent a rectangular Assembly.

    Parameters
    ----------
    N : int
        number of positions in y direction
    M : int
        number of positions in x direction

    Attributes
    ----------
    N : int
        number of positions in y direction
    M : int
        number of positions in x direction
    pitch : float
        pitch size of the lattice in cm
    pins : dict
        pins in the assembly, keyed by :attr:`Pin.pinID`
    fuelmap : 2D array
        fuelmap to describe which pins are filled in the positions
    coolant : str
        matID of the coolant (ie. material filled between pins)
    pool : Rectangle() (optional)
        pool in which the assembly is placed. Within the pool coolant material is
        filled, outside the pool surrounding material is filled.
    surrounding : str (mandatory if pool is present)
        matID of the surrounding material (ie. material filled around pool)
    source : list of str
        :attr:`Material.matID` identifiers of material emitting gamma particles
    """
    def __init__(self, N, M):
        try:
            self.N = int(N)
            self.M = int(M)
        except ValueError:
            raise ValueError('N,M has to be decimal')
        except TypeError:
            raise TypeError('N,M has to be int')
        self._pitch = None
        self._pins = None
        self._fuelmap = None
        self._coolant = None
        self._surrounding = None
        self._source = None  # TODO what if more materials emit from same pin?
        self._pool = None

    def __repr__(self):
        return "Assembly(N=%d,M=%d)" % (self.N, self.M)

    @property
    def pitch(self):
        return self._pitch

    @property
    def pool(self):
        return self._pool

    @property
    def pins(self):
        return self._pins

    @property
    def fuelmap(self):
        return self._fuelmap

    @property
    def coolant(self):
        return self._coolant

    @property
    def surrounding(self):
        return self._surrounding

    @property
    def source(self):
        return self._source

    def set_pitch(self, pitch=None):
        """The function to set the pitch of the lattice of the Assembly.

        Parameters
        ----------
        pitch : float
            pitch of lattice in cm

        Raises
        ------
        TypeError
            if pitch is not a number
        """
        if isFloat(pitch):
            self._pitch = pitch
        else:
            raise TypeError('Pitch has to be float')

    def set_pins(self, *argv):
        """The function to include Pin objects in an Assembly.

        Any previously included pins are discarded; use
        :meth:`Assembly.add_pin()` to extend an existing set.

        Parameters
        ----------
        *argv : Pin() or more Pin() objects
            Pin() objects to be included in the Assembly

        Raises
        ------
        TypeError
            if a parameter is not Pin()
        ValueError
            if the Pin is already included
        """
        self._pins = {}
        self.add_pin(*argv)

    def add_pin(self, *argv):
        """The function to add Pin objects to an Assembly, which may have
        already included pins. If one wants to rewrite the existing pins,
        then :meth:`Assembly.set_pins()` has to be called.

        Parameters
        ----------
        *argv : Pin() or more Pin() objects
            Pin() objects to be added to the Assembly

        Raises
        ------
        TypeError
            if a parameter is not Pin()
        ValueError
            if the Pin is already included
        """
        # Only the first argument is checked here; addIDsToDict verifies the
        # remaining ones are of the same type.
        if len(argv) > 0 and not isinstance(argv[0], Pin):
            raise TypeError('Inputs need to be Pin() objects')
        addIDsToDict(self._pins, *argv)

    def remove_pin(self, *argv):
        """The function to remove Pin objects from an Assembly which
        already has previously included pins.

        Parameters
        ----------
        *argv : Pin() or more Pin() objects
            Pin() objects to be removed from the Assembly

        Raises
        ------
        TypeError
            if a parameter is not Pin(), or :attr:`pins` is None
        """
        if len(argv) > 0 and not isinstance(argv[0], Pin):
            raise TypeError('Inputs need to be Pin() objects')
        delIDsFromDict(self._pins, *argv)

    def set_fuelmap(self, fuelmap=None):
        """The function to set the fuelmap of the Assembly.

        Parameters
        ----------
        fuelmap : 2D array (NxM shape)
            fuelmap of the lattice; entries are :attr:`Pin.pinID` identifiers

        Raises
        ------
        ValueError
            if the fuelmap shape does not match (N, M)
        """
        fuelmap = np.array(fuelmap)
        if fuelmap.shape[0] != self.N or fuelmap.shape[1] != self.M:
            raise ValueError('Fuelmap has wrong size')
        self._fuelmap = fuelmap

    def set_coolant(self, coolant=None):
        """The function to set the coolant material in the Assembly.

        Parameters
        ----------
        coolant : Material()
            the coolant material

        Raises
        ------
        TypeError
            if coolant is not a Material() object
        """
        if isinstance(coolant, Material):
            self._coolant = coolant._id
        else:
            raise TypeError('Material() is expected')

    def set_surrounding(self, surrounding=None):
        """The function to set the surrounding material around the Assembly.

        Parameters
        ----------
        surrounding : Material()
            the surrounding material

        Raises
        ------
        TypeError
            if surrounding is not a Material() object
        """
        if isinstance(surrounding, Material):
            self._surrounding = surrounding._id
        else:
            raise TypeError('Material() is expected')

    def set_source(self, *args):
        """The function to set the source material(s) in the Assembly.

        Parameters
        ----------
        *args : Material() instances
            the source material(s)

        Note
        ----
        Arguments that are not Material() instances are silently ignored,
        mirroring the original behaviour.
        """
        self._source = []
        for arg in args:
            if isinstance(arg, Material):
                self._source.append(arg._id)

    def set_pool(self, pool=None):
        """The function to set the pool around the Assembly.

        Parameters
        ----------
        pool : Rectangle()
            the shape of the pool

        Raises
        ------
        TypeError
            if pool is not a Rectangle() object
        """
        if isinstance(pool, Rectangle):
            self._pool = pool
        else:
            raise TypeError('Pool has to be a Rectangle() object')

    def checkComplete(self):
        """
        The function to check whether everything is defined correctly in an
        Assembly() object. Prints messages indicating any problem.

        - checks whether any attribute is not defined (pool does not need
          to be defined)
        - checks whether any pin contains any region with radius greater
          than the pitch
        - checks whether all the pins in the fuelmap are attributed to
          the assembly
        - in case a pool is defined, it is checked whether the pool is
          around the assembly.

        Returns
        -------
        bool
            True if everything is correct and complete, False otherwise
        """
        if self.pins is None or self.pitch is None or \
           self.coolant is None or self.fuelmap is None or \
           self.source is None:
            print('ERROR: Assembly is not complete.')
            return False

        if False in [r <= self.pitch/2 for pin in self.pins.values() for r in pin._radii]:
            print('ERROR: in a Pin() a radius is greater than the pitch')
            return False

        if [] in [pin._radii for pin in self.pins.values()]:
            print('Warning: a pin has no regions, considered as coolant channel')

        if False in [self.fuelmap[i][j] in self.pins for i in range(self.N) for j in range(self.M)]:
            print('ERROR: Assembly().fuelmap contains pin not included in Assembly.Pins()')
            return False

        if self.pool is None:
            print('Warning: no pool in the problem, the surrounding of the Assembly is filled with coolant material')
            self._surrounding = self._coolant
            return True

        if self.surrounding is None:
            print('ERROR: Surrounding material has to be defined if pool is defined')
            return False

        # Check that the pool is around the fuel assembly.
        # The lattice spans M positions (columns, index j) in x and N
        # positions (rows, index i) in y -- the same convention used by the
        # pin-center formula x=-p*(M-1)+j*2*p, y=p*(N-1)-i*2*p elsewhere in
        # the package -- so the half-extents of the bounding rectangle are
        # M*pitch/2 in x and N*pitch/2 in y. (A previous version swapped N
        # and M here, which produced a wrong bounding box for non-square
        # assemblies.)
        halfx = self.M * self.pitch / 2
        halfy = self.N * self.pitch / 2
        pooldummy = Rectangle(Point(halfx, halfy),
                              Point(halfx, -halfy),
                              Point(-halfx, -halfy),
                              Point(-halfx, halfy))
        for corner in [self.pool.p1, self.pool.p2, self.pool.p3, self.pool.p4]:
            if pooldummy.encloses_point(corner):  # TODO use corners
                print('ERROR: Pool is inside fuel')
                return False
        if len(pooldummy.intersection(self.pool.p1p2)) > 1 or \
           len(pooldummy.intersection(self.pool.p2p3)) > 1 or \
           len(pooldummy.intersection(self.pool.p3p4)) > 1 or \
           len(pooldummy.intersection(self.pool.p4p1)) > 1:
            print('ERROR: Assembly does not fit in pool')
            return False
        return True
class Detector(object):
    """A class used to represent a Detector.

    Parameters
    ----------
    detID : str
        ID of the detector

    Attributes
    ----------
    detID : str
        ID of the detector
    location : Point()
        location of the detector
    collimator : Collimator(), optional
        Collimator placed between the source and the detector
    """
    _idName = 'detID'

    def __init__(self, *args, **kwargs):
        self._location = None
        self._collimator = None
        self._id = getID(self._idName, args, kwargs)

    def __repr__(self):
        return "Detector(detID=%s)" % (self._id)

    @property
    def location(self):
        return self._location

    @property
    def collimator(self):
        return self._collimator

    def set_location(self, location=None):
        """Place the detector at *location*.

        Parameters
        ----------
        location : Point()
            location of the Detector

        Raises
        ------
        TypeError
            if location is not a Point() object
        """
        if not isinstance(location, Point):
            raise TypeError('Detector location has to be Point() object')
        self._location = location

    def set_collimator(self, collimator=None):
        """Attach a Collimator to the detector.

        Parameters
        ----------
        collimator : Collimator()
            Collimator between source and Detector

        Raises
        ------
        TypeError
            if collimator is not a Collimator() object
        """
        if not isinstance(collimator, Collimator):
            raise TypeError('Collimator has to be Collimator() object')
        self._collimator = collimator
class Absorber(object):
    """A class used to represent an Absorber.

    An absorber can be thought of as any element around (or within) the
    assembly which attenuates gamma radiation.

    Parameters
    ----------
    absID : str
        ID of the absorber

    Attributes
    ----------
    absID : str
        ID of the absorber
    form : Rectangle() or Circle()
        the shape of the absorber
    material : str
        matID of the Material the absorber is made of
    accommat : str
        matID of the Material the absorber is surrounded with (Note: the program
        has no capabilities to decide which material is around the absorber, thus
        the user has to set this)
    """
    _idName = 'absID'

    def __init__(self, *args, **kwargs):
        self._form = None
        self._material = None
        self._accommat = None
        self._id = getID(self._idName, args, kwargs)

    def __repr__(self):
        return "Absorber(absID=%s)" % (self._id)

    @property
    def form(self):
        return self._form

    @property
    def material(self):
        return self._material

    @property
    def accommat(self):
        return self._accommat

    def set_form(self, form=None):
        """Define the shape of the Absorber.

        Parameters
        ----------
        form : Rectangle() or Circle()
            shape of the absorber

        Raises
        ------
        TypeError
            if form is neither a Rectangle() nor a Circle() object
        """
        if not isinstance(form, (Rectangle, Circle)):
            raise TypeError('Absorber has to be a Rectangle or Circle object')
        self._form = form

    def set_material(self, material=None):
        """Define the material the Absorber is made of.

        Parameters
        ----------
        material : Material()
            Material the Absorber is made of

        Raises
        ------
        TypeError
            if material is not a Material() object
        """
        if not isinstance(material, Material):
            raise TypeError('Material() is expected')
        self._material = material._id

    def set_accommat(self, accommat=None):
        """Define the material surrounding the Absorber.

        Parameters
        ----------
        accommat : Material()
            Material the Absorber is surrounded with.

        Raises
        ------
        TypeError
            if accommat is not a Material() object
        """
        if not isinstance(accommat, Material):
            raise TypeError('Material() is expected')
        self._accommat = accommat._id
class Collimator(object):
    """A class used to represent a Collimator.

    Any gamma ray not passing through the Collimator will be rejected. Collimators
    have an impact only if they are attributed to Detector objects with
    :meth:`Detector.set_collimator()`.
    The front and the back of the collimator cannot intersect.

    Parameters
    ----------
    collID : str (optional)
        ID of the collimator

    Attributes
    ----------
    collID : str (optional)
        ID of the collimator
    front : Segment()
        First opening of the collimator slit
    back : Segment()
        Second opening of the collimator slit
    color : str
        color of the collimator in case of plotting the geometry.
    """
    _idName = 'collID'

    def __init__(self, *args, **kwargs):
        self._front = None
        self._back = None
        self._color = None
        self._id = getID(self._idName, args, kwargs)

    def __repr__(self):
        return "Collimator()"

    @property
    def front(self):
        return self._front

    @property
    def back(self):
        return self._back

    @property
    def color(self):
        return self._color

    def set_front(self, front=None):
        """The function to set the front of the Collimator.

        Intersecting front and back is not accepted.

        Parameters
        ----------
        front : Segment()
            Opening of the collimator slit

        Raises
        ------
        TypeError
            if front is not a Segment() object
        ValueError
            if front intersects the already defined back

        Examples
        --------
        >>> c1=Collimator()
        >>> c1.set_back(Segment(Point(0,0),Point(1,0)))
        >>> c1.set_front(Segment(Point(0.5,-1),Point(0.5,1)))
        ValueError('Collimator back and front should not intersect')
        """
        if not isinstance(front, Segment):
            raise TypeError('Collimator front has to be a Segment object')
        if self._back is not None and len(self._back.intersection(front)) > 0:
            raise ValueError('Collimator back and front should not intersect')
        self._front = front

    def set_back(self, back=None):
        """The function to set the back of the Collimator.

        Intersecting front and back is not accepted.

        Parameters
        ----------
        back : Segment()
            Opening of the collimator slit

        Raises
        ------
        TypeError
            if back is not a Segment() object
        ValueError
            if back intersects the already defined front
        """
        if not isinstance(back, Segment):
            raise TypeError('Collimator back has to be a Segment object')
        if self._front is not None and len(self._front.intersection(back)) > 0:
            raise ValueError('Collimator back and front should not intersect')
        self._back = back

    def set_color(self, color=None):
        """The function to set the color of the Collimator in case of plotting.

        Parameters
        ----------
        color : str
            color definition of Collimator in hex format.

        Raises
        ------
        ValueError
            if color is not a hex color string
        """
        if isinstance(color, str) and is_hex_color(color):
            self._color = color
        else:
            # Fixed: the message previously referred to "Material" (copied
            # from Material.set_color); this is the Collimator class.
            raise ValueError(('Color has to be hex str for Collimator ID="{}"'.format(self._id)))
class Experiment(object):
"""A class used to represent an Experiment. An experiment is a complete passive
gamma spectroscopy measurment setup with an assembly and detectors (absorbers
and collimators are optional).
Attributes
----------
assembly : Assembly()
The Assembly containing the source
pins : dict
Dictionary containing the available pin types.
materials : dict
Dictionary containing the available materials
detectors : dict
Dictionary containing the detectors in the problem
absorbers : dict, optional
Dictionary containing the absorbers in the problem
elines : list of float, optional
Energy lines (in MeV) at which the geometric efficiency is computed (in case missing,
only the distance travelled in various material is computed)
mu : dict
The total attenuation coefficients for all the energies in elines, and for
each material in the problem.
sourcePoints : list
List of pin-wise source point locations for each random sample.
dTmap : dict of dictionaries of 2D numpy arrays
The average distance travelled by a gamma-ray from a lattice position to a detector
given for each material in the problem. Outer keys are :attr:`Detector._id` identifiers,
inner keys are :attr:`Material._id` identifiers. It is an average of all
random samples (which are kept track in :attr:`Experiment.dTmaps`)
dTmapErr : dict of dictionaries of 2D numpy arrays
The standard deviation of distance travelled by a gamma-ray from a lattice position to a detector
given for each material in the problem. Outer keys are :attr:`Detector._id` identifiers,
inner keys are :attr:`Material._id` identifiers. It is an standard deviation of all
random samples (which are kept track in :attr:`Experiment.dTmaps`)
dTmaps : list of dictionaries of 2D numpy arrays
All random samples of distance travelled by a gamma-ray from a lattice position
to a detector. Source point for each sample are stored in :attr:`Experiment.sourcePoints`
contributionMap : dict
Dictionary to store the rod-wise contributions averaged over random samples
to each detector at each energy.
Outer keys are detector :attr:`Detector.detID` identifiers.
Inner keys are energy lines (as given in :meth:`Experiment.set_elines()`)
contributionMap[detID][eline] is an NxM shaped numpy array, where
N is :attr:`Assembly.N` and M is :attr:`Assembly.M`
contributionMapErr : dict
Dictionary to store the standard deviation of rod-wise contributions averaged over
random samples to each detector at each energy.
Outer keys are detector :attr:`Detector.detID` identifiers.
Inner keys are energy lines (as given in :meth:`Experiment.set_elines()`)
contributionMapErr[detID][eline] is an NxM shaped numpy array, where
N is :attr:`Assembly.N` and M is :attr:`Assembly.M`
contributionMaps : list
All random samples of contribution maps to each detector at each energy.
contributionMapAve : dict
Dictionary to store the rod-wise contribution averaged over all detectors at each energy
averaged over all random samples.
Keys are energy lines (as given in :meth:`Experiment.set_elines()`)
contributionMapAve[eline] is an NxM shaped numpy array, where
N is :attr:`Assembly.N` and M is :attr:`Assembly.M`
contributionMapAveErr : dict
Dictionary to store the standard deviation of the pin-wise contribution averaged over
all detectors at each energy averaged over all random samples.
Keys are energy lines (as given in :meth:`Experiment.set_elines()`)
contributionMapAveErr[eline] is an NxM shaped numpy array, where
N is :attr:`Assembly.N` and M is :attr:`Assembly.M`
contributionMapAves : list
All random samples of the pin-wise contribution averaged over
all detectors at each energy.
geomEff : dict
Dictionary to store the geometric efficiency at each detector location averaged over
each random sample.
Keys are detector :attr:`Detector._id` identifiers.
geomEff[detID] is E long numpy array, where E is the length of :attr:`Experiment.elines`
geomEffErr : dict
Dictionary to store the standard deviation of the geometric efficiency
at each detector location averaged over each random sample.
Keys are detector :attr:`Detector._id` identifiers.
geomEff[detID] is E long numpy array, where E is the length of :attr:`Experiment.elines`
geomEffs : list
All random samples of the geometric efficiency at each detector location.
geomEffAve : numpy.ndarray
Geometric efficiency of the Experiment averaged over all detectors averaged over each
random sample.
The length is of :attr:`Experiment.elines`
geomEffAveErr : numpy.ndarray
Standard deviation of the geometric efficiency of the Experiment averaged over all
detectors averaged over each random sample.
The length is of :attr:`Experiment.elines`
geomEffAves : list
All random samples of the geometric efficiency of the Experiment averaged over all
detectors.
output : str, optional
filename (and path) where to print the geometric efficiency
Note
----
While computing the travelled distance, if the ray does not pass through the
collimator, np.Inf is set in the given traveled distance map for the given position. This is useful,
because the the probability of travelling infinite distance is zero, thus in the related
contribution map, at the same location 0.0 will be found.
If the geometry is so that rays emitted from any location from a pin will not pass through
the collimator, than the mean traveled map (:attr:`Experiment.dTmap`) will have np.Inf at
that location (which is correct since the mean of many infinities is infinity) and the
the standard deviation will be 0.0 (which is again correct).
However, in cases when rays emitted from some location in a pin pass through the collimator, whereas
from some other locations in a pin they do not pass through, the mean traveled distance
(:attr:`Experiment.dTmap`) and the standard deviation of the travelled distance (:attr:`Experiment.dTmapErr`)
become meaningless at such pin positions. (This could be a situation when the collimator slit is
narrower than the size of the pins). The reason is that the mean of something and infinity will become
infinity as well. Also, for the standard deviation calculation the np.Inf values are set to 0.0,
otherwise the map location would be filled with NaN.
For these cases one might analyse the list of travelled distances (:attr:`Experiment.dTmap`) and
the list of source locations (:attr:`Experiment.sourcePoints`).
Nevertheless, the contribution maps and the geometric efficiency is correctly calculated even in
these situations!
Examples
--------
Examples of plotting attributes can be found at https://github.com/ezsolti/feign/blob/master/examples/ex1_2x2fuel.ipynb
"""
def __init__(self):
self._output=None
self._assembly=None
self._pins=None
self._materials=None
self._detectors=None
self._absorbers=None
self._elines=None
self._mu=None
self._sourcePoints=None
self._dTmap=None
self._dTmapErr=None
self._dTmaps=None
self._contributionMap=None
self._contributionMapErr=None
self._contributionMaps=None
self._contributionMapAve=None
self._contributionMapAveErr=None
self._contributionMapAves=None
self._geomEff=None
self._geomEffErr=None
self._geomEffs=None
self._geomEffAve=None
self._geomEffAveErr=None
self._geomEffAves=None
self._randomNum=1
    def __repr__(self):
        return "Experiment()"
    # --- Read-only accessors -------------------------------------------------
    # The underscored attributes below are populated by the set_* methods and
    # by the processing routines; see the class docstring for their meaning.
    @property
    def output(self):
        return self._output
    @property
    def assembly(self):
        return self._assembly
    @property
    def materials(self):
        return self._materials
    @property
    def pins(self):
        return self._pins
    @property
    def detectors(self):
        return self._detectors
    @property
    def absorbers(self):
        return self._absorbers
    @property
    def elines(self):
        # _elines holds str values (they serve as keys of the mu table); they
        # are converted to float here for plotting convenience.
        # NOTE(review): assumes set_elines() has been called -- converting a
        # None value would fail; confirm callers guard against that.
        return np.array(self._elines).astype(float)
    @property
    def sourcePoints(self):
        return self._sourcePoints
    @property
    def dTmap(self):
        return self._dTmap
    @property
    def dTmapErr(self):
        return self._dTmapErr
    @property
    def dTmaps(self):
        return self._dTmaps
    @property
    def contributionMap(self):
        return self._contributionMap
    @property
    def contributionMapErr(self):
        return self._contributionMapErr
    @property
    def contributionMaps(self):
        return self._contributionMaps
    @property
    def contributionMapAve(self):
        return self._contributionMapAve
    @property
    def contributionMapAveErr(self):
        return self._contributionMapAveErr
    @property
    def contributionMapAves(self):
        return self._contributionMapAves
    @property
    def mu(self):
        return self._mu
    @property
    def geomEff(self):
        return self._geomEff
    @property
    def geomEffErr(self):
        return self._geomEffErr
    @property
    def geomEffs(self):
        return self._geomEffs
    @property
    def geomEffAve(self):
        return self._geomEffAve
    @property
    def geomEffAveErr(self):
        return self._geomEffAveErr
    @property
    def geomEffAves(self):
        return self._geomEffAves
    @property
    def randomNum(self):
        return self._randomNum
def set_random(self,randomNum=1):
"""The function to set number of random source locations per pin.
Parameters
----------
randomNum : int
number of random source locations in each pin.
"""
if isinstance(randomNum, int):
self._randomNum=randomNum
else:
raise TypeError('Has to be int')
def set_output(self,output='output.dat'):
"""The function to set the output file for printing the geometric efficiency
Parameters
----------
output : str
filename and path where to print the geometric efficiency.
"""
if isinstance(output, str):
self._output=output
else:
raise TypeError('Output filename has to be str')
def set_materials(self,*argv):
"""The function to include Material objects in an Experiment
Parameters
----------
*argv : Material() or more Material() objects
Material() objects to be included in the Experiment
Examples
--------
>>> uox=Material('1')
>>> zr=Material('2')
>>> experiment=Experiment()
>>> experiment.materials
>>> experiment.set_materials(uox,zr)
>>> experiment.materials
{'1': Material(matID=1), '2': Material(matID=2)}
Raises
------
TypeError
if the parameter is not Material()
ValueError
if the Material is already included
"""
self._materials={}
self.add_material(*argv)
def add_material(self,*argv):
"""The function to add Material objects to an Experiment, which may have
already included materials. If one wants to rewrite the existing materials,
then the :meth:`Experiment.set_materials()` has to be called.
Parameters
----------
*argv : Material() or more Material() objects
Material() objects to be added in the Experiment
Examples
--------
>>> uox=Material('1')
>>> zr=Material('2')
>>> experiment=Experiment()
>>> experiment.set_materials()
>>> experiment.materials
{}
>>> experiment.add_material(uox)
>>> experiment.add_material(zr)
>>> experiment.materials
{'1': Material(matID=1), '2': Material(matID=2)}
Raises
------
TypeError
if the parameter is not Material()
ValueError
if the Material is already included
"""
if len(argv) > 0 and not isinstance(argv[0],Material):
raise TypeError('Inputs need to be Material() objects')
addIDsToDict(self._materials, *argv)
def remove_material(self,*argv):
"""The function to remove Material objects from an Experiment which
already has previously included materials.
Parameters
----------
*argv : Material() or more Material() objects
Material() objects to be added in the Experiment
Examples
--------
>>> uox=Material('1')
>>> zr=Material('2')
>>> experiment=Experiment()
>>> experiment.set_materials(uox,zr)
>>> experiment.materials
{'1': Material(matID=1), '2': Material(matID=2)}
>>> experiment.remove_material(zr)
>>> experiment.materials
{'1': Material(matID=1)}
>>> experiment.remove_material(zr)
ID 2 is not in dict yet
Raises
------
TypeError
if the parameter is not Material()
TypeError
if :attr:`materials` is None.
"""
if len(argv) > 0 and not isinstance(argv[0],Material):
raise TypeError('Inputs need to be Material() objects')
delIDsFromDict(self._materials, *argv)
def set_absorbers(self,*argv):
"""The function to include Absorber objects in an Experiment
Parameters
----------
*argv : Absorber() or more Absorber() objects
Absorber() objects to be included in the Experiment
Examples
--------
>>> leadsheet=Absorber('leadsheet')
>>> alusheet=Absorber('alusheet')
>>> experiment=Experiment()
>>> experiment.absorbers
>>> experiment.set_absorbers(leadsheet,alusheet)
>>> experiment.absorbers
{'leadsheet': Absorber(absID=leadsheet), 'alusheet': Absorber(absID=alusheet)}
Raises
------
TypeError
if the parameter is not Absorber()
ValueError
if the Absorber is already included
"""
self._absorbers={}
self.add_absorber(*argv)
def add_absorber(self,*argv):
"""The function to add Absorber objects to an Experiment, which may have
already included absorbers. If one wants to rewrite the existing absorbers,
then the :meth:`Experiment.set_absorbers()` has to be called.
Parameters
----------
*argv : Absorber() or more Absorber() objects
Absorber() objects to be added in the Experiment
Examples
--------
>>> leadsheet=Absorber('leadsheet')
>>> alusheet=Absorber('alusheet')
>>> experiment=Experiment()
>>> experiment.set_absorbers()
>>> experiment.absorbers
{}
>>> experiment.add_absorber(leadsheet)
>>> experiment.add_absorber(alusheet)
>>> experiment.absorbers
{'leadsheet': Absorber(absID=leadsheet), 'alusheet': Absorber(absID=alusheet)}
Raises
------
TypeError
if the parameter is not Absorber()
ValueError
if the Absorber is already included
"""
if len(argv) > 0 and not isinstance(argv[0],Absorber):
raise TypeError('Inputs need to be Absorber() objects')
addIDsToDict(self._absorbers, *argv)
def remove_absorber(self,*argv):
"""The function to remove Absorber objects from an Experiment which
already has previously included absorbers.
Parameters
----------
*argv : Absorber() or more Absorber() objects
Absorber() objects to be added in the Experiment
Examples
--------
>>> leadsheet=Absorber('leadsheet')
>>> alusheet=Absorber('alusheet')
>>> experiment=Experiment()
>>> experiment.set_absorbers(leadsheet,alusheet)
>>> experiment.absorbers
{'leadsheet': Absorber(absID=leadsheet), 'alusheet': Absorber(absID=alusheet)}
>>> experiment.remove_absorber(alusheet)
>>> experiment.absorbers
{'leadsheet': Absorber(absID=leadsheet)}
>>> experiment.remove_absorber(alusheet)
ID alusheet is not in dict yet
Raises
------
TypeError
if the parameter is not Absorber()
TypeError
if :attr:`absorbers` is None.
"""
if len(argv) > 0 and not isinstance(argv[0],Absorber):
raise TypeError('Inputs need to be Absorber() objects')
delIDsFromDict(self._absorbers, *argv)
def set_detectors(self,*argv):
"""The function to include Detector objects in an Experiment
Parameters
----------
*argv : Detector() or more Detector() objects
Detector() objects to be included in the Experiment
Examples
--------
>>> F5=Detector('F5')
>>> F15=Detector('F15')
>>> experiment=Experiment()
>>> experiment.detectors
>>> experiment.set_detectors(F5,F15)
>>> experiment.detectors
{'F5': Detector(detID=F5), 'F15': Detector(detID=F15)}
Raises
------
TypeError
if the parameter is not Detector()
ValueError
if the Detector is already included
"""
self._detectors={}
self.add_detector(*argv)
def add_detector(self,*argv):
"""The function to add Detector objects to an Experiment, which may have
already included detectors. If one wants to rewrite the existing detectors,
then the :meth:`Experiment.set_detectors()` has to be called.
Parameters
----------
*argv : Detector() or more Detector() objects
Detector() objects to be added in the Experiment
Examples
--------
>>> F5=Detector('F5')
>>> F15=Detector('F15')
>>> experiment=Experiment()
>>> experiment.set_detectors()
>>> experiment.detectors
{}
>>> experiment.add_detector(F5)
>>> experiment.add_detector(F15)
>>> experiment.detectors
{'F5': Detector(detID=F5), 'F15': Detector(detID=F15)}
Raises
------
TypeError
if the parameter is not Detector()
ValueError
if the Detector is already included
"""
if len(argv) > 0 and not isinstance(argv[0],Detector):
raise TypeError('Inputs need to be Detector() objects')
addIDsToDict(self._detectors, *argv)
def remove_detector(self,*argv):
"""The function to remove Detector objects from an Experiment which
already has previously included detectors.
Parameters
----------
*argv : Detector() or more Detector() objects
Detector() objects to be added in the Experiment
Examples
--------
>>> F5=Detector('F5')
>>> F15=Detector('F15')
>>> experiment=Experiment()
>>> experiment.set_detectors(F5,F15)
>>> experiment.detectors
{'F5': Detector(detID=F5), 'F15': Detector(detID=F15)}
>>> experiment.remove_detector(F15)
>>> experiment.detectors
{'F5': Detector(detID=F5)}
>>> experiment.remove_detector(F15)
ID F15 is not in dict yet
Raises
------
TypeError
if the parameter is not Detector()
TypeError
if :attr:`detectors` is None.
"""
if len(argv) > 0 and not isinstance(argv[0],Detector):
raise TypeError('Inputs need to be Detector() objects')
delIDsFromDict(self._detectors, *argv)
def set_assembly(self,assembly=None):
"""The function to include Assembly in an Experiment
Parameters
----------
assembly : Assembly()
Assembly to be included in Experiment
"""
if isinstance(assembly,Assembly):
self._assembly=assembly
self._pins=self._assembly.pins
else:
raise ValueError('Assembly has to be an Assembly() object')
def set_elines(self,elines=None):
"""The function to set energy lines at which the geometric efficiency is
calculated
Parameters
----------
elines : list of str
Energy lines (in MeV) at which the geometric efficiency is calculated.
Note
----
values of elines are strings, because they will be keys of :attr:`mu`
"""
if isinstance(elines, list) and (False not in [isinstance(e, str) for e in elines]) and (False not in [isFloat(e) for e in elines]):
self._elines=elines
else:
raise ValueError('elines has to be a list of str MeV values')
def get_MuTable(self):
"""The function to create a nested dictionary to store the total
attenuation coefficients.
Returns
-------
dict
Dictionary to store the attenuation coefficients.
Outer keys are energies as defined in :attr:`elines`,
inner keys are :attr:`Material.matID` identifiers.
"""
mu={e: {m: 0 for m in self.materials} for e in self._elines}
for m in self.materials:
mum=readMu(self.materials[m].path[0],self.materials[m].path[1],self.elines)
for ei,mui in zip(self._elines,mum):
mu[ei][m]=mui
self._mu=mu
# OLD, nicer but more file reading.
# for e in self._elines:
# mu[e]={key: readMu(self.materials[key].path[0],self.materials[key].path[1],float(e)) for key in self.materials}
# self._mu=mu
    def distanceTravelled(self,detector):
        """Calculate the distance travelled in each material by a gamma ray
        emitted from every source-bearing pin position of the Assembly to a
        detector point.

        For each pin that contains a source material, a source point is chosen
        (the pin centre, or a random point in the innermost region when more
        than one random sample is requested), and the ray from that point to
        the detector is intersected with every pin, the pool and the absorbers
        to accumulate the chord length per material.

        Parameters
        ----------
        detector : Detector()
        Returns
        -------
        dTmap : dict
            The travelled distance in various materials. Keys are material identifiers,
            values are pin-wise distance values (``np.Inf`` marks rays that miss
            the collimator opening).
        sourcePoint : numpy array
            Pin-wise source location in the given calculation.
        """
        dTmap={key: np.zeros((self.assembly.N,self.assembly.M)) for key in self.materials}
        sourcePoint=np.empty((self.assembly.N,self.assembly.M),dtype=object)
        #create distance seen maps for each material
        p=self.assembly.pitch/2
        N=self.assembly.N
        M=self.assembly.M
        for i in range(N):
            for j in range(M):
                sourceIn=[s in self.pins[self.assembly.fuelmap[i][j]]._materials for s in self.assembly.source]
                if True in sourceIn:
                    dT={key: 0 for key in self.materials} #dict to track distances travelled in each material for a given pin
                    #if source is the inner most pin randomly center in circle
                    #else: randomly in that and reject the things within
                    # assumes the source is the innermost region of the pin — TODO confirm
                    if self.randomNum == 1:
                        centerSource=Point(-p*(M-1)+j*2*p,p*(N-1)-i*2*p)
                    else:
                        # uniform point in a disc: r = R*sqrt(u) gives uniform area density
                        length = self.pins[self.assembly.fuelmap[i][j]]._radii[0]*np.sqrt(np.random.uniform(0, 1)) #TODO, if second ring is the source?
                        angle = np.pi * np.random.uniform(0, 2)
                        xnoise = length * np.cos(angle)
                        ynoise = length * np.sin(angle)
                        centerSource=Point(-p*(M-1)+j*2*p,p*(N-1)-i*2*p).translate(xnoise,ynoise)
                    sourcePoint[i][j]=centerSource
                    segmentSourceDetector=Segment(centerSource,detector.location)
                    #Only track rays which pass through the collimator
                    if detector.collimator is None or (len(detector.collimator.front.intersection(segmentSourceDetector))==1 and
                                                       len(detector.collimator.back.intersection(segmentSourceDetector))==1):
                        ###Distances traveled in other pin positions
                        for ii in range(N):
                            for jj in range(M):
                                centerShield=Point(-p*(M-1)+jj*2*p,p*(N-1)-ii*2*p)
                                pinChannel=Rectangle(centerShield.translate(-p,p),centerShield.translate(p,p),
                                                     centerShield.translate(p,-p),centerShield.translate(-p,-p))
                                if len(pinChannel.intersection(segmentSourceDetector))>=1: #check only pins in between Source and Detector
                                    if ii==i and jj==j: #pinChannel.encloses_point(centerSource): #in that case, only one intersection
                                        Dprev=0
                                        for r,mat in zip(self.pins[self.assembly.fuelmap[ii][jj]]._radii, self.pins[self.assembly.fuelmap[ii][jj]]._materials):
                                            intersects = Circle(centerShield,r).intersection(segmentSourceDetector)
                                            D=Point.distance(intersects[0],centerSource)
                                            # chord length in this ring is the distance increment over the previous ring
                                            dT[mat]=dT[mat]+(D-Dprev)
                                            Dprev=D
                                    else:
                                        Dprev=0
                                        for r,mat in zip(self.pins[self.assembly.fuelmap[ii][jj]]._radii, self.pins[self.assembly.fuelmap[ii][jj]]._materials):
                                            intersects = Circle(centerShield,r).intersection(segmentSourceDetector)
                                            if len(intersects)>1: #if len()==1, it is tangent, no distance traveled
                                                D=Point.distance(intersects[0],intersects[1])
                                                dT[mat]=dT[mat]+(D-Dprev)
                                                Dprev=D
                        ###Distance traveled outside the pool = distance of ray-pool intersect and detector
                        if self.assembly.pool is not None:
                            dT[self.assembly.surrounding]=dT[self.assembly.surrounding]+Point.distance(self.assembly.pool.intersection(segmentSourceDetector)[0],detector.location)
                        ###Distance traveled in coolantMat = total source-detector distance - everything else
                        dT[self.assembly.coolant]=dT[self.assembly.coolant]+Point.distance(centerSource,detector.location)-sum([dT[k] for k in dT.keys()]) #in case there is a ring filled with the coolent, eg an empty control rod guide, we need keep that
                        ###Distance traveled in absorbers
                        ###Absorber can be Circle() or Rectangular, the syntax
                        ###is the same regarding .intersection(), thus the code
                        ###handles both as it is.
                        for absorber in self.absorbers.values():
                            intersects=absorber.form.intersection(segmentSourceDetector)
                            if len(intersects)>1:
                                dabs=Point.distance(intersects[0],intersects[1])
                            elif len(intersects)==1: #if the detector or source is within absorber.
                                if absorber.form.encloses_point(detector.location):
                                    dabs=Point.distance(intersects[0],detector.location)
                                elif absorber.form.encloses_point(centerSource):
                                    dabs=Point.distance(intersects[0],centerSource)
                                    print('Warning: absorber #%s is around source at %.2f,%.2f'%(absorber._id,centerSource.x,centerSource.y))
                                else:
                                    raise ValueError('Ray has only one intersection with Absorber \n and the detector neither the source is enclosed by it.')
                            else:
                                dabs=0
                            # the absorber chord replaces the same length of its accommodating material
                            dT[absorber.material]=dT[absorber.material]+dabs
                            dT[absorber.accommat]=dT[absorber.accommat]-dabs
                        #Update the map
                        for key in dT:
                            dTmap[key][i][j]=dT[key]
                    else: #not through collimator
                        for key in dT:
                            dTmap[key][i][j]=np.Inf
        return dTmap, sourcePoint
    def attenuation(self,dTmap,mue,detector,sourcePoint):
        """Calculate the pin-wise contribution to the detector at a given
        energy: the probability that a gamma-ray emitted from a pin reaches
        the detector point.

        Parameters
        ----------
        dTmap : dict
            The travelled distance in various materials. Keys are material identifiers,
            values are pin-wise distance values. Shape as created by :meth:`Experiment.distanceTravelled()`
        mue : dict
            Total attenuation coefficients at the given energy (per cm).
            Keys are materials, values are the coefficient values.
        detector : Detector()
        sourcePoint : numpy array
            Pin-wise source locations, as created by :meth:`Experiment.distanceTravelled()`.
        Returns
        -------
        contribmap : numpy array
            Pin-wise probabilities that a gamma-ray emitted from a given pin hits the detector.
        """
        # NOTE(review): p is computed but not used in this method.
        p=self.assembly.pitch/2
        N=self.assembly.N
        M=self.assembly.M
        contribmap=np.zeros((N,M))
        for i in range(N):
            for j in range(M):
                center=sourcePoint[i][j]
                sourceIn=[s in self.pins[self.assembly.fuelmap[i][j]]._materials for s in self.assembly.source]
                if True in sourceIn:
                    contrib=1 #TODO might be a place to include a pre-known emission weight map. Or to provide a function which multiplies the contribution with some weight matrix
                    # exponential attenuation along the ray, one factor per material;
                    # dTmap entries of np.Inf (ray missed the collimator) give exp(-inf)=0
                    for key in self.materials.keys():
                        contrib=contrib*math.exp(-1*mue[key]*dTmap[key][i][j])
                    # divide by the 1/(4*pi*r^2) solid-angle factor of a point source
                    contribmap[i][j]=contrib/(4*math.pi*(Point.distance(center,detector.location))**2)
        return contribmap
    def checkComplete(self):
        """Check whether everything is defined correctly in an
        Experiment() object.

        - checks whether the assembly is complete,
        - checks pin/source/absorber materials against :attr:`materials`,
        - checks that every detector has a location and a complete collimator,
        - when :attr:`elines` is set, checks that densities and attenuation
          file paths are available.

        Problems are reported with ``print``; missing absorbers are not an
        error (an empty absorber set is created instead).

        Returns
        ----------
        bool
            True if everything is correct and complete, False otherwise
        """
        # Collect one entry per detected problem; the count is reported at the end.
        errors=[]
        if self.materials is None:
            print('ERROR: Materials are not defined')
            return False #otherwise the following checks cannot be done.
        if self.assembly is None:
            print('ERROR: Assembly is missing')
            errors.append(False)
        else:
            if not self.assembly.checkComplete():
                errors.append(False)
            else:
                if False in [mat in self.materials for pin in self.pins.values() for mat in pin._materials]:
                    print('ERROR: pin material is missing from materials')
                    errors.append(False)
                if False in [source in self.materials for source in self.assembly.source]:
                    print('ERROR: source material is not in Materials')
                    errors.append(False)
        if self.detectors is None:
            print('ERROR: no detector is defined.')
            errors.append(False)
        else:
            if True in [det.location is None for det in self.detectors.values()]:
                print('ERROR: Detector location is not defined')
                errors.append(False)
            if True in [det.collimator.back is None or det.collimator.front is None for det in self.detectors.values() if det.collimator is not None]:
                print('ERROR: One collimator is not fully defined')
                errors.append(False)
        if self.absorbers is None:
            # Absorbers are optional: fall back to an empty absorber set.
            self.set_absorbers()
            print('No absorbers in the problem')
        else:
            if self.absorbers is not None and False in [absorber.material in self.materials for absorber in self.absorbers.values()]:
                print('ERROR: absorber material is missing from materials')
                errors.append(False)
            if self.absorbers is not None and False in [absorber.accommat in self.materials for absorber in self.absorbers.values()]:
                print('ERROR: Absorber accommodating material is missing from materials')
                errors.append(False)
        if self._elines is None:
            print('Warning: elines missing; only distance travelled in various materials will be computed')
        else:
            if True in [mat.density is None for mat in self.materials.values()]:
                print('ERROR: Material density is missing')
                errors.append(False)
            if True in [mat.path is None for mat in self.materials.values()]:
                print('ERROR: Path for attenuation file missing')
                errors.append(False)
        if len(errors)!=0:
            print('%d errors encountered.'%(len(errors)))
            return False
        return True
    def Plot(self,out=None,dpi=600,xl=[-100,100],yl=[-100,100],detectorSize=0.4):
        """Plot the geometry of an Experiment() object with matplotlib.

        Materials with no color defined get a random hex color assigned.

        Parameters
        ----------
        out : str (default=None)
            name of output file; when given, the figure is also saved
        dpi : int (default=600)
            dpi of the saved plot
        xl : list of float (default=[-100,100])
            x-direction limits of region of the geometry to plot (in cm)
        yl : list of float (default=[-100,100])
            y-direction limits of region of the geometry to plot (in cm)
        detectorSize : float (default=0.4)
            radius of white circle to illustrate the detector points

        Raises
        ------
        ValueError
            if :meth:`checkComplete` fails.
        """
        if self.checkComplete() is False:
            raise ValueError('ERROR')
        import random
        # Assign a random 6-digit hex color to any material that has none.
        for mat in self.materials:
            if self.materials[mat].color is None:
                self.materials[mat].set_color("#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]))
        pool=self.assembly.pool
        N=self.assembly.N
        M=self.assembly.M
        p=self.assembly.pitch/2
        fig, ax = plt.subplots()
        # background = surrounding material; pool (if any) drawn in coolant color
        ax.patch.set_facecolor(self.materials[self.assembly.surrounding].color)
        if self.assembly.pool is not None:
            pool=self.assembly.pool
            polygon = plt.Polygon([[pool.p1.x,pool.p1.y],[pool.p2.x,pool.p2.y],[pool.p3.x,pool.p3.y],[pool.p4.x,pool.p4.y]], True,color=self.materials[self.assembly.coolant].color)
            ax.add_artist(polygon)
        #fuelmap: draw the rings of every pin, outermost first so inner rings stay visible
        for i in range(N):
            for j in range(M):
                center=[-p*(M-1)+j*2*p,p*(N-1)-i*2*p]
                for r,m in zip(reversed(self.pins[self.assembly.fuelmap[i][j]]._radii),reversed(self.pins[self.assembly.fuelmap[i][j]]._materials)):
                    circle1 = plt.Circle((center[0], center[1]), r, color=self.materials[m].color)
                    ax.add_artist(circle1)
        # absorbers may be rectangular or circular
        for a in self.absorbers:
            absorber=self.absorbers[a]
            if isinstance(absorber.form,Rectangle):
                polygon = plt.Polygon([[absorber.form.p1.x,absorber.form.p1.y],[absorber.form.p2.x,absorber.form.p2.y],[absorber.form.p3.x,absorber.form.p3.y],[absorber.form.p4.x,absorber.form.p4.y]], True,color=self.materials[absorber.material].color)
                ax.add_artist(polygon)
            else:
                circle1 = plt.Circle((absorber.form.c.x,absorber.form.c.y),absorber.form.r,color=self.materials[absorber.material].color)
                ax.add_artist(circle1)
        for d in self.detectors:
            circle1= plt.Circle((self.detectors[d].location.x,self.detectors[d].location.y),detectorSize,color='white')
            ax.add_artist(circle1)
            if self.detectors[d].collimator is not None:
                if self.detectors[d].collimator.color is None:
                    self.detectors[d].collimator.set_color('#C2C5CC')
                #the "orientation" of back and front is not known, so the
                #collimator is plotted with both vertex orderings.
                polygon=plt.Polygon([[self.detectors[d].collimator.front.p.x,self.detectors[d].collimator.front.p.y],[self.detectors[d].collimator.front.q.x, self.detectors[d].collimator.front.q.y],[self.detectors[d].collimator.back.p.x,self.detectors[d].collimator.back.p.y],[self.detectors[d].collimator.back.q.x,self.detectors[d].collimator.back.q.y]],True,color=self.detectors[d].collimator.color)
                ax.add_artist(polygon)
                polygon=plt.Polygon([[self.detectors[d].collimator.front.p.x,self.detectors[d].collimator.front.p.y],[self.detectors[d].collimator.front.q.x, self.detectors[d].collimator.front.q.y],[self.detectors[d].collimator.back.q.x,self.detectors[d].collimator.back.q.y],[self.detectors[d].collimator.back.p.x,self.detectors[d].collimator.back.p.y]],True,color=self.detectors[d].collimator.color)
                ax.add_artist(polygon)
        plt.xlim(xl[0],xl[1])
        plt.ylim(yl[0],yl[1])
        plt.gca().set_aspect('equal', adjustable='box')
        if out is not None:
            plt.savefig(out,dpi=dpi)
        plt.show()
    def Run(self):
        """Run the Experiment.

        For each of the :attr:`randomNum` random samples, the distance maps
        (and, when :attr:`elines` is set, the contribution maps and geometric
        efficiencies) are computed for every detector. The per-sample results
        are then restructured and their mean and standard deviation are stored
        in the corresponding attributes (``_dTmap``/``_dTmapErr``,
        ``_contributionMap``/``_contributionMapErr``, ``_geomEff``/
        ``_geomEffErr`` and their averages). If :attr:`output` is set, the
        averaged efficiency curve is written to that file.

        Raises
        ------
        ValueError
            if :meth:`checkComplete` fails.
        """
        if self.checkComplete() is False:
            raise ValueError('ERROR')
        # Count source-bearing pin positions; used to normalise the efficiencies.
        sourceNorm=0
        for i in range(self.assembly.N):
            for j in range(self.assembly.M):
                sourceIn=[s in self.pins[self.assembly.fuelmap[i][j]]._materials for s in self.assembly.source]
                if True in sourceIn:
                    sourceNorm=sourceNorm+1
        # "Plural" lists accumulate one entry per random sample.
        dTmaps=[]
        contributionMaps=[]
        contributionMapAves=[]
        geomefficiencies=[]
        geomefficiencyAves=[]
        sourcePoints=[]
        for k in range(self.randomNum):
            print('#%d is being calculated'%(k))
            dTmap={}
            for name in self.detectors:
                print("Distance travelled to detector "+name+" is being calculated")
                dTmap[name],sourcePoint=self.distanceTravelled(self.detectors[name])
            dTmaps.append(dTmap)
            sourcePoints.append(sourcePoint)
            if self._elines is not None:
                # The attenuation table only needs to be read once.
                if k==0:
                    self.get_MuTable()
                geomefficiency={}
                geomefficiencyAve=np.zeros(len(self._elines))
                contributionMapAve={e: np.zeros((self.assembly.N,self.assembly.M)) for e in self._elines}
                contributionMap={}
                for name in self.detectors:
                    print('Contribution to detector %s is calculated...'%(name))
                    contributionMap[name]={}
                    geomefficiency[name]=np.zeros(len(self._elines))
                    for e in self._elines:
                        print('...for gamma energy %s MeV'%(e))
                        mue=self._mu[e]
                        # convert mass attenuation coefficients to linear ones via the density
                        muem={key: mue[key]*self.materials[key].density for key in mue.keys()}
                        contributionMap[name][e]=self.attenuation(dTmap[name],muem,self.detectors[name],sourcePoint)
                        contributionMapAve[e]=contributionMapAve[e]+contributionMap[name][e]/len(self.detectors)
                    geomefficiency[name]=np.array([np.sum(contribution) for contribution in contributionMap[name].values()])/sourceNorm
                    geomefficiencyAve=geomefficiencyAve+geomefficiency[name]/len(self.detectors)
                contributionMaps.append(contributionMap)
                contributionMapAves.append(contributionMapAve)
                geomefficiencies.append(geomefficiency)
                geomefficiencyAves.append(geomefficiencyAve)
        self._sourcePoints=sourcePoints
        #Various Numpy manipulations to restructure the "plural" lists containing data for
        #each random sample. Then the mean and the std of the "plural" lists is calculated.
        #restructuring dTmaps: from the list of dictionaries, make a dictionary of lists
        #then take the mean and std of the maps in the inner list
        dTmapsRe={det: {mat: [dmap[det][mat] for dmap in dTmaps] for mat in self.materials} for det in self.detectors}
        dTmap={} #will be the average
        dTmapErr={} #will be the std
        for det in dTmapsRe:
            dTmap[det]={}
            dTmapErr[det]={}
            for mat in dTmapsRe[det]:
                dTmapToAve=np.array([np.ravel(dT) for dT in dTmapsRe[det][mat]])
                dTmap[det][mat]=np.mean(dTmapToAve,axis=0).reshape((self.assembly.N,self.assembly.M))
                #dTmap elements may be np.Inf if the ray did not pass through the collimator
                #this is useful to get 0 in attenuation() for those locations
                #and it makes std calculation impossible, thus we set it to 0 (see note in docstring)
                dTmapToAve=np.where(dTmapToAve==np.Inf,0.0,dTmapToAve)
                dTmapErr[det][mat]=np.std(dTmapToAve,axis=0).reshape((self.assembly.N,self.assembly.M))
        self._dTmap=dTmap
        self._dTmapErr=dTmapErr
        self._dTmaps=dTmaps
        if self._elines is not None:
            #restructuring contributionMaps
            contributionMapsRe={det: {e: [cmap[det][e] for cmap in contributionMaps] for e in self._elines} for det in self.detectors}
            contributionMap={} #will be the average
            contributionMapErr={} #will be the std
            for det in contributionMapsRe:
                contributionMap[det]={}
                contributionMapErr[det]={}
                for e in contributionMapsRe[det]:
                    cMapToAve=np.array([np.ravel(cm) for cm in contributionMapsRe[det][e]])
                    contributionMap[det][e]=np.mean(cMapToAve,axis=0).reshape((self.assembly.N,self.assembly.M))
                    contributionMapErr[det][e]=np.std(cMapToAve,axis=0).reshape((self.assembly.N,self.assembly.M))
            self._contributionMap=contributionMap
            self._contributionMapErr=contributionMapErr
            self._contributionMaps=contributionMaps
            #restructuring contributionMapAves
            contributionMapAvesRe={e: [cmap[e] for cmap in contributionMapAves] for e in self._elines}
            contributionMapAve={} #will be the average
            contributionMapAveErr={} #will be the std
            for e in contributionMapAvesRe:
                cMapToAve=np.array([np.ravel(cm) for cm in contributionMapAvesRe[e]])
                contributionMapAve[e]=np.mean(cMapToAve,axis=0).reshape((self.assembly.N,self.assembly.M))
                contributionMapAveErr[e]=np.std(cMapToAve,axis=0).reshape((self.assembly.N,self.assembly.M))
            self._contributionMapAve=contributionMapAve
            self._contributionMapAveErr=contributionMapAveErr
            self._contributionMapAves=contributionMapAves
            #restructuring geomefficiencies
            geomefficienciesRe={det: [geff[det] for geff in geomefficiencies] for det in self._detectors}
            geomefficiency={} #will be the average
            geomefficiencyErr={} #will be the std
            for det in geomefficienciesRe:
                geffToAve=np.array([geff for geff in geomefficienciesRe[det]])
                geomefficiency[det]=np.mean(geffToAve,axis=0)
                geomefficiencyErr[det]=np.std(geffToAve,axis=0)
            self._geomEff=geomefficiency
            self._geomEffErr=geomefficiencyErr
            self._geomEffs=geomefficiencies
            #restructuring geomefficiencyAves
            geomefficiencyAve=np.mean(np.array([geff for geff in geomefficiencyAves]),axis=0)
            geomefficiencyAveErr=np.std(np.array([geff for geff in geomefficiencyAves]),axis=0)
            self._geomEffAve=geomefficiencyAve
            self._geomEffAveErr=geomefficiencyAveErr
            self._geomEffAves=geomefficiencyAves
            # Optionally dump the averaged efficiency curve: "energy<TAB>efficiency"
            if self.output is not None:
                output=open(self.output,'w')
                for e,c in zip(self._elines,self._geomEffAve):
                    output.write(e+'\t'+str(c)+'\n')
                output.close()
|
[
"numpy.sum",
"numpy.ravel",
"numpy.empty",
"numpy.mean",
"numpy.sin",
"matplotlib.pyplot.gca",
"numpy.interp",
"numpy.std",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.Circle",
"numpy.cos",
"matplotlib.pyplot.Polygon",
"re.compile",
"matplotlib.pyplot.xlim",
"numpy.random.uniform",
"math.exp",
"os.getcwd",
"numpy.zeros",
"random.choice",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.savefig"
] |
[((1225, 1250), 'numpy.interp', 'np.interp', (['energy', 'en', 'mu'], {}), '(energy, en, mu)\n', (1234, 1250), True, 'import numpy as np\n'), ((1853, 1880), 're.compile', 're.compile', (['HEX_COLOR_REGEX'], {}), '(HEX_COLOR_REGEX)\n', (1863, 1880), False, 'import re\n'), ((14130, 14147), 'numpy.array', 'np.array', (['fuelmap'], {}), '(fuelmap)\n', (14138, 14147), True, 'import numpy as np\n'), ((48972, 49030), 'numpy.empty', 'np.empty', (['(self.assembly.N, self.assembly.M)'], {'dtype': 'object'}), '((self.assembly.N, self.assembly.M), dtype=object)\n', (48980, 49030), True, 'import numpy as np\n'), ((56869, 56885), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (56877, 56885), True, 'import numpy as np\n'), ((62164, 62178), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (62176, 62178), True, 'import matplotlib.pyplot as plt\n'), ((64939, 64961), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xl[0]', 'xl[1]'], {}), '(xl[0], xl[1])\n', (64947, 64961), True, 'import matplotlib.pyplot as plt\n'), ((64969, 64991), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yl[0]', 'yl[1]'], {}), '(yl[0], yl[1])\n', (64977, 64991), True, 'import matplotlib.pyplot as plt\n'), ((65120, 65130), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (65128, 65130), True, 'import matplotlib.pyplot as plt\n'), ((48780, 48824), 'numpy.zeros', 'np.zeros', (['(self.assembly.N, self.assembly.M)'], {}), '((self.assembly.N, self.assembly.M))\n', (48788, 48824), True, 'import numpy as np\n'), ((62360, 62535), 'matplotlib.pyplot.Polygon', 'plt.Polygon', (['[[pool.p1.x, pool.p1.y], [pool.p2.x, pool.p2.y], [pool.p3.x, pool.p3.y], [\n pool.p4.x, pool.p4.y]]', '(True)'], {'color': 'self.materials[self.assembly.coolant].color'}), '([[pool.p1.x, pool.p1.y], [pool.p2.x, pool.p2.y], [pool.p3.x,\n pool.p3.y], [pool.p4.x, pool.p4.y]], True, color=self.materials[self.\n assembly.coolant].color)\n', (62371, 62535), True, 'import matplotlib.pyplot as plt\n'), ((63639, 63744), 
'matplotlib.pyplot.Circle', 'plt.Circle', (['(self.detectors[d].location.x, self.detectors[d].location.y)', 'detectorSize'], {'color': '"""white"""'}), "((self.detectors[d].location.x, self.detectors[d].location.y),\n detectorSize, color='white')\n", (63649, 63744), True, 'import matplotlib.pyplot as plt\n'), ((65087, 65112), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out'], {'dpi': 'dpi'}), '(out, dpi=dpi)\n', (65098, 65112), True, 'import matplotlib.pyplot as plt\n'), ((34198, 34220), 'numpy.array', 'np.array', (['self._elines'], {}), '(self._elines)\n', (34206, 34220), True, 'import numpy as np\n'), ((63124, 63372), 'matplotlib.pyplot.Polygon', 'plt.Polygon', (['[[absorber.form.p1.x, absorber.form.p1.y], [absorber.form.p2.x, absorber.\n form.p2.y], [absorber.form.p3.x, absorber.form.p3.y], [absorber.form.p4\n .x, absorber.form.p4.y]]', '(True)'], {'color': 'self.materials[absorber.material].color'}), '([[absorber.form.p1.x, absorber.form.p1.y], [absorber.form.p2.x,\n absorber.form.p2.y], [absorber.form.p3.x, absorber.form.p3.y], [\n absorber.form.p4.x, absorber.form.p4.y]], True, color=self.materials[\n absorber.material].color)\n', (63135, 63372), True, 'import matplotlib.pyplot as plt\n'), ((63434, 63553), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(absorber.form.c.x, absorber.form.c.y)', 'absorber.form.r'], {'color': 'self.materials[absorber.material].color'}), '((absorber.form.c.x, absorber.form.c.y), absorber.form.r, color=\n self.materials[absorber.material].color)\n', (63444, 63553), True, 'import matplotlib.pyplot as plt\n'), ((64073, 64482), 'matplotlib.pyplot.Polygon', 'plt.Polygon', (['[[self.detectors[d].collimator.front.p.x, self.detectors[d].collimator.\n front.p.y], [self.detectors[d].collimator.front.q.x, self.detectors[d].\n collimator.front.q.y], [self.detectors[d].collimator.back.p.x, self.\n detectors[d].collimator.back.p.y], [self.detectors[d].collimator.back.q\n .x, self.detectors[d].collimator.back.q.y]]', '(True)'], {'color': 
'self.detectors[d].collimator.color'}), '([[self.detectors[d].collimator.front.p.x, self.detectors[d].\n collimator.front.p.y], [self.detectors[d].collimator.front.q.x, self.\n detectors[d].collimator.front.q.y], [self.detectors[d].collimator.back.\n p.x, self.detectors[d].collimator.back.p.y], [self.detectors[d].\n collimator.back.q.x, self.detectors[d].collimator.back.q.y]], True,\n color=self.detectors[d].collimator.color)\n', (64084, 64482), True, 'import matplotlib.pyplot as plt\n'), ((64514, 64923), 'matplotlib.pyplot.Polygon', 'plt.Polygon', (['[[self.detectors[d].collimator.front.p.x, self.detectors[d].collimator.\n front.p.y], [self.detectors[d].collimator.front.q.x, self.detectors[d].\n collimator.front.q.y], [self.detectors[d].collimator.back.q.x, self.\n detectors[d].collimator.back.q.y], [self.detectors[d].collimator.back.p\n .x, self.detectors[d].collimator.back.p.y]]', '(True)'], {'color': 'self.detectors[d].collimator.color'}), '([[self.detectors[d].collimator.front.p.x, self.detectors[d].\n collimator.front.p.y], [self.detectors[d].collimator.front.q.x, self.\n detectors[d].collimator.front.q.y], [self.detectors[d].collimator.back.\n q.x, self.detectors[d].collimator.back.q.y], [self.detectors[d].\n collimator.back.p.x, self.detectors[d].collimator.back.p.y]], True,\n color=self.detectors[d].collimator.color)\n', (64525, 64923), True, 'import matplotlib.pyplot as plt\n'), ((64999, 65008), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (65006, 65008), True, 'import matplotlib.pyplot as plt\n'), ((68975, 69022), 'numpy.where', 'np.where', (['(dTmapToAve == np.Inf)', '(0.0)', 'dTmapToAve'], {}), '(dTmapToAve == np.Inf, 0.0, dTmapToAve)\n', (68983, 69022), True, 'import numpy as np\n'), ((71332, 71384), 'numpy.array', 'np.array', (['[geff for geff in geomefficienciesRe[det]]'], {}), '([geff for geff in geomefficienciesRe[det]])\n', (71340, 71384), True, 'import numpy as np\n'), ((71421, 71447), 'numpy.mean', 'np.mean', (['geffToAve'], {'axis': 
'(0)'}), '(geffToAve, axis=0)\n', (71428, 71447), True, 'import numpy as np\n'), ((71486, 71511), 'numpy.std', 'np.std', (['geffToAve'], {'axis': '(0)'}), '(geffToAve, axis=0)\n', (71492, 71511), True, 'import numpy as np\n'), ((71740, 71787), 'numpy.array', 'np.array', (['[geff for geff in geomefficiencyAves]'], {}), '([geff for geff in geomefficiencyAves])\n', (71748, 71787), True, 'import numpy as np\n'), ((71836, 71883), 'numpy.array', 'np.array', (['[geff for geff in geomefficiencyAves]'], {}), '([geff for geff in geomefficiencyAves])\n', (71844, 71883), True, 'import numpy as np\n'), ((62862, 62930), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(center[0], center[1])', 'r'], {'color': 'self.materials[m].color'}), '((center[0], center[1]), r, color=self.materials[m].color)\n', (62872, 62930), True, 'import matplotlib.pyplot as plt\n'), ((66495, 66539), 'numpy.zeros', 'np.zeros', (['(self.assembly.N, self.assembly.M)'], {}), '((self.assembly.N, self.assembly.M))\n', (66503, 66539), True, 'import numpy as np\n'), ((68530, 68542), 'numpy.ravel', 'np.ravel', (['dT'], {}), '(dT)\n', (68538, 68542), True, 'import numpy as np\n'), ((68606, 68633), 'numpy.mean', 'np.mean', (['dTmapToAve'], {'axis': '(0)'}), '(dTmapToAve, axis=0)\n', (68613, 68633), True, 'import numpy as np\n'), ((69055, 69081), 'numpy.std', 'np.std', (['dTmapToAve'], {'axis': '(0)'}), '(dTmapToAve, axis=0)\n', (69061, 69081), True, 'import numpy as np\n'), ((70566, 70578), 'numpy.ravel', 'np.ravel', (['cm'], {}), '(cm)\n', (70574, 70578), True, 'import numpy as np\n'), ((70654, 70680), 'numpy.mean', 'np.mean', (['cMapToAve'], {'axis': '(0)'}), '(cMapToAve, axis=0)\n', (70661, 70680), True, 'import numpy as np\n'), ((70764, 70789), 'numpy.std', 'np.std', (['cMapToAve'], {'axis': '(0)'}), '(cMapToAve, axis=0)\n', (70770, 70789), True, 'import numpy as np\n'), ((987, 998), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (996, 998), False, 'import os\n'), ((50100, 50123), 'numpy.random.uniform', 
'np.random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (50117, 50123), True, 'import numpy as np\n'), ((50166, 50179), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (50172, 50179), True, 'import numpy as np\n'), ((50222, 50235), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (50228, 50235), True, 'import numpy as np\n'), ((57407, 57449), 'math.exp', 'math.exp', (['(-1 * mue[key] * dTmap[key][i][j])'], {}), '(-1 * mue[key] * dTmap[key][i][j])\n', (57415, 57449), False, 'import math\n'), ((69776, 69788), 'numpy.ravel', 'np.ravel', (['cm'], {}), '(cm)\n', (69784, 69788), True, 'import numpy as np\n'), ((69872, 69898), 'numpy.mean', 'np.mean', (['cMapToAve'], {'axis': '(0)'}), '(cMapToAve, axis=0)\n', (69879, 69898), True, 'import numpy as np\n'), ((69988, 70013), 'numpy.std', 'np.std', (['cMapToAve'], {'axis': '(0)'}), '(cMapToAve, axis=0)\n', (69994, 70013), True, 'import numpy as np\n'), ((49998, 50021), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (50015, 50021), True, 'import numpy as np\n'), ((61974, 62007), 'random.choice', 'random.choice', (['"""0123456789ABCDEF"""'], {}), "('0123456789ABCDEF')\n", (61987, 62007), False, 'import random\n'), ((67359, 67379), 'numpy.sum', 'np.sum', (['contribution'], {}), '(contribution)\n', (67365, 67379), True, 'import numpy as np\n')]
|
import os
from subprocess import STDOUT, check_call, CalledProcessError
class Apt:
    """Thin wrapper around ``apt-get`` / ``apt-add-repository`` commands.

    Parameters
    ----------
    stdout : file-like, optional
        Stream receiving subprocess output. Defaults to ``os.devnull``,
        opened lazily per instance. (The original opened the null device
        once in the ``def`` line — a mutable default evaluated at class
        definition time, leaking one shared file handle as an import-time
        side effect.)
    stderr : file-like, optional
        Stream for subprocess error output; defaults to ``subprocess.STDOUT``
        so errors follow ``stdout``.
    """

    def __init__(self, stdout=None, stderr=STDOUT):
        # Open the null device lazily so each instance owns its handle and
        # nothing is opened merely by importing this module.
        self.stdout = stdout if stdout is not None else open(os.devnull, 'wb')
        self.stderr = stderr

    def command(self, structure):
        """Run *structure* (an argv list) and return its exit code.

        Returns 0 on success, otherwise the subprocess's non-zero return code.
        """
        try:
            return check_call(structure, stdout=self.stdout, stderr=self.stderr)
        except CalledProcessError as ep:
            return ep.returncode

    def install(self, package, ally="-y"):
        """``apt-get install -y <package>``; returns the exit code."""
        return self.command(['apt-get', 'install', ally, package])

    def add_repository(self, repo, ally="-y"):
        """``apt-add-repository -y <repo>``; returns the exit code."""
        return self.command(['apt-add-repository', ally, repo])

    def update(self):
        """``apt-get update``; returns the exit code."""
        return self.command(['apt-get', 'update'])

    def upgrade(self):
        """``apt-get upgrade``; returns the exit code."""
        return self.command(['apt-get', 'upgrade'])

    def autoclean(self):
        """``apt-get autoclean``; returns the exit code."""
        return self.command(['apt-get', 'autoclean'])
|
[
"subprocess.check_call"
] |
[((280, 341), 'subprocess.check_call', 'check_call', (['structure'], {'stdout': 'self.stdout', 'stderr': 'self.stderr'}), '(structure, stdout=self.stdout, stderr=self.stderr)\n', (290, 341), False, 'from subprocess import STDOUT, check_call, CalledProcessError\n')]
|
import requests
from backend.interfaces import DataSender
class TelegramSender(DataSender):
    """Send text messages to a Telegram chat via the Bot HTTP API."""

    def __init__(self, api_token: str, chat_id: str) -> None:
        self.api_token = api_token
        self.chat_id = chat_id

    def send_message(self, msg) -> None:
        """Deliver *msg* through the ``sendMessage`` endpoint.

        Raises ``RuntimeError`` when the token or the chat id is missing.
        """
        if not (self.api_token and self.chat_id):
            raise RuntimeError("Missing Telegram token or chat id")
        url = "".join(
            [
                "https://api.telegram.org/bot",
                self.api_token,
                "/sendMessage?chat_id=",
                self.chat_id,
                "&parse_mode=Markdown&text=",
                msg,
            ]
        )
        requests.get(url)
|
[
"requests.get"
] |
[((582, 604), 'requests.get', 'requests.get', (['sent_msg'], {}), '(sent_msg)\n', (594, 604), False, 'import requests\n')]
|
import numpy as np
import numpy_financial as npf
import numpy.linalg as linalg
from datetime import date
from workalendar.america import Brazil
cal = Brazil()
class Titulos:
def __init__(self, taxa, start, end, c=0):
"""
:param fv: Face Value
:param c: coupon
:param start: settlement date
:param end: Maturity date
"""
self.fv = 1000
self.taxa = taxa
self.coupon = c
self.start = start
self.end = end
def ltn(self):
"""
:param taxa: taxa contratada
:param DU: Número de dias úteis
:return:
"""
DU = cal.get_working_days_delta(self.start, self.end)
# Lidando com possível erro de entrada do usuário
if self.taxa < 1:
taxa = self.taxa
elif self.taxa > 1:
taxa = self.taxa / 100
price = self.fv / (1 + taxa) ** (DU / 252)
return price
    def ntn_f(self):
        """Price an NTN-F (fixed-rate, semi-annual coupon treasury bond).

        Builds the semi-annual coupon schedule in business days from the
        settlement date to maturity and discounts each cash flow at the
        contracted rate.

        returns
        -------
        price : scalar
            price of one NTN-F
        """
        taxa = self.taxa
        # Accept the rate either as a fraction (0.12) or a percentage (12).
        # NOTE(review): taxa == 1 is left unchanged — confirm that is intended.
        if taxa < 1:
            taxa = taxa
        elif taxa > 1:
            taxa = taxa / 100
        # The face value is always 1000
        fv = 1000
        # Solve for the dates generating the coupon schedule
        start = self.start
        end_year = date(start.year, 12, 31)
        start_next_year = date(start.year + 1, 1, 1)
        maturity = self.end
        # ``days`` = business days left this year; ``days2`` = business days
        # of the contract from the start of next year to maturity
        days = cal.get_working_days_delta(start, end_year)
        days2 = cal.get_working_days_delta(start_next_year, maturity)
        # One or two coupons remain in the current year depending on the half-year mark
        if days < 252 / 2:
            coupon = 1
        else:
            coupon = 2
        number_coupons = int((days2 / 252) * 2) + coupon
        # Use a numpy array for easier manipulation
        DU = np.zeros(number_coupons)
        for i in range(1, number_coupons):
            DU[0] = days
            DU[i] = DU[i - 1] + 252 / 2
        # The coupon is fixed (10% annual paid semi-annually) and best
        # approximated by this expression
        c = fv * ((1 + 0.1) ** (1 / 2) - 1)
        # Build the cash-flow vector; the last payment also returns the face value
        terminal = fv + c
        fluxo_caixa = np.full(len(DU), fill_value=c)
        fluxo_caixa[-1] = terminal
        dcf = np.full(len(DU), fill_value=taxa)
        price = sum(fluxo_caixa / (1 + dcf) ** (DU / 252))
        return price
    def ntn_b_principal(self, VNA, IPCA):
        """
        Return the price of a principal-only NTN-B (Brazilian inflation-linked bond).

        :param VNA: nominal adjusted value (VNA) at trade time
        :param IPCA: projected IPCA inflation, given as a percentage
        :return: NTN-B purchase price

        Uses ``self.taxa`` (traded rate), ``self.start`` (trade date) and
        ``self.end`` (maturity date) from the instance.
        """
        # NOTE(review): DU is calendar days between trade date and maturity,
        # but it is used below as business days (DU / 252) -- confirm intended.
        DU = self.end - self.start
        DU = DU.days
        my_date = self.start
        taxa = self.taxa
        # Normalize a rate entered as a percentage (e.g. 10 -> 0.10).
        # NOTE(review): taxa == 1 falls through both branches unchanged.
        if taxa < 1:
            taxa = taxa
        elif taxa > 1:
            taxa = taxa / 100
        IPCA = IPCA / 100
        # Anchor dates for the monthly IPCA accrual window (the index reference
        # day is the 15th of each month); year/month boundaries handled explicitly.
        if my_date.month == 1:
            last = datetime.date(my_date.year - 1, 12, 15)
        else:
            last = datetime.date(my_date.year, my_date.month - 1, 15)
        if my_date.day < 15:
            this_month = datetime.date(my_date.year, my_date.month, 15)
        else:
            this_month = my_date
        if my_date.month == 12:
            next_month = datetime.date(my_date.year + 1, 1, 15)
        else:
            next_month = datetime.date(my_date.year, my_date.month + 1, 15)
        # Actual NTN-B computation starts here.
        # On the 15th itself the VNA needs no pro-rata inflation adjustment.
        if my_date.day == 15:
            VNA_p = VNA
        else:
            pr = (my_date - last) / (next_month - this_month)
            VNA_p = VNA * (1 + IPCA) ** (pr)
        # Quotation (discount factor) at the traded rate.
        cotacao = 1 / (1 + taxa) ** (DU / 252)
        # Purchase price of the NTN-B.
        valor = VNA_p * cotacao
        return valor
    def ntn_b(self, VNA, DU, IPCA, date):
        """
        Return the price of a coupon-paying NTN-B (Brazilian inflation-linked bond).

        :param VNA: nominal adjusted value (VNA) at trade time
        :param DU: list of business-day counts, one per remaining coupon date
        :param IPCA: projected IPCA inflation, given as a percentage
        :param date: trade date
        :return: NTN-B purchase price

        Uses ``self.taxa`` (traded rate) from the instance.
        """
        taxa = self.taxa
        # Normalize a rate entered as a percentage (e.g. 10 -> 0.10).
        # NOTE(review): taxa == 1 falls through both branches unchanged.
        if taxa < 1:
            taxa = taxa
        elif taxa > 1:
            taxa = taxa / 100
        # np.array makes the vectorized discounting below straightforward.
        DU = np.array(DU)
        # IPCA arrives in percent; convert to a fraction.
        IPCA = IPCA / 100
        # Anchor dates for the monthly IPCA accrual window (the index reference
        # day is the 15th of each month); year/month boundaries handled explicitly.
        my_date = date
        if my_date.month == 1:
            last = datetime.date(my_date.year - 1, 12, 15)
        else:
            last = datetime.date(my_date.year, my_date.month - 1, 15)
        if my_date.day < 15:
            this_month = datetime.date(my_date.year, my_date.month, 15)
        else:
            this_month = my_date
        if my_date.month == 12:
            next_month = datetime.date(my_date.year + 1, 1, 15)
        else:
            next_month = datetime.date(my_date.year, my_date.month + 1, 15)
        # Actual NTN-B computation: pro-rata fraction of the accrual month.
        pr = (my_date - last) / (next_month - this_month)
        # On the 15th itself the VNA needs no pro-rata inflation adjustment.
        if my_date.day == 15:
            VNA_p = VNA
        else:
            VNA_p = VNA * (1 + IPCA) ** (pr)
        # Quotation including the semi-annual coupons of the 6%-a-year NTN-B.
        c = ((1 + 0.06) ** (1 / 2) - 1)
        terminal = 1
        fluxo_caixa = np.full(len(DU), fill_value=c)
        fluxo_caixa[-1] = fluxo_caixa[-1] + terminal
        dcf = np.full(len(DU), fill_value=taxa)
        cotacao = sum(fluxo_caixa / (1 + dcf) ** (DU / 252))
        # Purchase price of the NTN-B.
        valor_final = VNA_p * cotacao
        return valor_final
def lft(self, taxa, DU, VNA, selic):
"""
Retorna o preço de compra de uma LFT
:param taxa: taxa contratada com ágio ou deságio
:param DU: dias úteis
:param VNA: VNA corrigido pela SELIC
:param selic: Taxa Selic projetada e anualizada
:return:
"""
if selic < 1:
selic = selic
elif taxa > 1:
selic = selic / 100
VNA_p = VNA * (1 + selic) ** (1 / 252)
cotacao = 1 / (1 + taxa) ** (DU / 252)
return VNA_p * cotacao
def bondPrice(self, ttm: int , ytm):
"""
param:
ttm = Time to maturity
ytm = Yield to Maturity
"""
c = self.coupon
fv = self.fv
cashFlow = []
if c < 0:
c = c
else:
c = c/100
if ytm < 0:
ytm = ytm
else:
ytm = ytm/100
[cashFlow.append((fv*c)/(1+ytm)**(i)) for i in range(1,ttm)]
cashFlow.append((fv+(fv*c))/(1+ytm)**(ttm))
price = sum(cashFlow)
return price
def ytm(self, r):
"""
This function return the prices of each path.
:param fv: Bonds Face Value
:param rates: a list of interest rates
:param c: coupon rate
:return: Bond Yield to Maturity and Price
"""
#Setting ttm = Time to maturity
ttm = len(r)
fv = self.fv
c = self.coupon
if c > 1:
c = c/100
else:
c = c
ttm1 = ttm-1
#Creating a coupon array
cashF = np.array([fv*c]*ttm1)
cashF = np.append(cashF,(fv*c)+fv)
#Create a array with zeros to fill with discounted factors
dcf = np.zeros(ttm, dtype=np.double)
for i in range(0,ttm):
dcf[i] = cashF[i] * (1/(1+r[i])**(i+1))
# Fiding prices
price = np.sum(dcf)
#Creating cash flow structure to calculate YTM
cashF = np.insert(cashF, 0, -price)
ytm = npf.irr(cashF)
Bond_Characteristics = { "Bond price": price,
"Bond Yield":round(ytm*100, 3),
}
return Bond_Characteristics
def bondPriceElasticity(self, rates, delta):
"""
:param fv: Bonds Face Value
:param rates: a list of interest rates
:param c: coupon rate
:param delta: change in Yield to Maturity
:return: Bond Yield to Maturity and Price
"""
fv = self.fv
c = self.coupon
ttm = len(rates)
values = ytm(rates)
price = values['Bond price']
rates = np.array(rates)
rates = rates*(1/100)
c = c/100
delta = delta/100
ttm1 = ttm-1
cashF = np.vstack([fv*c]*ttm1)
cashF = np.append(cashF,(fv*c)+fv)
dcf = np.array([])
for i in range(0,ttm):
dcf = np.append(dcf,cashF[i]*(i+1)/(1+rates[i])**(i+2) )
pe_factor = np.sum(dcf)
b = -1/price
price_elasticity = b*pe_factor
delta_bond = -price*abs(price_elasticity)*delta
bond_elasticity = {'Bond Price': price,
'Bond new price': price+delta_bond,
'Bond Elasticity':price_elasticity,
'Price Change': delta_bond,
}
return bond_elasticity
def convexity(self, r):
"""
param:
fv = Face Value
ttm = Time to maturity
c = coupon
ytm = Yield to Maturity
"""
fv = self.fv
c = self.coupon
ytm = ytm(r)
ttm = len(r)
price = bondPrice(fv, ttm, c, ytm)
c = c/100
ytm = ytm/100
x = []
[x.append((fv*c)/(1+ytm)**(i)) for i in range(1,ttm)]
x.append((fv+(fv*c))/(1+ytm)**(ttm))
y = []
[y.append(x[i] * (i+1) * (i+2) ) for i in range(0,ttm)]
dfc = sum(y)
cx = (dfc*0.5)/(((1+ytm)**2)*price)
return cx
def mac_mod_cx_duration(self, r):
"""
param:
fv = Face Value
ttm = Time to maturity; If the bond is a perpetuity, ttm =0
c = coupon
ytm = Yield to Maturity
"""
fv = self.fv
c = self.coupon
ytm = ytm(r)
ttm = len(r)
price = bondPrice(fv, ttm, c, ytm)
cx = convexity(fv,ttm,c, ytm)
c = c/100
ytm = ytm/100
x =[]
if ttm == 0:
modD = 1/ytm
D = modD*(1+ytm)
else:
if c == 0:
D = ttm
else:
[x.append((fv*c)/(1+ytm)**(i) *i) for i in range(1,ttm)]
x.append((fv+(fv*c))/(1+ytm)**(ttm) * ttm)
d = sum(x)
D = d/price
modD = D/(1+ytm)
bond_cara = {'Price': price,
'Bond Macauly Duration':D,
'Modified Duration': modD,
'Convexity': cx,
}
return bond_cara
def bondRisk(self, r, delta):
"""
param:
fv = Face Value
ttm = Time to maturity; If the bond is a perpetuity, ttm =0
c = coupon
ytm = Yield to Maturity
delta = change in yield to maturity
"""
fv = self.fv
ttm = len(r)
c = self.coupon
ytm = ytm(r)
delta = delta/100
values = mac_mod_cx_duration(fv,ttm,c,ytm)
price = values['Price']
D = values['Bond Macauly Duration']
modD = values['Modified Duration']
cx = values['Convexity']
"""Now we calculate the change in price of this bond.
delta_price = is the approximation only with modifie duration
delta_price2 = is the approximation with modifie duration and convexity
"""
delta_price = -price*modD*delta
delta_price2 = price*(-modD*delta + cx*delta**2)
""" Now we use the change to see what is the new price in these two cases. l
"""
newprice = price + delta_price
newprice2 = price + delta_price2
bond_risks = {'Bond Macauly Duration':D,
'Bond Modified Duration':modD,
'first order change in price': delta_price,
'Second order change in price':delta_price2,
'Original Price':price,
'New price Mod Duration': newprice,
'New price Mod and Convexity': newprice2,
}
return bond_risks
    def arbitrageEstrategy(fv, price, c, ttm):
        """
        Solve for the portfolio of bonds that replicates a payoff of 100 in
        the first period and 0 afterwards (a replication/arbitrage strategy).

        NOTE(review): the signature has no ``self`` and no ``@staticmethod``
        decorator, so this only works when called on the class, not on an
        instance -- confirm the intended call site.

        :param fv: face value (applied to every bond)
        :param price: list of bond prices
        :param c: list of coupon rates, in percent
        :param ttm: list of times to maturity, in periods
        :return: portfolio weights solving the linear replication system
        """
        price = np.array(price)
        # Prices enter the linear system negated (purchase = cash outflow).
        price = price * (-1)
        fv = np.array(fv)
        c = np.array(c)
        c = c/100
        ttm = np.array(ttm)
        # The shape of the Matrix. This will depend of the bond with the higher time to maturity.
        mformat = np.max(ttm)
        ttmz = ttm-1
        # Right-hand side of the system: 100 now, 0 in every later period.
        # NOTE(review): ``len`` shadows the builtin here, and the first
        # ``cons = np.array([])`` is dead code (immediately overwritten).
        cons = np.array([])
        len = mformat
        cons = np.vstack([0]*(len))
        cons = np.insert(cons, 0, 100)
        # Cash-flow matrix: one row per bond, one column per period.
        Matrix = np.zeros((price.size, mformat))
        # Zero-coupon bonds: single face-value payment at maturity.
        # NOTE(review): assigns the array ``fv`` into a scalar slot -- only
        # valid when fv is a scalar or a size-1 array; confirm inputs.
        for i in range(0, price.size):
            if (c[i] == 0):
                Matrix[i][ttmz[i]] = fv
            else:
                pass
        # Coupon bonds: fill every period with the coupon payment.
        for i in range(0, price.size):
            if (np.max(Matrix[i][:]) == 0):
                for j in range(0, mformat):
                    Matrix[i][j] = (fv*c[i])
        # Replace the maturity-period coupon with coupon + principal.
        for i in range(0, price.size):
            for j in range(0, mformat):
                if(c[i] != 0 and j == ttmz[i]):
                    Matrix[i][j] = fv+(fv*c[i])
        # Zero out flows after each bond's maturity.
        for i in range(0, price.size):
            for j in range(0, mformat):
                if(c[i] != 0 and j > ttmz[i]):
                    Matrix[i][j] = 0
        # Join the (negated) prices with the cash-flow matrix ...
        matrix = np.column_stack((price, Matrix))
        # ... and transpose so rows are periods, columns are bonds.
        matrix = np.transpose(matrix)
        # Solve the square system; a non-square system would need lstsq.
        answer = linalg.solve(matrix, cons)
        return answer
|
[
"workalendar.america.Brazil",
"numpy.sum",
"numpy_financial.irr",
"numpy.zeros",
"datetime.date",
"numpy.transpose",
"numpy.insert",
"numpy.append",
"numpy.max",
"numpy.array",
"numpy.column_stack",
"numpy.linalg.solve",
"numpy.vstack"
] |
[((150, 158), 'workalendar.america.Brazil', 'Brazil', ([], {}), '()\n', (156, 158), False, 'from workalendar.america import Brazil\n'), ((1621, 1645), 'datetime.date', 'date', (['start.year', '(12)', '(31)'], {}), '(start.year, 12, 31)\n', (1625, 1645), False, 'from datetime import date\n'), ((1672, 1698), 'datetime.date', 'date', (['(start.year + 1)', '(1)', '(1)'], {}), '(start.year + 1, 1, 1)\n', (1676, 1698), False, 'from datetime import date\n'), ((2159, 2183), 'numpy.zeros', 'np.zeros', (['number_coupons'], {}), '(number_coupons)\n', (2167, 2183), True, 'import numpy as np\n'), ((5156, 5168), 'numpy.array', 'np.array', (['DU'], {}), '(DU)\n', (5164, 5168), True, 'import numpy as np\n'), ((8190, 8215), 'numpy.array', 'np.array', (['([fv * c] * ttm1)'], {}), '([fv * c] * ttm1)\n', (8198, 8215), True, 'import numpy as np\n'), ((8228, 8257), 'numpy.append', 'np.append', (['cashF', '(fv * c + fv)'], {}), '(cashF, fv * c + fv)\n', (8237, 8257), True, 'import numpy as np\n'), ((8338, 8368), 'numpy.zeros', 'np.zeros', (['ttm'], {'dtype': 'np.double'}), '(ttm, dtype=np.double)\n', (8346, 8368), True, 'import numpy as np\n'), ((8493, 8504), 'numpy.sum', 'np.sum', (['dcf'], {}), '(dcf)\n', (8499, 8504), True, 'import numpy as np\n'), ((8577, 8604), 'numpy.insert', 'np.insert', (['cashF', '(0)', '(-price)'], {}), '(cashF, 0, -price)\n', (8586, 8604), True, 'import numpy as np\n'), ((8619, 8633), 'numpy_financial.irr', 'npf.irr', (['cashF'], {}), '(cashF)\n', (8626, 8633), True, 'import numpy_financial as npf\n'), ((9239, 9254), 'numpy.array', 'np.array', (['rates'], {}), '(rates)\n', (9247, 9254), True, 'import numpy as np\n'), ((9366, 9392), 'numpy.vstack', 'np.vstack', (['([fv * c] * ttm1)'], {}), '([fv * c] * ttm1)\n', (9375, 9392), True, 'import numpy as np\n'), ((9405, 9434), 'numpy.append', 'np.append', (['cashF', '(fv * c + fv)'], {}), '(cashF, fv * c + fv)\n', (9414, 9434), True, 'import numpy as np\n'), ((9446, 9458), 'numpy.array', 'np.array', (['[]'], {}), 
'([])\n', (9454, 9458), True, 'import numpy as np\n'), ((9579, 9590), 'numpy.sum', 'np.sum', (['dcf'], {}), '(dcf)\n', (9585, 9590), True, 'import numpy as np\n'), ((13415, 13430), 'numpy.array', 'np.array', (['price'], {}), '(price)\n', (13423, 13430), True, 'import numpy as np\n'), ((13474, 13486), 'numpy.array', 'np.array', (['fv'], {}), '(fv)\n', (13482, 13486), True, 'import numpy as np\n'), ((13499, 13510), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (13507, 13510), True, 'import numpy as np\n'), ((13544, 13557), 'numpy.array', 'np.array', (['ttm'], {}), '(ttm)\n', (13552, 13557), True, 'import numpy as np\n'), ((13675, 13686), 'numpy.max', 'np.max', (['ttm'], {}), '(ttm)\n', (13681, 13686), True, 'import numpy as np\n'), ((13779, 13791), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13787, 13791), True, 'import numpy as np\n'), ((13829, 13849), 'numpy.vstack', 'np.vstack', (['([0] * len)'], {}), '([0] * len)\n', (13838, 13849), True, 'import numpy as np\n'), ((13865, 13888), 'numpy.insert', 'np.insert', (['cons', '(0)', '(100)'], {}), '(cons, 0, 100)\n', (13874, 13888), True, 'import numpy as np\n'), ((13934, 13965), 'numpy.zeros', 'np.zeros', (['(price.size, mformat)'], {}), '((price.size, mformat))\n', (13942, 13965), True, 'import numpy as np\n'), ((14860, 14892), 'numpy.column_stack', 'np.column_stack', (['(price, Matrix)'], {}), '((price, Matrix))\n', (14875, 14892), True, 'import numpy as np\n'), ((14943, 14963), 'numpy.transpose', 'np.transpose', (['matrix'], {}), '(matrix)\n', (14955, 14963), True, 'import numpy as np\n'), ((15059, 15085), 'numpy.linalg.solve', 'linalg.solve', (['matrix', 'cons'], {}), '(matrix, cons)\n', (15071, 15085), True, 'import numpy.linalg as linalg\n'), ((9508, 9570), 'numpy.append', 'np.append', (['dcf', '(cashF[i] * (i + 1) / (1 + rates[i]) ** (i + 2))'], {}), '(dcf, cashF[i] * (i + 1) / (1 + rates[i]) ** (i + 2))\n', (9517, 9570), True, 'import numpy as np\n'), ((14272, 14292), 'numpy.max', 'np.max', 
(['Matrix[i][:]'], {}), '(Matrix[i][:])\n', (14278, 14292), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ROI_label_window_dark.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SaveROI(object):
    """pyuic5-generated UI (dark theme) for the "save ROI" dialog: a prompt
    label, a scrollable label-selection area and a SAVE button.

    Regenerating from the .ui file overwrites manual edits to this class.
    """

    def setupUi(self, SaveROI):
        """Create, style and lay out all widgets on the ``SaveROI`` dialog."""
        SaveROI.setObjectName("SaveROI")
        SaveROI.resize(590, 462)
        # Dark theme: dark-blue background with white text.
        SaveROI.setStyleSheet("background-color: rgb(28, 29, 73);\n"
"color: rgb(255, 255, 255);")
        self.label = QtWidgets.QLabel(SaveROI)
        self.label.setGeometry(QtCore.QRect(180, 30, 271, 41))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(14)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.button_save_one_ROI = QtWidgets.QPushButton(SaveROI)
        self.button_save_one_ROI.setGeometry(QtCore.QRect(250, 400, 101, 31))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.button_save_one_ROI.setFont(font)
        self.button_save_one_ROI.setStyleSheet("background-color: rgb(0, 0, 100);")
        self.button_save_one_ROI.setObjectName("button_save_one_ROI")
        # Scrollable area that will host the ROI label choices.
        self.scrollArea = QtWidgets.QScrollArea(SaveROI)
        self.scrollArea.setGeometry(QtCore.QRect(90, 90, 441, 281))
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setAlignment(QtCore.Qt.AlignCenter)
        self.scrollArea.setObjectName("scrollArea")
        self.scrollAreaWidgetContents_2 = QtWidgets.QWidget()
        self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 439, 279))
        self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
        self.scrollArea.setWidget(self.scrollAreaWidgetContents_2)

        self.retranslateUi(SaveROI)
        QtCore.QMetaObject.connectSlotsByName(SaveROI)

    def retranslateUi(self, SaveROI):
        """Set all user-visible strings (hook for Qt's translation system)."""
        _translate = QtCore.QCoreApplication.translate
        SaveROI.setWindowTitle(_translate("SaveROI", "Dialog"))
        self.label.setText(_translate("SaveROI", "Choose the label of ROI:"))
        self.button_save_one_ROI.setText(_translate("SaveROI", "SAVE"))
if __name__ == "__main__":
    import sys

    # Stand-alone preview: open a bare dialog with this UI applied.
    application = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui_form = Ui_SaveROI()
    ui_form.setupUi(dialog)
    dialog.show()
    sys.exit(application.exec_())
|
[
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QDialog",
"PyQt5.QtGui.QFont",
"PyQt5.QtWidgets.QScrollArea",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QApplication"
] |
[((2381, 2413), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (2403, 2413), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2428, 2447), 'PyQt5.QtWidgets.QDialog', 'QtWidgets.QDialog', ([], {}), '()\n', (2445, 2447), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((601, 626), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['SaveROI'], {}), '(SaveROI)\n', (617, 626), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((705, 718), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (716, 718), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((891, 921), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['SaveROI'], {}), '(SaveROI)\n', (912, 921), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1015, 1028), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1026, 1028), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1372, 1402), 'PyQt5.QtWidgets.QScrollArea', 'QtWidgets.QScrollArea', (['SaveROI'], {}), '(SaveROI)\n', (1393, 1402), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1674, 1693), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1691, 1693), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1972, 2018), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['SaveROI'], {}), '(SaveROI)\n', (2009, 2018), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((658, 688), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(180)', '(30)', '(271)', '(41)'], {}), '(180, 30, 271, 41)\n', (670, 688), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((967, 998), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(250)', '(400)', '(101)', '(31)'], {}), '(250, 400, 101, 31)\n', (979, 998), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1439, 1469), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(90)', '(441)', '(281)'], {}), '(90, 90, 441, 281)\n', (1451, 1469), False, 'from PyQt5 
import QtCore, QtGui, QtWidgets\n'), ((1746, 1774), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(439)', '(279)'], {}), '(0, 0, 439, 279)\n', (1758, 1774), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
dropout dsl
"""
import akg.tvm
from akg.lang import cce as dav
from akg.utils import custom_tiling as ct_util
def iou_set_dim_func(anchor_box, ground_truth_box):
    """Build the tiling-dimension spec for the iou op from anchor_box's shape.

    Returns ``""`` (no custom tiling) when neither axis needs tiling;
    ``ground_truth_box`` is accepted for signature compatibility but unused.
    """
    tiles = []
    if anchor_box.shape[0].value > 1:
        tiles.append((1, 1))
    if anchor_box.shape[1].value > 16:
        tiles.append((16, 16))
    # Fall back to the default tiling when nothing was selected.
    return ct_util.set_dims(tuple(tiles)) if tiles else ""
@ct_util.reg_set_dim_func(iou_set_dim_func)
def iou_for_train(anchor_box, ground_truth_box):
    """
    Computes anchor_box and ground_truth_box's intersection-over-union.

    Args:
        anchor_box (tvm.tensor.Tensor): Tensor of type float16 shaped
            (batch_size, boxes, 8); boxes must be a multiple of 16.
        ground_truth_box (tvm.tensor.Tensor): Tensor of type float16 with
            the same layout constraints.

    Returns:
        tvm.tensor.Tensor of shape (batch, anchor_boxes, ground_truth_boxes).

    Raises:
        RuntimeError: if either input dtype is unsupported.
        ValueError: if a shape is not (batch, boxes, 8) or boxes is not a
            multiple of 16.
    """
    anchor_box_dtype = anchor_box.dtype
    ground_truth_box_dtype = ground_truth_box.dtype
    shape1 = [x.value for x in anchor_box.shape]
    shape2 = [x.value for x in ground_truth_box.shape]
    out_shape = [shape1[0], shape1[1], shape2[1]]

    check_list = ["float16"]
    # Bug fix: both dtypes must be supported (the old check used `or`, so a
    # bad dtype passed whenever the other input was float16), and the error
    # message wrongly referred to dropout_do_mask.
    if anchor_box_dtype not in check_list or ground_truth_box_dtype not in check_list:
        raise RuntimeError(
            "iou_for_train only support %s while dtype is %s" % (",".join(check_list), anchor_box_dtype))
    if len(shape1) != 3 or shape1[2] != 8:
        raise ValueError("proposal box should be allocated as [batch_size, boxes, 8]")
    if len(shape2) != 3 or shape2[2] != 8:
        raise ValueError("proposal box should be allocated as [batch_size, boxes, 8]")
    if shape1[1] % 16 != 0 or shape2[1] % 16 != 0:
        raise ValueError(
            'proposal box number only support in multiles of 16, please pad the data before implement this ops')

    # Reduce over the 8 proposal-box fields, combining pairs with the iou intrinsic.
    reducer = akg.tvm.comm_reducer(lambda x, y: y, lambda t: akg.tvm.const(0, dtype=t), name="reducer")
    k = akg.tvm.reduce_axis((0, 8), name='k')
    res = akg.tvm.compute(out_shape,
                          lambda bs, i, j: reducer(
                              dav.iou(anchor_box[bs, i, k], ground_truth_box[bs, j, k]), axis=k),
                          name="iou_area")
    return res
|
[
"akg.utils.custom_tiling.reg_set_dim_func",
"akg.lang.cce.iou"
] |
[((1016, 1058), 'akg.utils.custom_tiling.reg_set_dim_func', 'ct_util.reg_set_dim_func', (['iou_set_dim_func'], {}), '(iou_set_dim_func)\n', (1040, 1058), True, 'from akg.utils import custom_tiling as ct_util\n'), ((2637, 2694), 'akg.lang.cce.iou', 'dav.iou', (['anchor_box[bs, i, k]', 'ground_truth_box[bs, j, k]'], {}), '(anchor_box[bs, i, k], ground_truth_box[bs, j, k])\n', (2644, 2694), True, 'from akg.lang import cce as dav\n')]
|
import os
from configs.Configurations import Configurations
# Load the configuration list and report how many entries it holds.
configs = Configurations().configs
config_count = len(configs)
print(config_count)
|
[
"configs.Configurations.Configurations"
] |
[((71, 87), 'configs.Configurations.Configurations', 'Configurations', ([], {}), '()\n', (85, 87), False, 'from configs.Configurations import Configurations\n')]
|
# Plot (and write to table?) autocorrelation and ESS for samplers with different
# stepsizes on some standard functions (no noise?). See also MCMCVisualizer
# plots and pymc3 plots.
# Plot the following:
# vs NUTS with different initial stepsizes
# vs HMC with different initial stepsizes
# vs SGHMC with different initial stepsizes
# vs SGLD with different initial stepsizes
# other plots definitely should go in appendix unless they have some super-unexpected results
# => show table with all results (what is a reasonable table format?)
# List of samplers:
# - SGHMC (sgmcmc, theano) [x]
# - SGHMCHD (pysgmcmc, keras) [x]
# - SGLD (pysgmcmc, tensorflow) [x] <- from tensorflow_probability
# - HMC(pymc3) [x]
# - NUTS (pymc3, pymc3) <- compare to NUTS here as well! [x]
# - slice/metropolis (pymc3) [x]
# Left to add:
# - SVGD (?)
# - RelSGHMC (re-implement as optimizer in pysgmcmc)
# -
import sys
from os.path import dirname, join as path_join
sys.path.insert(0, path_join(dirname(__file__), ".."))
sys.path.insert(0, path_join(dirname(__file__), "robo"))
sys.path.insert(0, path_join(dirname(__file__), "pysgmcmc_development"))
import numpy as np
from keras import backend as K
from collections import OrderedDict
from itertools import islice, product
from pysgmcmc.diagnostics import PYSGMCMCTrace
from pysgmcmc.samplers.energy_functions import (
to_negative_log_likelihood,
Banana, Gmm1, Gmm2, Gmm3, MoGL2HMC,
StandardNormal,
Donut, Squiggle,
)
from pysgmcmc.samplers.sghmc import SGHMCSampler
from pysgmcmc.samplers.sghmchd import SGHMCHDSampler
from pysgmcmc.samplers.sgld import SGLDSampler
from pysgmcmc_experiments.experiment_wrapper import to_experiment
import pymc3 as pm
import dill
# XXX: Handle variable naming properly.
EXPERIMENT_NAME = "energy_functions"
def init_hmc(model, stepsize, init="jitter+adapt_diag", chains=1):
    """
    Build a pymc3 HamiltonianMC step method with a jittered, diagonally
    adapted quadpotential (mirrors pymc3's "jitter+adapt_diag" NUTS init).

    :param model: pymc3 model whose test point seeds the potential
    :param stepsize: HMC step scale
    :param init: initialization scheme; only "jitter+adapt_diag" is supported
    :param chains: number of jittered start points averaged for the potential
    :return: configured ``pm.step_methods.HamiltonianMC`` instance
    :raises NotImplementedError: for any other ``init`` value
    """
    from pymc3.step_methods.hmc import quadpotential
    if init == 'jitter+adapt_diag':
        start = []
        for _ in range(chains):
            mean = {var: val.copy() for var, val in model.test_point.items()}
            for val in mean.values():
                # Jitter each start point uniformly in [-1, 1).
                val[...] += 2 * np.random.rand(*val.shape) - 1
            start.append(mean)
        mean = np.mean([model.dict_to_array(vals) for vals in start], axis=0)
        var = np.ones_like(mean)
        potential = quadpotential.QuadPotentialDiagAdapt(
            model.ndim, mean, var, 10)
        return pm.step_methods.HamiltonianMC(
            step_scale=stepsize,
            potential=potential,
            path_length=1
        )
    else:
        raise NotImplementedError()
# Each entry maps a name to (energy-function object, factory producing the
# initial Keras variables a chain starts from).
ENERGY_FUNCTIONS = OrderedDict((
    ("banana",
     (Banana(), lambda: [K.random_normal_variable(mean=0., scale=1., shape=(1,)),
                         K.random_normal_variable(mean=0., scale=1., shape=(1,))])),
    ("gmm1",
     (Gmm1(), lambda: [K.variable(K.random_normal((1,)), name="x", dtype=K.floatx())],)),
    ("gmm2", (
        Gmm2(),
        lambda: [K.variable(K.random_normal((1,)), name="x", dtype=K.floatx())],
    )),
    ("gmm3",
     (Gmm3(), lambda: [K.variable(K.random_normal((1,)), name="x", dtype=K.floatx())],)
     ),
    ("mogl2hmc",
     (MoGL2HMC(), lambda: [K.variable(K.random_normal((1,)), name="x", dtype=K.floatx())],)
     ),
    ("standard_normal",
     (StandardNormal(), lambda: [K.variable(K.random_normal((1,)), name="x", dtype=K.floatx())],)
     ),
    ("donut",
     (Donut(), lambda: [K.random_normal_variable(mean=0., scale=1., shape=(1,)),
                        K.random_normal_variable(mean=0., scale=1., shape=(1,))])),
    ("squiggle",
     (Squiggle(), lambda: [K.random_normal_variable(mean=0., scale=1., shape=(1,)),
                           K.random_normal_variable(mean=0., scale=1., shape=(1,))])),
))

# Samplers handled through pymc3's sampling machinery (the rest run through
# pysgmcmc's optimizer-style interface).
PYMC3_SAMPLERS = ("NUTS", "HMC", "Metropolis", "Slice",)

SAMPLERS = OrderedDict((
    ("SGHMC", SGHMCSampler),
    ("SGHMCHD", SGHMCHDSampler),
    ("SGLD", SGLDSampler),
    ("NUTS", pm.step_methods.NUTS),
    ("HMC", pm.step_methods.HamiltonianMC),
    ("Metropolis", pm.step_methods.Metropolis),
    ("Slice", pm.step_methods.Slice),
))

# Stepsize grid swept for every stepsize-sensitive sampler.
STEPSIZES = tuple((
    1e-12, 1e-10, 1e-8, 1e-6, 1e-4, 1e-2, 0.25, 0.5, 1.0,
))

# Full cross-product of (energy function, sampler, stepsize) ...
CONFIGURATIONS = [
    {"energy_function": energy_function, "sampler": sampler, "stepsize": stepsize}
    for energy_function, sampler, stepsize in
    product(ENERGY_FUNCTIONS, SAMPLERS, STEPSIZES)
    if sampler not in ("Metropolis", "Slice")
]
# ... plus Metropolis/Slice, which take no stepsize.
CONFIGURATIONS.extend([
    {"energy_function": energy_function, "sampler": sampler, "stepsize": None}
    for energy_function, sampler in
    product(ENERGY_FUNCTIONS, ("Metropolis", "Slice"))
])
def get_trace(sampler, stepsize, energy_function, _run, burn_in_steps=3000, sampling_steps=10 ** 4, num_chains=10):
    """
    Sample ``num_chains`` chains from ``energy_function`` with ``sampler``
    and pickle the resulting multi-chain trace into the run's results folder.

    :param sampler: key into ``SAMPLERS``
    :param stepsize: sampler stepsize / learning rate (unused by NUTS,
        Metropolis and Slice)
    :param energy_function: key into ``ENERGY_FUNCTIONS``
    :param _run: sacred run object; its id determines the output path
    :param burn_in_steps: tuning / burn-in iterations per chain
    :param sampling_steps: sampling iterations per chain
    :param num_chains: number of independent chains
    :return: True on success (the trace itself is written to disk)
    """
    energy_function_, initial_guess = ENERGY_FUNCTIONS[energy_function]
    initial_sample = initial_guess()
    sampler_cls = SAMPLERS[sampler]
    if sampler in PYMC3_SAMPLERS:
        # pymc3 path: build a model from the energy function and draw one
        # chain per call, each with a distinct chain index.
        def draw_trace(chain_id):
            with pm.Model() as model:
                energy_function_.to_pymc3()
                if sampler == "NUTS":
                    from pymc3.sampling import init_nuts
                    start, step = init_nuts(
                        init="auto",
                        n_init=200000,
                        model=model,
                        progressbar=True
                    )
                    trace = pm.sample(
                        sampling_steps,
                        tune=burn_in_steps,
                        step=step,
                        chains=1,
                        chain_idx=chain_id,
                        start=start,
                        discard_tuned_samples=False
                    )
                elif sampler == "HMC":
                    step = init_hmc(stepsize=stepsize, model=model)
                    trace = pm.sample(sampling_steps, tune=burn_in_steps, step=step, chains=1, discard_tuned_samples=False, chain_idx=chain_id)
                else:
                    step = SAMPLERS[sampler]()
                    trace = pm.sample(
                        sampling_steps,
                        tune=burn_in_steps,
                        step=step,
                        chains=1,
                        chain_idx=chain_id,
                        discard_tuned_samples=False
                    )
            return trace

        # Merge single-chain MultiTraces into one, requiring unique chain ids.
        def combine_traces(multitraces):
            base_trace = multitraces[0]
            for multitrace in multitraces[1:]:
                for chain, strace in multitrace._straces.items():
                    if chain in base_trace._straces:
                        raise ValueError("Chains are not unique.")
                    base_trace._straces[chain] = strace
            return base_trace
        multitrace = combine_traces(
            [draw_trace(chain_id=chain_id) for chain_id in range(num_chains)]
        )
    else:
        # pysgmcmc path: wrap the energy function as a loss and iterate the
        # sampler like an optimizer.
        def loss_for(sampler, energy_function):
            def loss_fun(sample):
                loss_tensor = to_negative_log_likelihood(energy_function)(sample)
                # Attach hypergradients so SGHMCHD can tune its own stepsize.
                for param in sample:
                    param.hypergradient = K.gradients(loss_tensor, param)
                return loss_tensor
            return loss_fun

        # Draw one chain; restart from scratch if any sample diverged to NaN.
        def draw_chain():
            loss = loss_for(sampler_cls, energy_function_)(initial_sample)
            sampler_ = sampler_cls(params=initial_sample, loss=loss, lr=stepsize)
            samples = np.asarray([
                sample for _, sample in islice(sampler_, burn_in_steps + sampling_steps)
            ])
            if np.isnan(samples).any():
                print("Had nans.. iterating")
                return draw_chain()
            return np.squeeze(samples, 2)
        # NOTE(review): ``id`` shadows the builtin inside this comprehension.
        multitrace = pm.backends.base.MultiTrace([PYSGMCMCTrace(draw_chain(), chain_id=id) for id in range(num_chains)])
    # Persist the combined trace next to this run's other artifacts.
    output_filename = path_join(
        dirname(__file__),
        "../results/{}/{}/trace.pkl".format(EXPERIMENT_NAME, _run._id)
    )
    with open(output_filename, "wb") as trace_buffer:
        dill.dump(multitrace, trace_buffer)
    return True
# Sacred-style experiment that runs get_trace over every configuration.
experiment = to_experiment(
    experiment_name=EXPERIMENT_NAME,
    function=get_trace,
    configurations=CONFIGURATIONS,
)
|
[
"pymc3.sample",
"pysgmcmc.samplers.energy_functions.Gmm1",
"numpy.isnan",
"numpy.random.rand",
"os.path.dirname",
"keras.backend.random_normal_variable",
"itertools.product",
"pymc3.step_methods.HamiltonianMC",
"pymc3.sampling.init_nuts",
"pysgmcmc.samplers.energy_functions.MoGL2HMC",
"keras.backend.gradients",
"pysgmcmc.samplers.energy_functions.Donut",
"numpy.ones_like",
"pymc3.Model",
"pysgmcmc.samplers.energy_functions.Squiggle",
"keras.backend.random_normal",
"itertools.islice",
"numpy.squeeze",
"dill.dump",
"pysgmcmc_experiments.experiment_wrapper.to_experiment",
"pysgmcmc.samplers.energy_functions.Banana",
"keras.backend.floatx",
"pysgmcmc.samplers.energy_functions.to_negative_log_likelihood",
"pysgmcmc.samplers.energy_functions.Gmm2",
"pymc3.step_methods.hmc.quadpotential.QuadPotentialDiagAdapt",
"pysgmcmc.samplers.energy_functions.Gmm3",
"collections.OrderedDict",
"pysgmcmc.samplers.energy_functions.StandardNormal"
] |
[((3859, 4112), 'collections.OrderedDict', 'OrderedDict', (["(('SGHMC', SGHMCSampler), ('SGHMCHD', SGHMCHDSampler), ('SGLD', SGLDSampler\n ), ('NUTS', pm.step_methods.NUTS), ('HMC', pm.step_methods.\n HamiltonianMC), ('Metropolis', pm.step_methods.Metropolis), ('Slice',\n pm.step_methods.Slice))"], {}), "((('SGHMC', SGHMCSampler), ('SGHMCHD', SGHMCHDSampler), ('SGLD',\n SGLDSampler), ('NUTS', pm.step_methods.NUTS), ('HMC', pm.step_methods.\n HamiltonianMC), ('Metropolis', pm.step_methods.Metropolis), ('Slice',\n pm.step_methods.Slice)))\n", (3870, 4112), False, 'from collections import OrderedDict\n'), ((8152, 8253), 'pysgmcmc_experiments.experiment_wrapper.to_experiment', 'to_experiment', ([], {'experiment_name': 'EXPERIMENT_NAME', 'function': 'get_trace', 'configurations': 'CONFIGURATIONS'}), '(experiment_name=EXPERIMENT_NAME, function=get_trace,\n configurations=CONFIGURATIONS)\n', (8165, 8253), False, 'from pysgmcmc_experiments.experiment_wrapper import to_experiment\n'), ((983, 1000), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (990, 1000), False, 'from os.path import dirname, join as path_join\n'), ((1038, 1055), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (1045, 1055), False, 'from os.path import dirname, join as path_join\n'), ((1095, 1112), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (1102, 1112), False, 'from os.path import dirname, join as path_join\n'), ((2312, 2330), 'numpy.ones_like', 'np.ones_like', (['mean'], {}), '(mean)\n', (2324, 2330), True, 'import numpy as np\n'), ((2351, 2414), 'pymc3.step_methods.hmc.quadpotential.QuadPotentialDiagAdapt', 'quadpotential.QuadPotentialDiagAdapt', (['model.ndim', 'mean', 'var', '(10)'], {}), '(model.ndim, mean, var, 10)\n', (2387, 2414), False, 'from pymc3.step_methods.hmc import quadpotential\n'), ((2444, 2534), 'pymc3.step_methods.HamiltonianMC', 'pm.step_methods.HamiltonianMC', ([], {'step_scale': 'stepsize', 'potential': 'potential', 
'path_length': '(1)'}), '(step_scale=stepsize, potential=potential,\n path_length=1)\n', (2473, 2534), True, 'import pymc3 as pm\n'), ((4366, 4412), 'itertools.product', 'product', (['ENERGY_FUNCTIONS', 'SAMPLERS', 'STEPSIZES'], {}), '(ENERGY_FUNCTIONS, SAMPLERS, STEPSIZES)\n', (4373, 4412), False, 'from itertools import islice, product\n'), ((7927, 7944), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (7934, 7944), False, 'from os.path import dirname, join as path_join\n'), ((8085, 8120), 'dill.dump', 'dill.dump', (['multitrace', 'trace_buffer'], {}), '(multitrace, trace_buffer)\n', (8094, 8120), False, 'import dill\n'), ((4605, 4655), 'itertools.product', 'product', (['ENERGY_FUNCTIONS', "('Metropolis', 'Slice')"], {}), "(ENERGY_FUNCTIONS, ('Metropolis', 'Slice'))\n", (4612, 4655), False, 'from itertools import islice, product\n'), ((7740, 7762), 'numpy.squeeze', 'np.squeeze', (['samples', '(2)'], {}), '(samples, 2)\n', (7750, 7762), True, 'import numpy as np\n'), ((2679, 2687), 'pysgmcmc.samplers.energy_functions.Banana', 'Banana', ([], {}), '()\n', (2685, 2687), False, 'from pysgmcmc.samplers.energy_functions import to_negative_log_likelihood, Banana, Gmm1, Gmm2, Gmm3, MoGL2HMC, StandardNormal, Donut, Squiggle\n'), ((2859, 2865), 'pysgmcmc.samplers.energy_functions.Gmm1', 'Gmm1', ([], {}), '()\n', (2863, 2865), False, 'from pysgmcmc.samplers.energy_functions import to_negative_log_likelihood, Banana, Gmm1, Gmm2, Gmm3, MoGL2HMC, StandardNormal, Donut, Squiggle\n'), ((2966, 2972), 'pysgmcmc.samplers.energy_functions.Gmm2', 'Gmm2', ([], {}), '()\n', (2970, 2972), False, 'from pysgmcmc.samplers.energy_functions import to_negative_log_likelihood, Banana, Gmm1, Gmm2, Gmm3, MoGL2HMC, StandardNormal, Donut, Squiggle\n'), ((3082, 3088), 'pysgmcmc.samplers.energy_functions.Gmm3', 'Gmm3', ([], {}), '()\n', (3086, 3088), False, 'from pysgmcmc.samplers.energy_functions import to_negative_log_likelihood, Banana, Gmm1, Gmm2, Gmm3, MoGL2HMC, StandardNormal, 
Donut, Squiggle\n'), ((3195, 3205), 'pysgmcmc.samplers.energy_functions.MoGL2HMC', 'MoGL2HMC', ([], {}), '()\n', (3203, 3205), False, 'from pysgmcmc.samplers.energy_functions import to_negative_log_likelihood, Banana, Gmm1, Gmm2, Gmm3, MoGL2HMC, StandardNormal, Donut, Squiggle\n'), ((3319, 3335), 'pysgmcmc.samplers.energy_functions.StandardNormal', 'StandardNormal', ([], {}), '()\n', (3333, 3335), False, 'from pysgmcmc.samplers.energy_functions import to_negative_log_likelihood, Banana, Gmm1, Gmm2, Gmm3, MoGL2HMC, StandardNormal, Donut, Squiggle\n'), ((3439, 3446), 'pysgmcmc.samplers.energy_functions.Donut', 'Donut', ([], {}), '()\n', (3444, 3446), False, 'from pysgmcmc.samplers.energy_functions import to_negative_log_likelihood, Banana, Gmm1, Gmm2, Gmm3, MoGL2HMC, StandardNormal, Donut, Squiggle\n'), ((3621, 3631), 'pysgmcmc.samplers.energy_functions.Squiggle', 'Squiggle', ([], {}), '()\n', (3629, 3631), False, 'from pysgmcmc.samplers.energy_functions import to_negative_log_likelihood, Banana, Gmm1, Gmm2, Gmm3, MoGL2HMC, StandardNormal, Donut, Squiggle\n'), ((5008, 5018), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (5016, 5018), True, 'import pymc3 as pm\n'), ((5202, 5270), 'pymc3.sampling.init_nuts', 'init_nuts', ([], {'init': '"""auto"""', 'n_init': '(200000)', 'model': 'model', 'progressbar': '(True)'}), "(init='auto', n_init=200000, model=model, progressbar=True)\n", (5211, 5270), False, 'from pymc3.sampling import init_nuts\n'), ((5418, 5550), 'pymc3.sample', 'pm.sample', (['sampling_steps'], {'tune': 'burn_in_steps', 'step': 'step', 'chains': '(1)', 'chain_idx': 'chain_id', 'start': 'start', 'discard_tuned_samples': '(False)'}), '(sampling_steps, tune=burn_in_steps, step=step, chains=1,\n chain_idx=chain_id, start=start, discard_tuned_samples=False)\n', (5427, 5550), True, 'import pymc3 as pm\n'), ((7047, 7090), 'pysgmcmc.samplers.energy_functions.to_negative_log_likelihood', 'to_negative_log_likelihood', (['energy_function'], {}), '(energy_function)\n', 
(7073, 7090), False, 'from pysgmcmc.samplers.energy_functions import to_negative_log_likelihood, Banana, Gmm1, Gmm2, Gmm3, MoGL2HMC, StandardNormal, Donut, Squiggle\n'), ((7178, 7209), 'keras.backend.gradients', 'K.gradients', (['loss_tensor', 'param'], {}), '(loss_tensor, param)\n', (7189, 7209), True, 'from keras import backend as K\n'), ((7613, 7630), 'numpy.isnan', 'np.isnan', (['samples'], {}), '(samples)\n', (7621, 7630), True, 'import numpy as np\n'), ((2158, 2184), 'numpy.random.rand', 'np.random.rand', (['*val.shape'], {}), '(*val.shape)\n', (2172, 2184), True, 'import numpy as np\n'), ((2698, 2755), 'keras.backend.random_normal_variable', 'K.random_normal_variable', ([], {'mean': '(0.0)', 'scale': '(1.0)', 'shape': '(1,)'}), '(mean=0.0, scale=1.0, shape=(1,))\n', (2722, 2755), True, 'from keras import backend as K\n'), ((2780, 2837), 'keras.backend.random_normal_variable', 'K.random_normal_variable', ([], {'mean': '(0.0)', 'scale': '(1.0)', 'shape': '(1,)'}), '(mean=0.0, scale=1.0, shape=(1,))\n', (2804, 2837), True, 'from keras import backend as K\n'), ((3457, 3514), 'keras.backend.random_normal_variable', 'K.random_normal_variable', ([], {'mean': '(0.0)', 'scale': '(1.0)', 'shape': '(1,)'}), '(mean=0.0, scale=1.0, shape=(1,))\n', (3481, 3514), True, 'from keras import backend as K\n'), ((3538, 3595), 'keras.backend.random_normal_variable', 'K.random_normal_variable', ([], {'mean': '(0.0)', 'scale': '(1.0)', 'shape': '(1,)'}), '(mean=0.0, scale=1.0, shape=(1,))\n', (3562, 3595), True, 'from keras import backend as K\n'), ((3642, 3699), 'keras.backend.random_normal_variable', 'K.random_normal_variable', ([], {'mean': '(0.0)', 'scale': '(1.0)', 'shape': '(1,)'}), '(mean=0.0, scale=1.0, shape=(1,))\n', (3666, 3699), True, 'from keras import backend as K\n'), ((3726, 3783), 'keras.backend.random_normal_variable', 'K.random_normal_variable', ([], {'mean': '(0.0)', 'scale': '(1.0)', 'shape': '(1,)'}), '(mean=0.0, scale=1.0, shape=(1,))\n', (3750, 3783), True, 
'from keras import backend as K\n'), ((5872, 5991), 'pymc3.sample', 'pm.sample', (['sampling_steps'], {'tune': 'burn_in_steps', 'step': 'step', 'chains': '(1)', 'discard_tuned_samples': '(False)', 'chain_idx': 'chain_id'}), '(sampling_steps, tune=burn_in_steps, step=step, chains=1,\n discard_tuned_samples=False, chain_idx=chain_id)\n', (5881, 5991), True, 'import pymc3 as pm\n'), ((6085, 6204), 'pymc3.sample', 'pm.sample', (['sampling_steps'], {'tune': 'burn_in_steps', 'step': 'step', 'chains': '(1)', 'chain_idx': 'chain_id', 'discard_tuned_samples': '(False)'}), '(sampling_steps, tune=burn_in_steps, step=step, chains=1,\n chain_idx=chain_id, discard_tuned_samples=False)\n', (6094, 6204), True, 'import pymc3 as pm\n'), ((7533, 7581), 'itertools.islice', 'islice', (['sampler_', '(burn_in_steps + sampling_steps)'], {}), '(sampler_, burn_in_steps + sampling_steps)\n', (7539, 7581), False, 'from itertools import islice, product\n'), ((2887, 2908), 'keras.backend.random_normal', 'K.random_normal', (['(1,)'], {}), '((1,))\n', (2902, 2908), True, 'from keras import backend as K\n'), ((3002, 3023), 'keras.backend.random_normal', 'K.random_normal', (['(1,)'], {}), '((1,))\n', (3017, 3023), True, 'from keras import backend as K\n'), ((3110, 3131), 'keras.backend.random_normal', 'K.random_normal', (['(1,)'], {}), '((1,))\n', (3125, 3131), True, 'from keras import backend as K\n'), ((3227, 3248), 'keras.backend.random_normal', 'K.random_normal', (['(1,)'], {}), '((1,))\n', (3242, 3248), True, 'from keras import backend as K\n'), ((3357, 3378), 'keras.backend.random_normal', 'K.random_normal', (['(1,)'], {}), '((1,))\n', (3372, 3378), True, 'from keras import backend as K\n'), ((2926, 2936), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (2934, 2936), True, 'from keras import backend as K\n'), ((3041, 3051), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3049, 3051), True, 'from keras import backend as K\n'), ((3149, 3159), 'keras.backend.floatx', 'K.floatx', 
([], {}), '()\n', (3157, 3159), True, 'from keras import backend as K\n'), ((3266, 3276), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3274, 3276), True, 'from keras import backend as K\n'), ((3396, 3406), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3404, 3406), True, 'from keras import backend as K\n')]
|
import logging
import time
import unittest
from neurolib.models.aln import ALNModel
from neurolib.models.fhn import FHNModel
from neurolib.models.hopf import HopfModel
from neurolib.models.thalamus import ThalamicMassModel
from neurolib.models.wc import WCModel
from neurolib.models.subdivwc import SubDivWCModel
from neurolib.utils.loadData import Dataset
class TestAln(unittest.TestCase):
"""
Basic test for ALN model.
"""
def test_single_node(self):
import neurolib.models.aln.loadDefaultParams as dp
logging.info("\t > ALN: Testing single node ...")
start = time.time()
aln = ALNModel()
aln.params["duration"] = 10.0 * 1000
aln.params["sigma_ou"] = 0.1 # add some noise
# load new initial parameters
aln.run(bold=True)
# access outputs
aln.xr()
aln.xr("BOLD")
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
def test_network(self):
logging.info("\t > ALN: Testing brain network (chunkwise integration and BOLD" " simulation) ...")
start = time.time()
ds = Dataset("gw")
aln = ALNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
aln.params["duration"] = 10 * 1000
aln.run(chunkwise=True, bold=True, append_outputs=True)
# access outputs
aln.xr()
aln.xr("BOLD")
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
class TestHopf(unittest.TestCase):
"""
Basic test for Hopf model.
"""
def test_single_node(self):
logging.info("\t > Hopf: Testing single node ...")
start = time.time()
hopf = HopfModel()
hopf.params["duration"] = 2.0 * 1000
hopf.params["sigma_ou"] = 0.03
hopf.run()
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
def test_network(self):
logging.info("\t > Hopf: Testing brain network (chunkwise integration and BOLD" " simulation) ...")
start = time.time()
ds = Dataset("gw")
hopf = HopfModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
hopf.params["w"] = 1.0
hopf.params["signalV"] = 0
hopf.params["duration"] = 10 * 1000
hopf.params["sigma_ou"] = 0.14
hopf.params["K_gl"] = 0.6
hopf.run(chunkwise=True, bold=True, append_outputs=True)
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
class TestFHN(unittest.TestCase):
"""
Basic test for FHN model.
"""
def test_single_node(self):
logging.info("\t > FHN: Testing single node ...")
start = time.time()
fhn = FHNModel()
fhn.params["duration"] = 2.0 * 1000
fhn.params["sigma_ou"] = 0.03
fhn.run()
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
def test_network(self):
logging.info("\t > FHN: Testing brain network (chunkwise integration and BOLD simulation) ...")
start = time.time()
ds = Dataset("gw")
fhn = FHNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
fhn.params["signalV"] = 4.0
fhn.params["duration"] = 10 * 1000
fhn.params["sigma_ou"] = 0.1
fhn.params["K_gl"] = 0.6
fhn.params["x_ext_mean"] = 0.72
fhn.run(chunkwise=True, bold=True, append_outputs=True)
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
class TestWC(unittest.TestCase):
"""
Basic test for WC model.
"""
def test_single_node(self):
logging.info("\t > WC: Testing single node ...")
start = time.time()
wc = WCModel()
wc.params["duration"] = 2.0 * 1000
wc.params["sigma_ou"] = 0.03
wc.run()
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
def test_network(self):
logging.info("\t > WC: Testing brain network (chunkwise integration and BOLD simulation) ...")
start = time.time()
ds = Dataset("gw")
wc = WCModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
wc.params["signalV"] = 4.0
wc.params["duration"] = 10 * 1000
wc.params["sigma_ou"] = 0.1
wc.params["K_gl"] = 0.6
wc.params["x_ext_mean"] = 0.72
wc.run(chunkwise=True, bold=True, append_outputs=True)
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
class TestThalamus(unittest.TestCase):
"""
Basic test for thalamic mass model.
"""
def test_single_node(self):
logging.info("\t > Thalamus: Testing single node ...")
start = time.time()
thalamus = ThalamicMassModel()
thalamus.params["duration"] = 2.0 * 1000
thalamus.run()
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
class TestSubDivWC(unittest.TestCase):
"""
Basic test for subtractive/divisive WC model.
"""
def test_single_node(self):
logging.info("\t > SubDivWC: Testing single node ...")
start = time.time()
subdivwc = SubDivWCModel()
subdivwc.params["duration"] = 2.0 * 1000
subdivwc.params["sigma_ou"] = 0.03
subdivwc.run()
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
def test_network(self):
logging.info("\t > SubDivWC: Testing brain network (chunkwise integration and BOLD simulation) ...")
start = time.time()
ds = Dataset("gw")
subdivwc = SubDivWCModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
subdivwc.params["signalV"] = 4.0
subdivwc.params["duration"] = 10 * 1000
subdivwc.params["sigma_ou"] = 0.1
subdivwc.params["K_gl"] = 0.6
subdivwc.params["exc_ext_mean"] = 0.72
subdivwc.run(chunkwise=True, bold=True, append_outputs=True)
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"neurolib.models.fhn.FHNModel",
"neurolib.models.subdivwc.SubDivWCModel",
"neurolib.models.wc.WCModel",
"time.time",
"logging.info",
"neurolib.models.aln.ALNModel",
"neurolib.models.thalamus.ThalamicMassModel",
"neurolib.utils.loadData.Dataset",
"neurolib.models.hopf.HopfModel"
] |
[((6055, 6070), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6068, 6070), False, 'import unittest\n'), ((541, 590), 'logging.info', 'logging.info', (['"""\t > ALN: Testing single node ..."""'], {}), "('\\t > ALN: Testing single node ...')\n", (553, 590), False, 'import logging\n'), ((607, 618), 'time.time', 'time.time', ([], {}), '()\n', (616, 618), False, 'import time\n'), ((634, 644), 'neurolib.models.aln.ALNModel', 'ALNModel', ([], {}), '()\n', (642, 644), False, 'from neurolib.models.aln import ALNModel\n'), ((890, 901), 'time.time', 'time.time', ([], {}), '()\n', (899, 901), False, 'import time\n'), ((1005, 1110), 'logging.info', 'logging.info', (['"""\t > ALN: Testing brain network (chunkwise integration and BOLD simulation) ..."""'], {}), "(\n '\\t > ALN: Testing brain network (chunkwise integration and BOLD simulation) ...'\n )\n", (1017, 1110), False, 'import logging\n'), ((1120, 1131), 'time.time', 'time.time', ([], {}), '()\n', (1129, 1131), False, 'import time\n'), ((1146, 1159), 'neurolib.utils.loadData.Dataset', 'Dataset', (['"""gw"""'], {}), "('gw')\n", (1153, 1159), False, 'from neurolib.utils.loadData import Dataset\n'), ((1175, 1211), 'neurolib.models.aln.ALNModel', 'ALNModel', ([], {'Cmat': 'ds.Cmat', 'Dmat': 'ds.Dmat'}), '(Cmat=ds.Cmat, Dmat=ds.Dmat)\n', (1183, 1211), False, 'from neurolib.models.aln import ALNModel\n'), ((1402, 1413), 'time.time', 'time.time', ([], {}), '()\n', (1411, 1413), False, 'import time\n'), ((1605, 1655), 'logging.info', 'logging.info', (['"""\t > Hopf: Testing single node ..."""'], {}), "('\\t > Hopf: Testing single node ...')\n", (1617, 1655), False, 'import logging\n'), ((1672, 1683), 'time.time', 'time.time', ([], {}), '()\n', (1681, 1683), False, 'import time\n'), ((1699, 1710), 'neurolib.models.hopf.HopfModel', 'HopfModel', ([], {}), '()\n', (1708, 1710), False, 'from neurolib.models.hopf import HopfModel\n'), ((1830, 1841), 'time.time', 'time.time', ([], {}), '()\n', (1839, 1841), False, 'import 
time\n'), ((1945, 2051), 'logging.info', 'logging.info', (['"""\t > Hopf: Testing brain network (chunkwise integration and BOLD simulation) ..."""'], {}), "(\n '\\t > Hopf: Testing brain network (chunkwise integration and BOLD simulation) ...'\n )\n", (1957, 2051), False, 'import logging\n'), ((2061, 2072), 'time.time', 'time.time', ([], {}), '()\n', (2070, 2072), False, 'import time\n'), ((2086, 2099), 'neurolib.utils.loadData.Dataset', 'Dataset', (['"""gw"""'], {}), "('gw')\n", (2093, 2099), False, 'from neurolib.utils.loadData import Dataset\n'), ((2115, 2152), 'neurolib.models.hopf.HopfModel', 'HopfModel', ([], {'Cmat': 'ds.Cmat', 'Dmat': 'ds.Dmat'}), '(Cmat=ds.Cmat, Dmat=ds.Dmat)\n', (2124, 2152), False, 'from neurolib.models.hopf import HopfModel\n'), ((2417, 2428), 'time.time', 'time.time', ([], {}), '()\n', (2426, 2428), False, 'import time\n'), ((2618, 2667), 'logging.info', 'logging.info', (['"""\t > FHN: Testing single node ..."""'], {}), "('\\t > FHN: Testing single node ...')\n", (2630, 2667), False, 'import logging\n'), ((2684, 2695), 'time.time', 'time.time', ([], {}), '()\n', (2693, 2695), False, 'import time\n'), ((2710, 2720), 'neurolib.models.fhn.FHNModel', 'FHNModel', ([], {}), '()\n', (2718, 2720), False, 'from neurolib.models.fhn import FHNModel\n'), ((2837, 2848), 'time.time', 'time.time', ([], {}), '()\n', (2846, 2848), False, 'import time\n'), ((2952, 3057), 'logging.info', 'logging.info', (['"""\t > FHN: Testing brain network (chunkwise integration and BOLD simulation) ..."""'], {}), "(\n '\\t > FHN: Testing brain network (chunkwise integration and BOLD simulation) ...'\n )\n", (2964, 3057), False, 'import logging\n'), ((3064, 3075), 'time.time', 'time.time', ([], {}), '()\n', (3073, 3075), False, 'import time\n'), ((3089, 3102), 'neurolib.utils.loadData.Dataset', 'Dataset', (['"""gw"""'], {}), "('gw')\n", (3096, 3102), False, 'from neurolib.utils.loadData import Dataset\n'), ((3117, 3153), 'neurolib.models.fhn.FHNModel', 'FHNModel', ([], 
{'Cmat': 'ds.Cmat', 'Dmat': 'ds.Dmat'}), '(Cmat=ds.Cmat, Dmat=ds.Dmat)\n', (3125, 3153), False, 'from neurolib.models.fhn import FHNModel\n'), ((3422, 3433), 'time.time', 'time.time', ([], {}), '()\n', (3431, 3433), False, 'import time\n'), ((3621, 3669), 'logging.info', 'logging.info', (['"""\t > WC: Testing single node ..."""'], {}), "('\\t > WC: Testing single node ...')\n", (3633, 3669), False, 'import logging\n'), ((3686, 3697), 'time.time', 'time.time', ([], {}), '()\n', (3695, 3697), False, 'import time\n'), ((3711, 3720), 'neurolib.models.wc.WCModel', 'WCModel', ([], {}), '()\n', (3718, 3720), False, 'from neurolib.models.wc import WCModel\n'), ((3834, 3845), 'time.time', 'time.time', ([], {}), '()\n', (3843, 3845), False, 'import time\n'), ((3949, 4053), 'logging.info', 'logging.info', (['"""\t > WC: Testing brain network (chunkwise integration and BOLD simulation) ..."""'], {}), "(\n '\\t > WC: Testing brain network (chunkwise integration and BOLD simulation) ...'\n )\n", (3961, 4053), False, 'import logging\n'), ((4060, 4071), 'time.time', 'time.time', ([], {}), '()\n', (4069, 4071), False, 'import time\n'), ((4085, 4098), 'neurolib.utils.loadData.Dataset', 'Dataset', (['"""gw"""'], {}), "('gw')\n", (4092, 4098), False, 'from neurolib.utils.loadData import Dataset\n'), ((4112, 4147), 'neurolib.models.wc.WCModel', 'WCModel', ([], {'Cmat': 'ds.Cmat', 'Dmat': 'ds.Dmat'}), '(Cmat=ds.Cmat, Dmat=ds.Dmat)\n', (4119, 4147), False, 'from neurolib.models.wc import WCModel\n'), ((4410, 4421), 'time.time', 'time.time', ([], {}), '()\n', (4419, 4421), False, 'import time\n'), ((4626, 4680), 'logging.info', 'logging.info', (['"""\t > Thalamus: Testing single node ..."""'], {}), "('\\t > Thalamus: Testing single node ...')\n", (4638, 4680), False, 'import logging\n'), ((4697, 4708), 'time.time', 'time.time', ([], {}), '()\n', (4706, 4708), False, 'import time\n'), ((4728, 4747), 'neurolib.models.thalamus.ThalamicMassModel', 'ThalamicMassModel', ([], {}), '()\n', (4745, 
4747), False, 'from neurolib.models.thalamus import ThalamicMassModel\n'), ((4836, 4847), 'time.time', 'time.time', ([], {}), '()\n', (4845, 4847), False, 'import time\n'), ((5062, 5116), 'logging.info', 'logging.info', (['"""\t > SubDivWC: Testing single node ..."""'], {}), "('\\t > SubDivWC: Testing single node ...')\n", (5074, 5116), False, 'import logging\n'), ((5133, 5144), 'time.time', 'time.time', ([], {}), '()\n', (5142, 5144), False, 'import time\n'), ((5164, 5179), 'neurolib.models.subdivwc.SubDivWCModel', 'SubDivWCModel', ([], {}), '()\n', (5177, 5179), False, 'from neurolib.models.subdivwc import SubDivWCModel\n'), ((5311, 5322), 'time.time', 'time.time', ([], {}), '()\n', (5320, 5322), False, 'import time\n'), ((5426, 5536), 'logging.info', 'logging.info', (['"""\t > SubDivWC: Testing brain network (chunkwise integration and BOLD simulation) ..."""'], {}), "(\n '\\t > SubDivWC: Testing brain network (chunkwise integration and BOLD simulation) ...'\n )\n", (5438, 5536), False, 'import logging\n'), ((5543, 5554), 'time.time', 'time.time', ([], {}), '()\n', (5552, 5554), False, 'import time\n'), ((5568, 5581), 'neurolib.utils.loadData.Dataset', 'Dataset', (['"""gw"""'], {}), "('gw')\n", (5575, 5581), False, 'from neurolib.utils.loadData import Dataset\n'), ((5601, 5642), 'neurolib.models.subdivwc.SubDivWCModel', 'SubDivWCModel', ([], {'Cmat': 'ds.Cmat', 'Dmat': 'ds.Dmat'}), '(Cmat=ds.Cmat, Dmat=ds.Dmat)\n', (5614, 5642), False, 'from neurolib.models.subdivwc import SubDivWCModel\n'), ((5943, 5954), 'time.time', 'time.time', ([], {}), '()\n', (5952, 5954), False, 'import time\n')]
|
from django.db import models
# Create your models here.
class Note(models.Model):
title = models.CharField(
max_length= 30,
)
content = models.TextField()
image_url = models.URLField()
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.URLField"
] |
[((95, 126), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (111, 126), False, 'from django.db import models\n'), ((157, 175), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (173, 175), False, 'from django.db import models\n'), ((192, 209), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (207, 209), False, 'from django.db import models\n')]
|
# 1. import Flask
from flask import Flask
# 2. Create an app, being sure to pass __name__
app = Flask(__name__)
# 3. Define what to do when a user hits the index route
@app.route("/")
def home():
print("Hello World")
return "Hello World!"
@app.route("/about")
def home():
print("Krys - Phoenix")
return "Krys - Phoenix"
@app.route("/contact")
def home():
print("email me at...")
return "email me at..."
if __name__ == "__main__":
app.run(debug=True)
|
[
"flask.Flask"
] |
[((97, 112), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (102, 112), False, 'from flask import Flask\n')]
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : keep_sync_window
Case Name : failover后,同步备异常,验证时间窗口参数是否生效
Description :
1.停止主节点
1.1.进行failover
1.2.进行refreshconf
2.failover后查询参数默认值
3.设置guc参数
3.1设置synchronous_standby_names为'dn_6001'
3.2设置synchronous_commit=on
3.3设置most_available_sync=on
3.4设置keep_sync_window=180
4.查询修改后的参数值
5.查询集群同步方式
6.建表
7.停止集群
8.启动主节点
9.停止同步备
10.主节点插入数据
11.查看备机数据是否同步
12.清理环境
Expect :
1.成功
1.1.成功
1.2.成功
2.和failover前一致
3.设置成功
4.设置成功
5.集群状态为Quorum
6.成功
7.停止成功
8.启动成功
9.成功
10.时间窗内同步备未恢复连接;180s后进入最大可用模式,数据插入成功
11.同步成功
12.清理环境完成
History :
"""
import time
import unittest
from testcase.utils.ComThread import ComThread
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
Primary_SH = CommonSH('PrimaryDbUser')
@unittest.skipIf(3 != Primary_SH.get_node_num(), '非1+2环境不执行')
class GucParameters(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info('-Opengauss_Function_Keep_Sync_Window_Case0023 start-')
self.constant = Constant()
self.common = Common()
self.standby_sh1 = CommonSH('Standby1DbUser')
self.standby_sh2 = CommonSH('Standby2DbUser')
self.db_primary_user_node = Node(node='PrimaryDbUser')
self.db_standby1_user_node = Node(node='Standby1DbUser')
self.standby_root_node1 = Node(node='Standby1Root')
self.log.info('failover前查询参数默认值')
self.default_value1 = self.common.show_param('keep_sync_window')
self.default_value2 = self.common.show_param(
'synchronous_standby_names')
self.default_value3 = self.common.show_param('synchronous_commit')
self.default_value4 = self.common.show_param('most_available_sync')
self.stand_value = self.standby_sh1.execut_db_sql(
'show synchronous_standby_names;')
self.log.info(self.stand_value)
self.tb_name = "tb_keep_sync_window_0023"
def test_keep_sync_window(self):
text = '--step1:停止主节点;expect:成功--'
self.log.info(text)
stop_cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_ctl stop -D {macro.DB_INSTANCE_PATH};"
self.log.info(stop_cmd)
excute_msg = self.db_primary_user_node.sh(stop_cmd).result()
self.log.info(excute_msg)
self.assertIn(self.constant.GS_CTL_STOP_SUCCESS_MSG, excute_msg,
'执行失败' + text)
text = '--step1.1:进行failover;expect:成功--'
self.log.info(text)
excute_cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_ctl failover -D {macro.DB_INSTANCE_PATH} -m fast;"
self.log.info(excute_cmd)
excute_msg = self.db_standby1_user_node.sh(excute_cmd).result()
self.log.info(excute_msg)
self.assertIn(self.constant.FAILOVER_SUCCESS_MSG, excute_msg,
'执行失败' + text)
self.log.info('---step1.2:进行refreshconf;expect:成功')
excute_cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_om -t refreshconf;"
self.log.info(excute_cmd)
excute_msg = self.db_standby1_user_node.sh(excute_cmd).result()
self.log.info(excute_msg)
self.assertIn(self.constant.REFRESHCONF_SUCCESS_MSG, excute_msg,
'执行失败' + text)
restart_msg = self.standby_sh1.restart_db_cluster()
self.log.info(restart_msg)
self.assertTrue(restart_msg, '执行失败' + text)
text = '--step2:failover后查询参数默认值;expect:和failoverover前一致--'
self.log.info(text)
new_value1 = self.standby_sh1.execut_db_sql('show keep_sync_window;')
self.log.info(new_value1)
self.assertEqual(self.default_value1,
new_value1.splitlines()[2].strip(), '执行失败' + text)
new_value2 = self.standby_sh1.execut_db_sql(
'show synchronous_standby_names;')
self.log.info(new_value2)
self.assertEqual(self.stand_value.splitlines()[2].strip(),
new_value2.splitlines()[2].strip(), '执行失败' + text)
new_value3 = self.standby_sh1.execut_db_sql('show synchronous_commit;')
self.log.info(new_value3)
self.assertEqual(self.default_value3,
new_value3.splitlines()[2].strip(), '执行失败' + text)
new_value4 = self.standby_sh1.execut_db_sql(
'show most_available_sync;')
self.log.info(new_value4)
self.assertEqual(self.default_value4,
new_value4.splitlines()[2].strip(), '执行失败' + text)
text = '--step3:设置参数;expect:成功--'
self.log.info(text)
self.log.info('设置synchronous_commit=on')
result = self.standby_sh1.execute_gsguc("reload",
self.constant.GSGUC_SUCCESS_MSG,
f"synchronous_commit=on")
self.assertTrue(result, '执行失败' + text)
self.log.info('设置most_available_sync=on')
result = self.standby_sh1.execute_gsguc("reload",
self.constant.GSGUC_SUCCESS_MSG,
f"most_available_sync=on")
self.assertTrue(result, '执行失败' + text)
self.log.info('设置keep_sync_window=180')
result = self.standby_sh1.execute_gsguc("reload",
self.constant.GSGUC_SUCCESS_MSG,
f"keep_sync_window=180")
self.assertTrue(result, '执行失败' + text)
result = self.standby_sh1.restart_db_cluster()
self.assertTrue(result, '执行失败' + text)
text = '--step4:查询修改后的参数值;expect:修改成功--'
self.log.info(text)
result1 = self.standby_sh1.execut_db_sql('show synchronous_commit;')
self.log.info(result1)
self.assertIn('on', result1, '执行失败' + text)
result2 = self.standby_sh1.execut_db_sql('show most_available_sync;')
self.log.info(result2)
self.assertIn('on', result2, '执行失败' + text)
result3 = self.standby_sh1.execut_db_sql('show keep_sync_window;')
self.log.info(result3)
self.assertIn('3min', result3, '执行失败' + text)
text = '--step5:建表;expect:成功--'
self.log.info(text)
sql_cmd = self.standby_sh1.execut_db_sql(f"drop table if exists "
f"{self.tb_name};"
f"create table {self.tb_name}"
f"(id int,name text);")
self.log.info(sql_cmd)
self.assertIn(self.constant.TABLE_CREATE_SUCCESS, sql_cmd,
'执行失败' + text)
text = '--step6:停止集群;expect:停止成功--'
self.log.info(text)
stop_cmd = self.standby_sh1.stop_db_cluster()
self.log.info(stop_cmd)
self.assertTrue(stop_cmd, '执行失败' + text)
text = '--step7:gs_ctl方式启动主节点;expect:启动成功--'
self.log.info(text)
start_cmd = self.standby_sh1.start_db_instance(mode="primary")
self.log.info(start_cmd)
self.assertTrue(start_cmd, '执行失败' + text)
text = '--step8:停止同步备;expect:成功--'
self.log.info(text)
stop_cmd1 = Primary_SH.stop_db_instance()
self.log.info(stop_cmd1)
self.assertTrue(stop_cmd1, '执行失败' + text)
stop_cmd2 = self.standby_sh2.stop_db_instance()
self.log.info(stop_cmd2)
self.assertTrue(stop_cmd2, '执行失败' + text)
text = '--step9:主节点插入数据;expect:时间窗内同步备未恢复连接;' \
'180s后进入最大可用模式,数据插入成功--'
self.log.info(text)
sql_cmd = f"insert into {self.tb_name} values(generate_series(1,10)," \
f"'column_'|| generate_series(1,10));"
self.log.info(sql_cmd)
insert_thread = ComThread(self.standby_sh1.execut_db_sql,
args=(sql_cmd,))
insert_thread.setDaemon(True)
insert_thread.start()
self.log.info('获取step6结果')
insert_thread.join(10 * 600)
insert_thread_result = insert_thread.get_result()
self.log.info(insert_thread_result)
self.assertIn(self.constant.INSERT_SUCCESS_MSG, insert_thread_result,
'执行失败:' + text)
self.assertIn(self.constant.keep_sync_window_msg, insert_thread_result,
'执行失败:' + text)
time.sleep(200)
text = '--step10:启动集群;expect:启动成功--'
self.log.info(text)
start_cmd = self.standby_sh1.restart_db_cluster()
self.log.info(start_cmd)
self.assertTrue(start_cmd, '执行失败' + text)
text = '----step11:查看备机数据是否同步;expect:同步成功----'
self.log.info(text)
sql_cmd = f"select * from {self.tb_name};"
msg_primary = Primary_SH.execut_db_sql(sql_cmd)
self.log.info(msg_primary)
msg_standby1 = self.standby_sh1.execut_db_sql(sql_cmd)
self.log.info(msg_standby1)
self.assertEqual(msg_primary, msg_standby1, '执行失败:' + text)
msg_standby2 = self.standby_sh2.execut_db_sql(sql_cmd)
self.log.info(msg_standby2)
self.assertEqual(msg_primary, msg_standby2, '执行失败:' + text)
def tearDown(self):
text = '--step12:清理环境;expect:清理环境完成--'
self.log.info(text)
self.log.info('删表')
drop_cmd = self.standby_sh1.execut_db_sql(f"drop table if exists "
f"{self.tb_name};")
self.log.info(drop_cmd)
self.log.info('恢复参数默认值')
res1 = self.standby_sh1.execute_gsguc("reload",
self.constant.GSGUC_SUCCESS_MSG,
f"synchronous_commit="
f"{self.default_value3}")
res2 = self.standby_sh1.execute_gsguc("reload",
self.constant.GSGUC_SUCCESS_MSG,
f"most_available_sync="
f"{self.default_value4}")
res3 = self.standby_sh1.execute_gsguc("reload",
self.constant.GSGUC_SUCCESS_MSG,
f"keep_sync_window="
f"{self.default_value1}")
result = self.standby_sh1.stop_db_cluster()
self.assertTrue(result, '执行失败' + text)
result = self.standby_sh1.restart_db_cluster()
self.assertTrue(result, '执行失败' + text)
self.log.info('恢复原主备关系')
stop_cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_ctl stop -D {macro.DB_INSTANCE_PATH};"
self.log.info(stop_cmd)
excute_msg = self.db_standby1_user_node.sh(stop_cmd).result()
self.log.info(excute_msg)
self.assertIn(self.constant.GS_CTL_STOP_SUCCESS_MSG, excute_msg,
'执行失败' + text)
excute_cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_ctl failover -D {macro.DB_INSTANCE_PATH} -m fast;"
self.log.info(excute_cmd)
excute_msg = self.db_primary_user_node.sh(excute_cmd).result()
self.log.info(excute_msg)
self.assertIn(self.constant.FAILOVER_SUCCESS_MSG, excute_msg,
'执行失败' + text)
excute_cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_om -t refreshconf;"
self.log.info(excute_cmd)
excute_msg = self.db_primary_user_node.sh(excute_cmd).result()
self.log.info(excute_msg)
self.assertIn(self.constant.REFRESHCONF_SUCCESS_MSG, excute_msg,
'执行失败' + text)
restart_msg = Primary_SH.restart_db_cluster()
self.log.info(restart_msg)
status = Primary_SH.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
self.log.info('断言teardown成功')
self.assertIn(self.constant.TABLE_DROP_SUCCESS, drop_cmd,
'执行失败' + text)
self.assertEqual(True, res1, '执行失败' + text)
self.assertEqual(True, res2, '执行失败' + text)
self.assertEqual(True, res3, '执行失败' + text)
self.log.info('-Opengauss_Function_Keep_Sync_Window_Case0023 finish-')
|
[
"testcase.utils.CommonSH.CommonSH",
"testcase.utils.Logger.Logger",
"testcase.utils.Constant.Constant",
"testcase.utils.Common.Common",
"time.sleep",
"yat.test.Node",
"testcase.utils.ComThread.ComThread"
] |
[((1629, 1654), 'testcase.utils.CommonSH.CommonSH', 'CommonSH', (['"""PrimaryDbUser"""'], {}), "('PrimaryDbUser')\n", (1637, 1654), False, 'from testcase.utils.CommonSH import CommonSH\n'), ((1799, 1807), 'testcase.utils.Logger.Logger', 'Logger', ([], {}), '()\n', (1805, 1807), False, 'from testcase.utils.Logger import Logger\n'), ((1910, 1920), 'testcase.utils.Constant.Constant', 'Constant', ([], {}), '()\n', (1918, 1920), False, 'from testcase.utils.Constant import Constant\n'), ((1943, 1951), 'testcase.utils.Common.Common', 'Common', ([], {}), '()\n', (1949, 1951), False, 'from testcase.utils.Common import Common\n'), ((1979, 2005), 'testcase.utils.CommonSH.CommonSH', 'CommonSH', (['"""Standby1DbUser"""'], {}), "('Standby1DbUser')\n", (1987, 2005), False, 'from testcase.utils.CommonSH import CommonSH\n'), ((2033, 2059), 'testcase.utils.CommonSH.CommonSH', 'CommonSH', (['"""Standby2DbUser"""'], {}), "('Standby2DbUser')\n", (2041, 2059), False, 'from testcase.utils.CommonSH import CommonSH\n'), ((2096, 2122), 'yat.test.Node', 'Node', ([], {'node': '"""PrimaryDbUser"""'}), "(node='PrimaryDbUser')\n", (2100, 2122), False, 'from yat.test import Node\n'), ((2160, 2187), 'yat.test.Node', 'Node', ([], {'node': '"""Standby1DbUser"""'}), "(node='Standby1DbUser')\n", (2164, 2187), False, 'from yat.test import Node\n'), ((2222, 2247), 'yat.test.Node', 'Node', ([], {'node': '"""Standby1Root"""'}), "(node='Standby1Root')\n", (2226, 2247), False, 'from yat.test import Node\n'), ((8647, 8705), 'testcase.utils.ComThread.ComThread', 'ComThread', (['self.standby_sh1.execut_db_sql'], {'args': '(sql_cmd,)'}), '(self.standby_sh1.execut_db_sql, args=(sql_cmd,))\n', (8656, 8705), False, 'from testcase.utils.ComThread import ComThread\n'), ((9225, 9240), 'time.sleep', 'time.sleep', (['(200)'], {}), '(200)\n', (9235, 9240), False, 'import time\n')]
|
#!/usr/bin/env python
from __future__ import print_function
import pycreate2
import argparse
import time
DESCRIPTION = """
Shuts down the Create 2.
"""
def handleArgs():
parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter)
# parser.add_argument('-m', '--max', help='max id', type=int, default=253)
# parser.add_argument('-s', '--sleep', help='time in seconds between samples, default 1.0', type=float, default=1.0)
parser.add_argument('-b', '--baud', help='baudrate, default is 115200', type=int, default=115200)
parser.add_argument('port', help='serial port name, Ex: /dev/ttyUSB0 or COM1', type=str)
args = vars(parser.parse_args())
return args
if __name__ == "__main__":
args = handleArgs()
port = args['port']
baud = args['baud']
bot = pycreate2.Create2(port=port, baud=baud)
bot.start()
time.sleep(0.25)
bot.power() # this seems to shut it down more than stop ... confused
# bot.shutdown()
time.sleep(0.25)
bot.stop()
time.sleep(1)
print('=====================================================')
print('\n\tCreate Shutdown')
print('\tHit power button to wake-up\n')
print('=====================================================')
|
[
"pycreate2.Create2",
"argparse.ArgumentParser",
"time.sleep"
] |
[((185, 285), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'DESCRIPTION', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=DESCRIPTION, formatter_class=argparse.\n RawTextHelpFormatter)\n', (208, 285), False, 'import argparse\n'), ((813, 852), 'pycreate2.Create2', 'pycreate2.Create2', ([], {'port': 'port', 'baud': 'baud'}), '(port=port, baud=baud)\n', (830, 852), False, 'import pycreate2\n'), ((868, 884), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (878, 884), False, 'import time\n'), ((975, 991), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (985, 991), False, 'import time\n'), ((1005, 1018), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1015, 1018), False, 'import time\n')]
|
from auth.gateio_auth import *
from gate_api import ApiClient, Configuration, Order, SpotApi
# Load Gate.io API credentials from the YAML auth file (helper from auth.gateio_auth).
client = load_gateio_creds('auth/auth.yml')
# Module-level spot-trading client used by all helpers below.
spot_api = SpotApi(ApiClient(client))
def get_last_price(base, quote):
    """Return the last traded price for the ``{base}_{quote}`` pair.

    Args:
        base: base currency symbol, e.g. 'DOT'
        quote: quote currency symbol, e.g. 'USDT'
    """
    pair = f'{base}_{quote}'
    result = spot_api.list_tickers(currency_pair=pair)
    assert len(result) == 1
    return result[0].last
def get_min_amount(base, quote):
    """Return the exchange's minimum quote amount for ``{base}_{quote}``.

    On any API error the exception is printed and None is returned
    (best-effort, matching the module's other helpers).

    Args:
        base: base currency symbol, e.g. 'DOT'
        quote: quote currency symbol, e.g. 'USDT'
    """
    pair = f'{base}_{quote}'
    try:
        amount = spot_api.get_currency_pair(currency_pair=pair).min_quote_amount
    except Exception as err:
        print(err)
        return None
    return amount
def place_order(base, quote, amount, side, last_price):
    """Place a spot limit order, sizing the order by quote-currency budget.

    Args:
        base: base currency symbol, e.g. 'DOT'
        quote: quote currency symbol, e.g. 'USDT'
        amount: budget in quote currency (e.g. 50 USDT); converted to a base
            quantity by dividing by ``last_price``
        side: 'buy' or 'sell'
        last_price: limit price for the order, e.g. 400

    Returns:
        The created order on success, or None if the API call raised
        (the error is printed, matching the module's best-effort style).
    """
    try:
        order = Order(amount=str(float(amount) / float(last_price)), price=last_price,
                      side=side, currency_pair=f'{base}_{quote}')
        order = spot_api.create_order(order)
        # Bug fix: the log tag was hard-coded to "[BUY-Thread]" even for
        # sell orders; tag with the actual side instead.
        print("[{}-Thread]Order status:{}".format(side.upper(), order.status))
        print("[{}-Thread]Order object:{}".format(side.upper(), vars(order)))
    except Exception as e:
        print(e)
        return None
    return order
|
[
"gate_api.ApiClient"
] |
[((157, 174), 'gate_api.ApiClient', 'ApiClient', (['client'], {}), '(client)\n', (166, 174), False, 'from gate_api import ApiClient, Configuration, Order, SpotApi\n')]
|
"""
Supernet for differentiable rollouts.
"""
import contextlib
import torch
from torch.nn import functional as F
from aw_nas import assert_rollout_type, utils
from aw_nas.rollout.base import DartsArch, DifferentiableRollout, BaseRollout
from aw_nas.utils import data_parallel, use_params
from aw_nas.weights_manager.base import CandidateNet
from aw_nas.weights_manager.shared import SharedNet, SharedCell, SharedOp
__all__ = ["DiffSubCandidateNet", "DiffSuperNet"]
class DiffSubCandidateNet(CandidateNet):
    """Candidate net that evaluates one differentiable rollout on a shared DiffSuperNet.

    Holds a reference to the super net plus the rollout's architecture
    (a list of DartsArch(op_weights, edge_norms) per cell group) and forwards
    through the super net with those weights.
    """
    def __init__(self, super_net, rollout: DifferentiableRollout, gpus=tuple(),
                 virtual_parameter_only=True, eval_no_grad=True):
        super(DiffSubCandidateNet, self).__init__(eval_no_grad=eval_no_grad)
        self.super_net = super_net
        self._device = super_net.device
        self.gpus = gpus
        self.arch = rollout.arch
        self.virtual_parameter_only = virtual_parameter_only
    def get_device(self):
        # Device of the underlying super net.
        return self._device
    @contextlib.contextmanager
    def begin_virtual(self):
        """Snapshot parameters (and buffers, unless virtual_parameter_only)
        on entry and restore them on exit, so any updates inside the
        ``with`` block are "virtual" and do not persist."""
        w_clone = {k: v.clone() for k, v in self.named_parameters()}
        if not self.virtual_parameter_only:
            buffer_clone = {k: v.clone() for k, v in self.named_buffers()}
        yield
        # Restore the snapshot taken above.
        for n, v in self.named_parameters():
            v.data.copy_(w_clone[n])
        del w_clone
        if not self.virtual_parameter_only:
            for n, v in self.named_buffers():
                v.data.copy_(buffer_clone[n])
            del buffer_clone
    def forward(self, inputs, detach_arch=True): #pylint: disable=arguments-differ
        # Optionally detach the architecture tensors so gradients do not
        # flow back into the controller's alphas during this forward.
        if detach_arch:
            arch = [
                DartsArch(
                    op_weights=op_weights.detach(),
                    edge_norms=edge_norms.detach() if edge_norms is not None else None
                ) for op_weights, edge_norms in self.arch
            ]
        else:
            arch = self.arch
        # Single-device path: forward directly on the super net.
        if not self.gpus or len(self.gpus) == 1:
            return self.super_net.forward(inputs, arch, detach_arch=detach_arch)
        # Multi-GPU path: replicate/split the arch tensors so that
        # `data_parallel` can scatter one share to each device.
        if arch[0].op_weights.ndimension() == 2:
            # op_weights per edge look like (num_edges?, num_ops) here;
            # tile along dim 0, one copy per GPU — TODO confirm exact shape.
            arch = [
                DartsArch(
                    op_weights=a.op_weights.repeat(len(self.gpus), 1),
                    edge_norms=(a.edge_norms.repeat(len(self.gpus)) \
                                if a.edge_norms is not None else None))
                for a in arch
            ]
        else:
            # Ugly fix for rollout_size > 1
            # call scatter here and stack...
            # split along dimension 1,
            # then concatenate along dimension 0 for `data_parallel` to scatter it again
            num_split = len(self.gpus)
            rollout_batch_size = arch[0].op_weights.shape[1]
            assert rollout_batch_size % num_split == 0
            split_size = rollout_batch_size // num_split
            # arch = [torch.cat(torch.split(a, split_size, dim=1), dim=0) for a in arch]
            # Note: edge_norms (1-dim) do not support batch_size, just repeat
            arch = [DartsArch(
                op_weights=torch.cat(torch.split(a.op_weights, split_size, dim=1), dim=0),
                edge_norms=(a.edge_norms.repeat(len(self.gpus)) \
                            if a.edge_norms is not None else None))
                    for a in arch]
        return data_parallel(self.super_net, (inputs, arch), self.gpus,
                             module_kwargs={"detach_arch": detach_arch})
    def _forward_with_params(self, inputs, params, **kwargs): #pylint: disable=arguments-differ
        # Temporarily swap in `params` on the super net for this forward.
        with use_params(self.super_net, params):
            return self.forward(inputs, **kwargs)
    def named_parameters(self, *args, **kwargs): #pylint: disable=arguments-differ
        # Parameters live on the shared super net, not on this wrapper.
        return self.super_net.named_parameters(*args, **kwargs)
    def named_buffers(self, *args, **kwargs): #pylint: disable=arguments-differ
        return self.super_net.named_buffers(*args, **kwargs)
    def eval_data(self, data, criterions, mode="eval", **kwargs): #pylint: disable=arguments-differ
        """
        Override eval_data, to enable gradient.
        Returns:
            results (list of results return by criterions)
        """
        self._set_mode(mode)
        outputs = self.forward_data(data[0], **kwargs)
        return utils.flatten_list([c(data[0], outputs, data[1]) for c in criterions])
class DiffSuperNet(SharedNet):
    """Weights manager (super net) for differentiable NAS rollouts.

    Shares one big network among all candidate architectures; candidates are
    thin DiffSubCandidateNet wrappers that forward with soft op weights.
    """
    NAME = "diff_supernet"
    def __init__(self, search_space, device, rollout_type="differentiable",
                 gpus=tuple(),
                 num_classes=10, init_channels=16, stem_multiplier=3,
                 max_grad_norm=5.0, dropout_rate=0.1,
                 use_stem="conv_bn_3x3", stem_stride=1, stem_affine=True,
                 preprocess_op_type=None,
                 cell_use_preprocess=True,
                 cell_use_shortcut=False,
                 cell_shortcut_op_type="skip_connect",
                 cell_group_kwargs=None,
                 candidate_virtual_parameter_only=False,
                 candidate_eval_no_grad=True):
        # Delegate network construction to SharedNet, plugging in the
        # differentiable cell/op implementations defined below.
        super(DiffSuperNet, self).__init__(
            search_space, device, rollout_type,
            cell_cls=DiffSharedCell, op_cls=DiffSharedOp,
            gpus=gpus,
            num_classes=num_classes, init_channels=init_channels,
            stem_multiplier=stem_multiplier,
            max_grad_norm=max_grad_norm, dropout_rate=dropout_rate,
            use_stem=use_stem, stem_stride=stem_stride, stem_affine=stem_affine,
            preprocess_op_type=preprocess_op_type,
            cell_use_preprocess=cell_use_preprocess,
            cell_group_kwargs=cell_group_kwargs,
            cell_use_shortcut=cell_use_shortcut,
            cell_shortcut_op_type=cell_shortcut_op_type)
        # Defaults passed through to every assembled candidate net.
        self.candidate_virtual_parameter_only = candidate_virtual_parameter_only
        self.candidate_eval_no_grad = candidate_eval_no_grad
    # ---- APIs ----
    def extract_features(self, inputs, rollout_or_arch, **kwargs):
        # Accept either a rollout object or a raw arch list.
        if isinstance(rollout_or_arch, BaseRollout):
            # from extract_features (wrapper wm)
            arch = rollout_or_arch.arch
        else:
            # from candidate net
            arch = rollout_or_arch
        return super().extract_features(inputs, arch, **kwargs)
    def assemble_candidate(self, rollout):
        """Wrap `rollout` into a candidate net sharing this super net's weights."""
        return DiffSubCandidateNet(self, rollout, gpus=self.gpus,
                                   virtual_parameter_only=self.candidate_virtual_parameter_only,
                                   eval_no_grad=self.candidate_eval_no_grad)
    @classmethod
    def supported_rollout_types(cls):
        # This weights manager only handles differentiable rollouts.
        return [assert_rollout_type("differentiable")]
class DiffSharedCell(SharedCell):
    """Shared cell whose edges are mixed ops weighted by the rollout's alphas."""
    def num_out_channel(self):
        # Cell output is the concat of all `_steps` intermediate nodes.
        return self.num_out_channels * self._steps
    def forward(self, inputs, arch, detach_arch=True): # pylint: disable=arguments-differ
        """Forward one cell.

        Args:
            inputs: list of `_num_init` input feature maps.
            arch: DartsArch(op_weights, edge_norms) for this cell group.
            detach_arch: passed through to each edge's DiffSharedOp.
        """
        assert self._num_init == len(inputs)
        states = [op(_input) for op, _input in zip(self.preprocess_ops, inputs)]
        offset = 0
        # in parallel forward, after scatter, a namedtuple will be come a normal tuple
        arch = DartsArch(*arch)
        use_edge_normalization = arch.edge_norms is not None
        for i_step in range(self._steps):
            to_ = i_step + self._num_init
            if use_edge_normalization:
                act_lst = [
                    arch.edge_norms[offset + from_] *  # edge norm factor scalar on this edge
                    self.edges[from_][to_](
                        state,
                        arch.op_weights[offset + from_],  # op weights vector on this edge
                        detach_arch=detach_arch
                    )
                    for from_, state in enumerate(states)
                ]
            else:
                act_lst = [
                    self.edges[from_][to_](
                        state, arch.op_weights[offset + from_], detach_arch=detach_arch
                    )
                    for from_, state in enumerate(states)
                ]
            # Each new node is the sum over all incoming (weighted) edges.
            new_state = sum(act_lst)
            offset += len(states)
            states.append(new_state)
        # Concatenate the `_steps` intermediate nodes along channels.
        out = torch.cat(states[-self._steps:], dim=1)
        if self.use_shortcut and self.layer_index != 0:
            out = out + self.shortcut_reduction_op(inputs[-1])
        return out
class DiffSharedOp(SharedOp):
    """Mixed op: weighted sum of all primitive ops, with optional partial-channel trick."""
    def forward(self, x, weights, detach_arch=True): # pylint: disable=arguments-differ
        """Apply every primitive op to `x` and combine by `weights`.

        `weights` is either a (num_op,) vector or a (batch_size, num_op)
        matrix (one weight row per sample, repeated if needed).
        """
        if weights.ndimension() == 2:
            # weights: (batch_size, num_op)
            if not weights.shape[0] == x.shape[0]:
                # every `x.shape[0] % weights.shape[0]` data use the same sampled arch weights
                assert x.shape[0] % weights.shape[0] == 0
                weights = weights.repeat(x.shape[0] // weights.shape[0], 1)
            return sum(
                [
                    weights[:, i].reshape(-1, 1, 1, 1) * op(x)
                    for i, op in enumerate(self.p_ops)
                ]
            )
        # Accumulator starts as float 0.0 and becomes a tensor after the
        # first `w * act` addition.
        out_act: torch.Tensor = 0.0
        # weights: (num_op)
        if self.partial_channel_proportion is None:
            for w, op in zip(weights, self.p_ops):
                # Skip zero-weight ops entirely when arch is detached (saves compute).
                if detach_arch and w.item() == 0:
                    continue
                act = op(x).detach_() if w.item() == 0 else op(x)
                out_act += w * act
        else:
            # Partial-channel (PC-DARTS style): only 1/proportion of the
            # channels go through the mixed op, the rest bypass it.
            op_channels = x.shape[1] // self.partial_channel_proportion
            x_1 = x[:, :op_channels, :, :]  # these channels goes through op
            x_2 = x[:, op_channels:, :, :]  # these channels skips op
            # apply pooling if the ops have stride=2
            if self.stride == 2:
                x_2 = F.max_pool2d(x_2, 2, 2)
            for w, op in zip(weights, self.p_ops):
                # if detach_arch and w.item() == 0:
                #     continue  # not really sure about this
                act = op(x_1)
                # if w.item() == 0:
                #     act.detach_()  # not really sure about this either
                out_act += w * act
            out_act = torch.cat((out_act, x_2), dim=1)
            # PC-DARTS implements a deterministic channel_shuffle() (not what they said in the paper)
            # ref: https://github.com/yuhuixu1993/PC-DARTS/blob/b74702f86c70e330ce0db35762cfade9df026bb7/model_search.py#L9
            out_act = self._channel_shuffle(out_act, self.partial_channel_proportion)
            # this is the random channel shuffle
            # channel_perm = torch.randperm(out_act.shape[1])
            # out_act = out_act[:, channel_perm, :, :]
        return out_act
    @staticmethod
    def _channel_shuffle(x: torch.Tensor, groups: int):
        """channel shuffle for PC-DARTS"""
        # Interleave channel groups: (n, g, c/g, h, w) -> transpose -> flatten.
        n, c, h, w = x.shape
        x = x.view(n, groups, -1, h, w).transpose(1, 2).contiguous()
        x = x.view(n, c, h, w).contiguous()
        return x
|
[
"aw_nas.assert_rollout_type",
"aw_nas.utils.data_parallel",
"torch.split",
"torch.cat",
"aw_nas.utils.use_params",
"torch.nn.functional.max_pool2d",
"aw_nas.rollout.base.DartsArch"
] |
[((3318, 3423), 'aw_nas.utils.data_parallel', 'data_parallel', (['self.super_net', '(inputs, arch)', 'self.gpus'], {'module_kwargs': "{'detach_arch': detach_arch}"}), "(self.super_net, (inputs, arch), self.gpus, module_kwargs={\n 'detach_arch': detach_arch})\n", (3331, 3423), False, 'from aw_nas.utils import data_parallel, use_params\n'), ((7113, 7129), 'aw_nas.rollout.base.DartsArch', 'DartsArch', (['*arch'], {}), '(*arch)\n', (7122, 7129), False, 'from aw_nas.rollout.base import DartsArch, DifferentiableRollout, BaseRollout\n'), ((8148, 8187), 'torch.cat', 'torch.cat', (['states[-self._steps:]'], {'dim': '(1)'}), '(states[-self._steps:], dim=1)\n', (8157, 8187), False, 'import torch\n'), ((3558, 3592), 'aw_nas.utils.use_params', 'use_params', (['self.super_net', 'params'], {}), '(self.super_net, params)\n', (3568, 3592), False, 'from aw_nas.utils import data_parallel, use_params\n'), ((6615, 6652), 'aw_nas.assert_rollout_type', 'assert_rollout_type', (['"""differentiable"""'], {}), "('differentiable')\n", (6634, 6652), False, 'from aw_nas import assert_rollout_type, utils\n'), ((10078, 10110), 'torch.cat', 'torch.cat', (['(out_act, x_2)'], {'dim': '(1)'}), '((out_act, x_2), dim=1)\n', (10087, 10110), False, 'import torch\n'), ((9691, 9714), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x_2', '(2)', '(2)'], {}), '(x_2, 2, 2)\n', (9703, 9714), True, 'from torch.nn import functional as F\n'), ((3080, 3124), 'torch.split', 'torch.split', (['a.op_weights', 'split_size'], {'dim': '(1)'}), '(a.op_weights, split_size, dim=1)\n', (3091, 3124), False, 'import torch\n')]
|
# Generated by Django 2.0.7 on 2018-07-21 00:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: repoint both Transaction foreign keys at
    publicbanking.Account with CASCADE deletes and distinct related_names."""
    dependencies = [
        ('publicbanking', '0007_auto_20180719_0113'),
    ]
    operations = [
        migrations.AlterField(
            model_name='transaction',
            name='transaction_destination',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transaction_destination', to='publicbanking.Account'),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='transaction_origin',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transaction_origin', to='publicbanking.Account'),
        ),
    ]
|
[
"django.db.models.ForeignKey"
] |
[((397, 532), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""transaction_destination"""', 'to': '"""publicbanking.Account"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='transaction_destination', to='publicbanking.Account')\n", (414, 532), False, 'from django.db import migrations, models\n'), ((666, 796), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""transaction_origin"""', 'to': '"""publicbanking.Account"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='transaction_origin', to='publicbanking.Account')\n", (683, 796), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
"""
Node Classes.
* :any:`Node`: a simple tree node
* :any:`NodeMixin`: extends any python class to a tree node.
"""
from __future__ import print_function
from .iterators import PreOrderIter
import warnings
class NodeMixin(object):
    # Separator used when rendering a node's path in __repr__/str form.
    separator = "/"
    # NOTE(review): this string literal sits AFTER the `separator` assignment,
    # so Python does NOT pick it up as the class __doc__ — it is a no-op
    # expression statement. Moving it above `separator` would fix that.
    u"""
    The :any:`NodeMixin` class extends any Python class to a tree node.
    The only tree relevant information is the `parent` attribute.
    If `None` the :any:`NodeMixin` is root node.
    If set to another node, the :any:`NodeMixin` becomes the child of it.
    >>> from anytree import Node, RenderTree
    >>> class MyBaseClass(object):
    ...     foo = 4
    >>> class MyClass(MyBaseClass, NodeMixin):  # Add Node feature
    ...     def __init__(self, name, length, width, parent=None):
    ...         super(MyClass, self).__init__()
    ...         self.name = name
    ...         self.length = length
    ...         self.width = width
    ...         self.parent = parent
    >>> my0 = MyClass('my0', 0, 0)
    >>> my1 = MyClass('my1', 1, 0, parent=my0)
    >>> my2 = MyClass('my2', 0, 2, parent=my0)
    >>> for pre, _, node in RenderTree(my0):
    ...     treestr = u"%s%s" % (pre, node.name)
    ...     print(treestr.ljust(8), node.length, node.width)
    my0      0 0
    ├── my1  1 0
    └── my2  0 2
    """
    @property
    def parent(self):
        u"""
        Parent Node.
        On set, the node is detached from any previous parent node and attached
        to the new node.
        >>> from anytree import Node, RenderTree
        >>> udo = Node("Udo")
        >>> marc = Node("Marc")
        >>> lian = Node("Lian", parent=marc)
        >>> print(RenderTree(udo))
        Node('/Udo')
        >>> print(RenderTree(marc))
        Node('/Marc')
        └── Node('/Marc/Lian')
        **Attach**
        >>> marc.parent = udo
        >>> print(RenderTree(udo))
        Node('/Udo')
        └── Node('/Udo/Marc')
            └── Node('/Udo/Marc/Lian')
        **Detach**
        To make a node to a root node, just set this attribute to `None`.
        >>> marc.is_root
        False
        >>> marc.parent = None
        >>> marc.is_root
        True
        """
        # `_parent` is created lazily on first assignment; absence means root.
        try:
            return self._parent
        except AttributeError:
            return None
    @parent.setter
    def parent(self, value):
        try:
            parent = self._parent
        except AttributeError:
            parent = None
        if value is None:
            # make this node to root node
            self.__detach(parent)
        elif parent is not value:
            # change parent node
            # Reject cycles before mutating any child lists.
            self.__check_loop(value)
            self.__detach(parent)
            self.__attach(value)
        else:
            # keep parent
            pass
        # apply
        self._parent = value
    def __check_loop(self, node):
        # Raise LoopError if attaching to `node` would create a cycle.
        if node is self:
            msg = "Cannot set parent. %r cannot be parent of itself."
            raise LoopError(msg % self)
        if self in node.path:
            msg = "Cannot set parent. %r is parent of %r."
            raise LoopError(msg % (self, node))
    def __detach(self, parent):
        # Remove self from `parent`'s child list (no-op when parent is None),
        # calling the _pre/_post hooks around the mutation.
        if parent:
            self._pre_detach(parent)
            parentchildren = parent._children
            assert self in parentchildren, "Tree internal data is corrupt."
            parentchildren.remove(self)
            self._post_detach(parent)
    def __attach(self, parent):
        # Append self to `parent`'s child list, calling the hooks.
        self._pre_attach(parent)
        parentchildren = parent._children
        assert self not in parentchildren, "Tree internal data is corrupt."
        parentchildren.append(self)
        self._post_attach(parent)
    @property
    def _children(self):
        # Lazily created backing list for `children`.
        try:
            return self.__children
        except AttributeError:
            self.__children = []
            return self.__children
    @property
    def children(self):
        """
        All child nodes.
        >>> dan = Node("Dan")
        >>> jet = Node("Jet", parent=dan)
        >>> jan = Node("Jan", parent=dan)
        >>> joe = Node("Joe", parent=dan)
        >>> dan.children
        (Node('/Dan/Jet'), Node('/Dan/Jan'), Node('/Dan/Joe'))
        """
        return tuple(self._children)
    @children.setter
    def children(self, children):
        self._pre_attach_children(children)
        # Detach everything first, keeping the old list for rollback.
        old_children = self.children
        del self.children
        try:
            for child in children:
                assert isinstance(child, NodeMixin), ("Cannot add non-node object %r." % child)
                child.parent = self
            assert len(self.children) == len(children)
            self._post_attach_children(children)
        # NOTE(review): bare `except` is deliberate here — it restores the old
        # children for *any* exception (incl. KeyboardInterrupt) and re-raises.
        except:
            self.children = old_children
            raise
    @children.deleter
    def children(self):
        children = self.children
        self._pre_detach_children(children)
        for child in self.children:
            child.parent = None
        assert len(self.children) == 0
        self._post_detach_children(children)
    def _pre_detach_children(self, children):
        """Method call before detaching `children`."""
        pass
    def _post_detach_children(self, children):
        """Method call after detaching `children`."""
        pass
    def _pre_attach_children(self, children):
        """Method call before attaching `children`."""
        pass
    def _post_attach_children(self, children):
        """Method call after attaching `children`."""
        pass
    @property
    def path(self):
        """
        Path of this `Node`.
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.path
        (Node('/Udo'),)
        >>> marc.path
        (Node('/Udo'), Node('/Udo/Marc'))
        >>> lian.path
        (Node('/Udo'), Node('/Udo/Marc'), Node('/Udo/Marc/Lian'))
        """
        return self._path
    @property
    def _path(self):
        # Walk up to the root, building (root, ..., self).
        path = []
        node = self
        while node:
            path.insert(0, node)
            node = node.parent
        return tuple(path)
    @property
    def ancestors(self):
        """
        All parent nodes and their parent nodes.
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.ancestors
        ()
        >>> marc.ancestors
        (Node('/Udo'),)
        >>> lian.ancestors
        (Node('/Udo'), Node('/Udo/Marc'))
        """
        return self._path[:-1]
    @property
    def anchestors(self):
        """
        All parent nodes and their parent nodes - see :any:`ancestors`.
        The attribute `anchestors` is just a typo of `ancestors`. Please use `ancestors`.
        This attribute will be removed in the 3.0.0 release.
        """
        warnings.warn(".anchestors was a typo and will be removed in version 3.0.0", DeprecationWarning)
        return self.ancestors
    @property
    def descendants(self):
        """
        All child nodes and all their child nodes.
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> loui = Node("Loui", parent=marc)
        >>> soe = Node("Soe", parent=lian)
        >>> udo.descendants
        (Node('/Udo/Marc'), Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lian/Soe'), Node('/Udo/Marc/Loui'))
        >>> marc.descendants
        (Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lian/Soe'), Node('/Udo/Marc/Loui'))
        >>> lian.descendants
        (Node('/Udo/Marc/Lian/Soe'),)
        """
        # Pre-order traversal includes self first; drop it.
        return tuple(PreOrderIter(self))[1:]
    @property
    def root(self):
        """
        Tree Root Node.
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.root
        Node('/Udo')
        >>> marc.root
        Node('/Udo')
        >>> lian.root
        Node('/Udo')
        """
        if self.parent:
            return self._path[0]
        else:
            return self
    @property
    def siblings(self):
        """
        Tuple of nodes with the same parent.
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> loui = Node("Loui", parent=marc)
        >>> lazy = Node("Lazy", parent=marc)
        >>> udo.siblings
        ()
        >>> marc.siblings
        ()
        >>> lian.siblings
        (Node('/Udo/Marc/Loui'), Node('/Udo/Marc/Lazy'))
        >>> loui.siblings
        (Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lazy'))
        """
        parent = self.parent
        if parent is None:
            return tuple()
        else:
            return tuple([node for node in parent._children if node != self])
    @property
    def is_leaf(self):
        """
        `Node` has no childrean (External Node).
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.is_leaf
        False
        >>> marc.is_leaf
        False
        >>> lian.is_leaf
        True
        """
        return len(self._children) == 0
    @property
    def is_root(self):
        """
        `Node` is tree root.
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.is_root
        True
        >>> marc.is_root
        False
        >>> lian.is_root
        False
        """
        return self.parent is None
    @property
    def height(self):
        """
        Number of edges on the longest path to a leaf `Node`.
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.height
        2
        >>> marc.height
        1
        >>> lian.height
        0
        """
        if self._children:
            return max([child.height for child in self._children]) + 1
        else:
            return 0
    @property
    def depth(self):
        """
        Number of edges to the root `Node`.
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.depth
        0
        >>> marc.depth
        1
        >>> lian.depth
        2
        """
        return len(self._path) - 1
    def _pre_detach(self, parent):
        """Method call before detaching from `parent`."""
        pass
    def _post_detach(self, parent):
        """Method call after detaching from `parent`."""
        pass
    def _pre_attach(self, parent):
        """Method call before attaching to `parent`."""
        pass
    def _post_attach(self, parent):
        """Method call after attaching to `parent`."""
        pass
class Node(NodeMixin, object):
    def __init__(self, name, parent=None, **kwargs):
        u"""
        A simple tree node with a `name` and any `kwargs`.

        >>> from anytree import Node, RenderTree
        >>> root = Node("root")
        >>> s0 = Node("sub0", parent=root)
        >>> s0b = Node("sub0B", parent=s0, foo=4, bar=109)
        >>> s0a = Node("sub0A", parent=s0)
        >>> s1 = Node("sub1", parent=root)
        >>> s1a = Node("sub1A", parent=s1)
        >>> s1b = Node("sub1B", parent=s1, bar=8)
        >>> s1c = Node("sub1C", parent=s1)
        >>> s1ca = Node("sub1Ca", parent=s1c)
        >>> print(RenderTree(root))
        Node('/root')
        ├── Node('/root/sub0')
        │   ├── Node('/root/sub0/sub0B', bar=109, foo=4)
        │   └── Node('/root/sub0/sub0A')
        └── Node('/root/sub1')
            ├── Node('/root/sub1/sub1A')
            ├── Node('/root/sub1/sub1B', bar=8)
            └── Node('/root/sub1/sub1C')
                └── Node('/root/sub1/sub1C/sub1Ca')
        """
        # Order matters: name first, then parent (triggers attach), then
        # arbitrary extra attributes copied straight into __dict__.
        self.name = name
        self.parent = parent
        self.__dict__.update(kwargs)
    @property
    def name(self):
        """Name."""
        return self._name
    @name.setter
    def name(self, value):
        self._name = value
    def __repr__(self):
        # Path string first, then the public attributes sorted by key.
        parts = ["%r" % self.separator.join([""] + [str(n.name) for n in self.path])]
        for key, value in sorted(self.__dict__.items(), key=lambda item: item[0]):
            if key.startswith("_"):
                continue
            parts.append("%s=%r" % (key, value))
        return "%s(%s)" % (self.__class__.__name__, ", ".join(parts))
class LoopError(RuntimeError):
    """Raised when an operation would introduce a cycle into the tree."""
|
[
"warnings.warn"
] |
[((6822, 6922), 'warnings.warn', 'warnings.warn', (['""".anchestors was a typo and will be removed in version 3.0.0"""', 'DeprecationWarning'], {}), "('.anchestors was a typo and will be removed in version 3.0.0',\n DeprecationWarning)\n", (6835, 6922), False, 'import warnings\n')]
|
import argparse
from itertools import combinations
from typing import Dict
import json
import os
import pandas as pd
from scirex.metrics.clustering_metrics import match_predicted_clusters_to_gold
from scirex.metrics.f1 import compute_f1
from scirex.predictors.utils import map_predicted_spans_to_gold, merge_method_subrelations
from scirex_utilities.entity_utils import used_entities
from scirex_utilities.json_utilities import load_jsonl
# CLI: paths to the gold annotations and the three prediction files
# (NER spans, salient clusters, salient-mention classifications).
parser = argparse.ArgumentParser()
parser.add_argument("--gold-file")
parser.add_argument("--ner-file")
parser.add_argument("--clusters-file")
parser.add_argument("--salient-mentions-file")
def has_all_mentions(doc, relation):
    """Return True iff every entity referenced by *relation* has at least
    one mention in *doc*'s clusters.

    Each relation element is a (role, entity_name) pair; entity_name indexes
    into doc["clusters"].
    """
    clusters = doc["clusters"]
    return all(len(clusters[entity[1]]) > 0 for entity in relation)
def convert_to_dict(data):
    """Index a list of document dicts by their "doc_id" field."""
    indexed = {}
    for doc in data:
        indexed[doc["doc_id"]] = doc
    return indexed
def ner_metrics(gold_data, predicted_data):
    """Align predicted NER spans with gold spans, per document.

    Returns:
        dict mapping doc_id -> span alignment produced by
        map_predicted_spans_to_gold for that document.
    """
    return {
        doc["doc_id"]: map_predicted_spans_to_gold(
            predicted_data[doc["doc_id"]]["ner"], doc["ner"]
        )
        for doc in gold_data
    }
def salent_mentions_metrics(gold_data,
                            predicted_salient_mentions,
                            produce_error_file = True,
                            generate_errors_file="/tmp/salient_mentions_error_files"):
    """Score predicted salient mentions against gold coref spans and print P/R/F1.

    When produce_error_file is True, also writes one annotated text file per
    document into generate_errors_file, marking each span as
    {+{true positive}+}, <-<false positive>->, <+<false negative>+>,
    or {-{true negative}-}.

    NOTE(review): only the *last* document's writer is closed (the close at
    the bottom is outside the loop), and `marked_up_words`/the initial
    `all_metrics = []` are never used — candidates for cleanup.
    """
    all_metrics = []
    predicted = 0
    gold = 0
    matched = 0
    marked_up_words = []
    for i, doc in enumerate(gold_data):
        # Gold salient spans = every span appearing in any gold coref cluster.
        gold_salient_spans = [span for coref_cluster in doc['coref'].values() for span in coref_cluster]
        predicted_doc = predicted_salient_mentions[doc["doc_id"]]
        saliency_spans = []
        doc_words = doc["words"]
        if produce_error_file:
            writer = open(os.path.join(generate_errors_file, str(i)), 'w')
            writer.write(json.dumps(doc["n_ary_relations"]) + "\n")
        existing_spans = set()
        for [start_span, end_span, saliency, _] in predicted_doc["saliency"]:
            if saliency:
                saliency_spans.append((start_span, end_span))
            if produce_error_file:
                if (start_span, end_span) not in existing_spans:
                    # Add span metadata gloss to text.
                    existing_spans.add((start_span, end_span))
                    gold_saliency = (start_span, end_span) in gold_salient_spans
                    # Mark TP / FP / FN / TN with distinct bracket styles.
                    # NOTE(review): end index usage is inconsistent across the
                    # four branches (end_span vs end_span-1) — confirm intended.
                    if gold_saliency and saliency:
                        doc_words[start_span] = '{+{' + doc_words[start_span]
                        doc_words[end_span] = doc_words[end_span] + '}+}'
                    elif saliency:
                        doc_words[start_span] = '<-<' + doc_words[start_span]
                        doc_words[end_span-1] = doc_words[end_span-1] + '>->'
                    elif gold_saliency:
                        doc_words[start_span] = '<+<' + doc_words[start_span]
                        doc_words[end_span] = doc_words[end_span] + '>+>'
                    else:
                        doc_words[start_span] = '{-{' + doc_words[start_span]
                        doc_words[end_span-1] = doc_words[end_span-1] + '}-}'
        # Insert sentence / section whitespace for readability of the dump.
        for _, end_sentence_idx in doc["sentences"]:
            doc_words[end_sentence_idx-1] = doc_words[end_sentence_idx-1] + " "
        for start_section, end_section in doc["sections"]:
            doc_words[start_section] = '\t' + doc_words[start_section]
            doc_words[end_section-1] = doc_words[end_section-1] + '\n'
        matching_spans = set(gold_salient_spans).intersection(saliency_spans)
        matched += len(matching_spans)
        predicted += len(saliency_spans)
        gold += len(gold_salient_spans)
        if produce_error_file:
            writer.write(f"# of gold salient spans: {len(gold_salient_spans)}\n")
            writer.write(f"# of predicted salient spans: {len(saliency_spans)}\n")
            writer.write(f"# of matching spans: {len(matching_spans)}\n")
            # Re-attach punctuation to neighbouring words before dumping.
            # (Reusing `i` here is safe: enumerate's index is not read again.)
            i = 0
            while i < len(doc_words):
                delimiters = ['{+{', '}+}', '<-<', '>->', '<+<', '>+>', '{-{', '}-}']
                character = doc_words[i].strip()
                for delimiter in delimiters:
                    character = character.strip(delimiter)
                if len(character) == 1:
                    if character in [".", ",", "?", "!", ":", ";", ")", "]"]:
                        doc_words[i-1] = doc_words[i-1] + doc_words[i]
                        del doc_words[i]
                        i -= 1
                    elif character in ["(", "["]:
                        doc_words[i+1] = doc_words[i] + doc_words[i+1]
                        del doc_words[i]
                        i -= 1
                i += 1
            writer.write(" ".join(doc_words))
    # Micro-averaged precision/recall/F1 over all documents.
    precision, recall, f1 = compute_f1(predicted, gold, matched, m=1)
    all_metrics = pd.DataFrame({"f1": [f1], "p": [precision], "r": [recall]})
    print("Salient Mention Classification Metrics")
    print(all_metrics.describe().loc['mean'])
    if produce_error_file:
        writer.close()
        print(f"Wrote error-annotated predictions to {generate_errors_file}")
def clustering_metrics(gold_data, predicted_clusters, span_map):
    """Score predicted salient clusters against gold coref clusters.

    Prints the mean clustering metrics across documents and returns, per
    document, the predicted-cluster -> gold-cluster mapping.
    """
    mappings = {}
    per_doc_metrics = []
    for doc in gold_data:
        doc_id = doc["doc_id"]
        metrics, mapping = match_predicted_clusters_to_gold(
            predicted_clusters[doc_id]["clusters"],
            doc["coref"],
            span_map[doc_id],
            doc["words"],
        )
        mappings[doc_id] = mapping
        per_doc_metrics.append(metrics)
    summary = pd.DataFrame(per_doc_metrics)
    print("Salient Clustering Metrics")
    print(summary.describe().loc['mean'])
    return mappings
def get_types_of_clusters(predicted_ner, predicted_clusters):
    """Annotate each predicted cluster, in place, with an entity type.

    For every document in predicted_clusters, adds a "types" dict mapping
    cluster name -> the NER type of one of its spans (arbitrary choice when
    spans disagree), or "Empty" for clusters with no spans.
    """
    for doc_id, doc in predicted_clusters.items():
        # (start, end) -> type lookup built from the document's NER output.
        span_to_type = {(span[0], span[1]): span[2] for span in predicted_ner[doc_id]["ner"]}
        doc["types"] = {}
        for name, spans in doc["clusters"].items():
            observed = set([span_to_type[tuple(span)] for span in spans])
            if len(observed) == 0:
                doc["types"][name] = "Empty"
                continue
            doc["types"][name] = list(observed)[0]
def main(args):
    """Run all salient-mention / clustering evaluations for the given files."""
    gold_data = load_jsonl(args.gold_file)
    for d in gold_data:
        # Normalize gold docs: merge Method sub-relations and expose coref
        # clusters under the "clusters" key expected downstream.
        merge_method_subrelations(d)
        d["clusters"] = d["coref"]
    predicted_salient_mentions = convert_to_dict(load_jsonl(args.salient_mentions_file))
    salent_mentions_metrics(gold_data, predicted_salient_mentions)
    predicted_ner = convert_to_dict(load_jsonl(args.ner_file))
    predicted_salient_clusters = convert_to_dict(load_jsonl(args.clusters_file))
    for d, doc in predicted_salient_clusters.items() :
        if 'clusters' not in doc :
            # Fallback: derive non-empty clusters from the doc's coref field.
            merge_method_subrelations(doc)
            doc['clusters'] = {x:v for x, v in doc['coref'].items() if len(v) > 0}
    predicted_span_to_gold_span_map: Dict[str, Dict[tuple, tuple]] = ner_metrics(gold_data, predicted_ner)
    # Attach entity types to predicted (and, trivially, gold) clusters.
    get_types_of_clusters(predicted_ner, predicted_salient_clusters)
    get_types_of_clusters(convert_to_dict(gold_data), convert_to_dict(gold_data))
    # NOTE(review): the returned mapping is currently unused.
    predicted_cluster_to_gold_cluster_map = clustering_metrics(
        gold_data, predicted_salient_clusters, predicted_span_to_gold_span_map
    )
if __name__ == "__main__":
    # Parse CLI file paths and run the full evaluation.
    args = parser.parse_args()
    main(args)
|
[
"pandas.DataFrame",
"scirex_utilities.json_utilities.load_jsonl",
"argparse.ArgumentParser",
"scirex.metrics.clustering_metrics.match_predicted_clusters_to_gold",
"scirex.predictors.utils.map_predicted_spans_to_gold",
"scirex.metrics.f1.compute_f1",
"json.dumps",
"scirex.predictors.utils.merge_method_subrelations"
] |
[((451, 476), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (474, 476), False, 'import argparse\n'), ((4872, 4913), 'scirex.metrics.f1.compute_f1', 'compute_f1', (['predicted', 'gold', 'matched'], {'m': '(1)'}), '(predicted, gold, matched, m=1)\n', (4882, 4913), False, 'from scirex.metrics.f1 import compute_f1\n'), ((4932, 4991), 'pandas.DataFrame', 'pd.DataFrame', (["{'f1': [f1], 'p': [precision], 'r': [recall]}"], {}), "({'f1': [f1], 'p': [precision], 'r': [recall]})\n", (4944, 4991), True, 'import pandas as pd\n'), ((5668, 5693), 'pandas.DataFrame', 'pd.DataFrame', (['all_metrics'], {}), '(all_metrics)\n', (5680, 5693), True, 'import pandas as pd\n'), ((6411, 6437), 'scirex_utilities.json_utilities.load_jsonl', 'load_jsonl', (['args.gold_file'], {}), '(args.gold_file)\n', (6421, 6437), False, 'from scirex_utilities.json_utilities import load_jsonl\n'), ((1094, 1150), 'scirex.predictors.utils.map_predicted_spans_to_gold', 'map_predicted_spans_to_gold', (['predicted_spans', 'gold_spans'], {}), '(predicted_spans, gold_spans)\n', (1121, 1150), False, 'from scirex.predictors.utils import map_predicted_spans_to_gold, merge_method_subrelations\n'), ((5436, 5552), 'scirex.metrics.clustering_metrics.match_predicted_clusters_to_gold', 'match_predicted_clusters_to_gold', (["predicted_doc['clusters']", "doc['coref']", "span_map[doc['doc_id']]", "doc['words']"], {}), "(predicted_doc['clusters'], doc['coref'],\n span_map[doc['doc_id']], doc['words'])\n", (5468, 5552), False, 'from scirex.metrics.clustering_metrics import match_predicted_clusters_to_gold\n'), ((6470, 6498), 'scirex.predictors.utils.merge_method_subrelations', 'merge_method_subrelations', (['d'], {}), '(d)\n', (6495, 6498), False, 'from scirex.predictors.utils import map_predicted_spans_to_gold, merge_method_subrelations\n'), ((6584, 6622), 'scirex_utilities.json_utilities.load_jsonl', 'load_jsonl', (['args.salient_mentions_file'], {}), '(args.salient_mentions_file)\n', (6594, 6622), 
False, 'from scirex_utilities.json_utilities import load_jsonl\n'), ((6728, 6753), 'scirex_utilities.json_utilities.load_jsonl', 'load_jsonl', (['args.ner_file'], {}), '(args.ner_file)\n', (6738, 6753), False, 'from scirex_utilities.json_utilities import load_jsonl\n'), ((6804, 6834), 'scirex_utilities.json_utilities.load_jsonl', 'load_jsonl', (['args.clusters_file'], {}), '(args.clusters_file)\n', (6814, 6834), False, 'from scirex_utilities.json_utilities import load_jsonl\n'), ((6938, 6968), 'scirex.predictors.utils.merge_method_subrelations', 'merge_method_subrelations', (['doc'], {}), '(doc)\n', (6963, 6968), False, 'from scirex.predictors.utils import map_predicted_spans_to_gold, merge_method_subrelations\n'), ((1909, 1943), 'json.dumps', 'json.dumps', (["doc['n_ary_relations']"], {}), "(doc['n_ary_relations'])\n", (1919, 1943), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-07 11:22
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.9.7): adds the ``VenuePage``
    # model, a Wagtail ``Page`` subclass carrying a street address and a
    # venue capacity. Depends on the migration that introduced ``Publisher``.
    dependencies = [
        ("modeladmintest", "0003_publisher"),
    ]
    operations = [
        migrations.CreateModel(
            name="VenuePage",
            fields=[
                (
                    "page_ptr",
                    # Multi-table-inheritance link to the base Wagtail Page
                    # row; also serves as this table's primary key.
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="wagtailcore.Page",
                    ),
                ),
                ("address", models.CharField(max_length=300)),
                ("capacity", models.IntegerField()),
            ],
            options={
                "abstract": False,
            },
            bases=("wagtailcore.page",),
        ),
    ]
|
[
"django.db.models.CharField",
"django.db.models.OneToOneField",
"django.db.models.IntegerField"
] |
[((434, 604), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""wagtailcore.Page"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'wagtailcore.Page')\n", (454, 604), False, 'from django.db import migrations, models\n'), ((810, 842), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (826, 842), False, 'from django.db import migrations, models\n'), ((874, 895), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (893, 895), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab
#
# ==================================================================
#
# Copyright (c) 2016, Parallels IP Holdings GmbH
# Released under the terms of MIT license (see LICENSE for details)
#
# ==================================================================
#
'''
email_mention: A hook to notify users mentioned in commit messages
`git ci -m 'My cool new feature @someone'` to send this commit to
someone@domain. Domain is specified in githooks.ini.
'''
import os
import re
import itertools
from textwrap import wrap
import logging
import hookutil
class Hook(object):
    """Notify users @mentioned in pushed commit messages by email.

    For every commit in the pushed range, each username referenced as
    ``@name`` in the commit message receives an HTML mail containing the
    commit details and a link to the commit in Stash.
    """

    def __init__(self, repo_dir, settings, params):
        self.repo_dir = repo_dir
        self.settings = settings
        self.params = params

    def compose_mail(self, branch, old_sha, new_sha):
        """Build per-recipient mail bodies for the pushed commit range.

        Returns a dict mapping recipient address -> HTML message text.
        """
        pusher = self.params['user_name']
        base_url = self.params['base_url']
        proj_key = self.params['proj_key']
        repo_name = self.params['repo_name']
        email_domain = self.params['email_domain']

        # Before the hook is run git has already created
        # a new_sha commit object
        log = hookutil.parse_git_log(self.repo_dir, branch, old_sha, new_sha)

        users = []
        for commit in log:
            # Raw string: '\W'/'\w' in a plain literal are invalid escape
            # sequences (DeprecationWarning since Python 3.6, an error in
            # future versions).
            for username in set(re.findall(r'(?:\W+|^)@(\w[\w\.]*\w|\w)', commit['message'])):
                ci = commit.copy()
                ci.update({'user': username})
                users.append(ci)

        # Sort first: itertools.groupby only groups *consecutive* items.
        users = sorted(users, key=lambda ko: ko['user'])

        mails = {}
        for user, commits in itertools.groupby(users, key=lambda ko: ko['user']):
            text = '<b>Branch:</b> %s\n' % branch.replace('refs/heads/', '')
            text += '<b>By user:</b> %s\n' % pusher
            text += '\n'
            for commit in commits:
                # BUGFIX: the URL used to carry a trailing '\n', which ended
                # up inside the <a href=...> attribute, producing a broken
                # link; the newline after the link is added separately below.
                link = base_url + \
                    "/projects/%s/repos/%s/commits/%s" % (proj_key, repo_name, commit['commit'])
                text += 'Commit: %s (%s)\n' % (commit['commit'], "<a href=%s>View in Stash</a>" % link)
                text += 'Author: %s %s\n' % (commit['author_name'], commit['author_email'])
                text += 'Date: %s\n' % commit['date']
                text += '\n'
                text += '\t%s' % '\n\t'.join(wrap(commit['message'], width=70))
                text += '\n\n'
            mails[user + '@' + email_domain] = text

        return mails

    def check(self, branch, old_sha, new_sha):
        """Run the hook for one updated ref; always returns (True, [])."""
        logging.debug("Run: branch=%s, old_sha=%s, new_sha=%s",
                      branch, old_sha, new_sha)
        logging.debug("params=%s", self.params)

        # Read every required parameter up front so that a missing key
        # fails fast with a clear configuration error. (pusher and
        # email_domain are consumed by compose_mail; the SMTP values are
        # used below.)
        try:
            pusher = self.params['user_name']
            proj_key = self.params['proj_key']
            repo_name = self.params['repo_name']
            smtp_server = self.params['smtp_server']
            smtp_port = self.params['smtp_port']
            smtp_from = self.params['smtp_from']
            email_domain = self.params['email_domain']
        except KeyError as err:
            logging.error("%s not in hook settings", err)
            raise RuntimeError("%s not in hook settings, check githooks configuration" % err)

        # Do not run the hook if the branch is being deleted
        if new_sha == '0' * 40:
            logging.debug("Deleting the branch, skip the hook")
            return True, []

        mails = self.compose_mail(branch, old_sha, new_sha)
        hookutil.send_mail(mails, smtp_from,
                           "%s/%s - Hook email-mention: You were mentioned in a commit message" % (proj_key, repo_name),
                           smtp_server, smtp_port)

        return True, []
|
[
"logging.error",
"logging.debug",
"hookutil.parse_git_log",
"textwrap.wrap",
"hookutil.send_mail",
"re.findall",
"itertools.groupby"
] |
[((1182, 1245), 'hookutil.parse_git_log', 'hookutil.parse_git_log', (['self.repo_dir', 'branch', 'old_sha', 'new_sha'], {}), '(self.repo_dir, branch, old_sha, new_sha)\n', (1204, 1245), False, 'import hookutil\n'), ((1608, 1659), 'itertools.groupby', 'itertools.groupby', (['users'], {'key': "(lambda ko: ko['user'])"}), "(users, key=lambda ko: ko['user'])\n", (1625, 1659), False, 'import itertools\n'), ((2509, 2594), 'logging.debug', 'logging.debug', (['"""Run: branch=%s, old_sha=%s, new_sha=%s"""', 'branch', 'old_sha', 'new_sha'], {}), "('Run: branch=%s, old_sha=%s, new_sha=%s', branch, old_sha,\n new_sha)\n", (2522, 2594), False, 'import logging\n'), ((2621, 2660), 'logging.debug', 'logging.debug', (['"""params=%s"""', 'self.params'], {}), "('params=%s', self.params)\n", (2634, 2660), False, 'import logging\n'), ((3462, 3625), 'hookutil.send_mail', 'hookutil.send_mail', (['mails', 'smtp_from', "('%s/%s - Hook email-mention: You were mentioned in a commit message' % (\n proj_key, repo_name))", 'smtp_server', 'smtp_port'], {}), "(mails, smtp_from, \n '%s/%s - Hook email-mention: You were mentioned in a commit message' %\n (proj_key, repo_name), smtp_server, smtp_port)\n", (3480, 3625), False, 'import hookutil\n'), ((3313, 3364), 'logging.debug', 'logging.debug', (['"""Deleting the branch, skip the hook"""'], {}), "('Deleting the branch, skip the hook')\n", (3326, 3364), False, 'import logging\n'), ((1325, 1390), 're.findall', 're.findall', (['"""(?:\\\\W+|^)@(\\\\w[\\\\w\\\\.]*\\\\w|\\\\w)"""', "commit['message']"], {}), "('(?:\\\\W+|^)@(\\\\w[\\\\w\\\\.]*\\\\w|\\\\w)', commit['message'])\n", (1335, 1390), False, 'import re\n'), ((3067, 3112), 'logging.error', 'logging.error', (['"""%s not in hook settings"""', 'err'], {}), "('%s not in hook settings', err)\n", (3080, 3112), False, 'import logging\n'), ((2312, 2345), 'textwrap.wrap', 'wrap', (["commit['message']"], {'width': '(70)'}), "(commit['message'], width=70)\n", (2316, 2345), False, 'from textwrap import 
wrap\n')]
|
#-------------------------------------------------------------------------------------------------------------------
# Packages & Settings
#-------------------------------------------------------------------------------------------------------------------
# General packages
import time
import sys
import os
import datetime
from glob import glob
import shutil
# Math and data structure packages
import numpy as np
from scipy import stats
import math
# Writing Output
import pickle
text_folder = '/home/rettenls/data/texts/coha/'
exp_folder = '/home/rettenls/data/experiments/coha/'
rand_folder = '/home/rettenls/data/experiments/coha/random/'
coordination_file = exp_folder + 'coordination/coordinate.txt'
date_format = '%Y-%m-%d_%H:%M:%S'
#-------------------------------------------------------------------------------------------------------------------
# Loading own Modules
#-------------------------------------------------------------------------------------------------------------------
sys.path.append('/home/rettenls/code')
from lib.model import Model
from lib.trafo import Transformation
from lib.eval import print_nn_word, get_nn_list, get_cosine_similarity, get_pip_norm
from lib.score import evaluate_analogy
from lib.operations import align, avg
from lib.util import get_filename
#-------------------------------------------------------------------------------------------------------------------
# Experiments
#-------------------------------------------------------------------------------------------------------------------
batches = ['batch_{:04d}'.format(i) for i in range(20)]
decades = [str(1810 + 10 * i) for i in range(20)]
models = ['fasttext', 'word2vec', 'glove']
max_run_num = 32

def _prune_corrupt_runs(model, folder):
    """Delete every run directory under ``folder`` whose saved model fails to load."""
    for run_folder in os.listdir(folder):
        run = folder + '/' + run_folder
        m = Model(model)
        try:
            m.load(run)
        except Exception:
            # Load failed -> run is corrupt/incomplete; remove it.
            shutil.rmtree(run)

# Per-decade experiment runs.
for model in models:
    for decade in decades:
        _prune_corrupt_runs(model, exp_folder + model + '/' + decade)

# Random-batch runs. BUGFIX: the original computed the batch folder but then
# iterated over the *stale* ``folder`` variable left over from the previous
# loop, so the batch runs were never actually checked or pruned.
for model in models:
    for batch in batches:
        _prune_corrupt_runs(model, exp_folder + model + '/' + batch)
|
[
"sys.path.append",
"lib.model.Model",
"os.listdir",
"shutil.rmtree"
] |
[((1006, 1044), 'sys.path.append', 'sys.path.append', (['"""/home/rettenls/code"""'], {}), "('/home/rettenls/code')\n", (1021, 1044), False, 'import sys\n'), ((1849, 1867), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1859, 1867), False, 'import os\n'), ((2096, 2114), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (2106, 2114), False, 'import os\n'), ((1911, 1923), 'lib.model.Model', 'Model', (['model'], {}), '(model)\n', (1916, 1923), False, 'from lib.model import Model\n'), ((2158, 2170), 'lib.model.Model', 'Model', (['model'], {}), '(model)\n', (2163, 2170), False, 'from lib.model import Model\n'), ((1963, 1981), 'shutil.rmtree', 'shutil.rmtree', (['run'], {}), '(run)\n', (1976, 1981), False, 'import shutil\n'), ((2210, 2228), 'shutil.rmtree', 'shutil.rmtree', (['run'], {}), '(run)\n', (2223, 2228), False, 'import shutil\n')]
|
import mysql.connector
mydb = mysql.connector.connect(
  host="localhost",
  user="root",
  password="",
  database="PUCCR"
)
mycursor = mydb.cursor()
#mycursor.execute("SELECT * FROM users")
#myresult = mycursor.fetchall()
#for x in myresult:
#  print(x)
import sys, os, dlib, glob, numpy
import cv2
from skimage import io
# Get dlib's default frontal face detector
detector = dlib.get_frontal_face_detector()
# Path to the 68-point facial landmark model
predictor_path = "shape_predictor_68_face_landmarks.dat"
# Path to the face-recognition (ResNet embedding) model
face_rec_model_path = "dlib_face_recognition_resnet_model_v1.dat"
# Folder of member face images to enroll
faces_folder_path = "./memberPic"
# Load the 68-point landmark predictor
sp = dlib.shape_predictor(predictor_path)
# Load the face-recognition model
facerec = dlib.face_recognition_model_v1(face_rec_model_path)
### Process every image in the folder
### Suggestion:
### store descriptors and candidate names in the database and read them back later
# List of face descriptors used for comparison
descriptors = []
# List of the corresponding person names
candidate = []
# For every image in the comparison folder:
# 1. detect faces
# 2. detect landmarks
# 3. compute the face descriptor
for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
    base = os.path.basename(f).split('.')[0] # file name (person's name)
    # take the person name from each image file in turn
    #candidate.append(base)
    img = io.imread(f)
    # 1. face detection
    dets = detector(img, 1)
    # 2. landmark detection
    for k, d in enumerate(dets):
        shape = sp(img, d)
        # 3. compute the descriptor (face feature vector)
        face_descriptor = facerec.compute_face_descriptor(img, shape)
        # convert to numpy array format
        v = numpy.array(face_descriptor)
        # append the value to descriptors
        print(face_descriptor)
        #descriptors.append(v)
        sql = "INSERT INTO user (name, 68_face_landmark) VALUES (%s, %s)"
        str1 = ' '.join(str(e) for e in face_descriptor)
        val = (base, str1)
        print(val)
        mycursor.execute(sql, val)
mydb.commit()
|
[
"os.path.basename",
"dlib.face_recognition_model_v1",
"numpy.array",
"dlib.get_frontal_face_detector",
"dlib.shape_predictor",
"os.path.join",
"skimage.io.imread"
] |
[((354, 386), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (384, 386), False, 'import sys, os, dlib, glob, numpy\n'), ((602, 638), 'dlib.shape_predictor', 'dlib.shape_predictor', (['predictor_path'], {}), '(predictor_path)\n', (622, 638), False, 'import sys, os, dlib, glob, numpy\n'), ((661, 712), 'dlib.face_recognition_model_v1', 'dlib.face_recognition_model_v1', (['face_rec_model_path'], {}), '(face_rec_model_path)\n', (691, 712), False, 'import sys, os, dlib, glob, numpy\n'), ((898, 938), 'os.path.join', 'os.path.join', (['faces_folder_path', '"""*.jpg"""'], {}), "(faces_folder_path, '*.jpg')\n", (910, 938), False, 'import sys, os, dlib, glob, numpy\n'), ((1047, 1059), 'skimage.io.imread', 'io.imread', (['f'], {}), '(f)\n', (1056, 1059), False, 'from skimage import io\n'), ((1308, 1336), 'numpy.array', 'numpy.array', (['face_descriptor'], {}), '(face_descriptor)\n', (1319, 1336), False, 'import sys, os, dlib, glob, numpy\n'), ((952, 971), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (968, 971), False, 'import sys, os, dlib, glob, numpy\n')]
|
"""
Affine `n` space over a ring
"""
#*****************************************************************************
# Copyright (C) 2006 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from six import integer_types
from sage.functions.orthogonal_polys import chebyshev_T, chebyshev_U
from sage.rings.all import (PolynomialRing, ZZ, Integer)
from sage.rings.rational_field import is_RationalField
from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
from sage.rings.polynomial.multi_polynomial_ring import is_MPolynomialRing
from sage.rings.finite_rings.finite_field_constructor import is_FiniteField
from sage.categories.map import Map
from sage.categories.fields import Fields
_Fields = Fields()
from sage.categories.homset import End
from sage.categories.number_fields import NumberFields
from sage.misc.all import latex
from sage.structure.category_object import normalize_names
from sage.schemes.generic.scheme import AffineScheme
from sage.schemes.generic.ambient_space import AmbientSpace
from sage.schemes.affine.affine_homset import SchemeHomset_points_affine
from sage.schemes.affine.affine_morphism import (SchemeMorphism_polynomial_affine_space,
SchemeMorphism_polynomial_affine_space_field,
SchemeMorphism_polynomial_affine_space_finite_field)
from sage.schemes.affine.affine_point import (SchemeMorphism_point_affine,
SchemeMorphism_point_affine_field,
SchemeMorphism_point_affine_finite_field)
def is_AffineSpace(x):
    r"""
    Test whether ``x`` is an affine space.

    EXAMPLES::

        sage: from sage.schemes.affine.affine_space import is_AffineSpace
        sage: is_AffineSpace(AffineSpace(5, names='x'))
        True
        sage: is_AffineSpace(AffineSpace(5, GF(9, 'alpha'), names='x'))
        True
        sage: is_AffineSpace(Spec(ZZ))
        False
    """
    return isinstance(x, AffineSpace_generic)
def AffineSpace(n, R=None, names='x'):
    r"""
    Return affine space of dimension ``n`` over the ring ``R``.

    EXAMPLES:

    The dimension and ring can be given in either order::

        sage: AffineSpace(3, QQ, 'x')
        Affine Space of dimension 3 over Rational Field
        sage: AffineSpace(5, QQ, 'x')
        Affine Space of dimension 5 over Rational Field
        sage: A = AffineSpace(2, QQ, names='XY'); A
        Affine Space of dimension 2 over Rational Field
        sage: A.coordinate_ring()
        Multivariate Polynomial Ring in X, Y over Rational Field

    Use the divide operator for base extension::

        sage: AffineSpace(5, names='x')/GF(17)
        Affine Space of dimension 5 over Finite Field of size 17

    The default base ring is `\ZZ`::

        sage: AffineSpace(5, names='x')
        Affine Space of dimension 5 over Integer Ring

    There is also an affine space associated to each polynomial ring::

        sage: R = GF(7)['x, y, z']
        sage: A = AffineSpace(R); A
        Affine Space of dimension 3 over Finite Field of size 7
        sage: A.coordinate_ring() is R
        True
    """
    if R is None and (is_MPolynomialRing(n) or is_PolynomialRing(n)):
        # Affine space associated to a polynomial ring: reuse the ring
        # itself as the coordinate ring.
        ring = n
        space = AffineSpace(ring.ngens(), ring.base_ring(), ring.variable_names())
        space._coordinate_ring = ring
        return space
    if isinstance(R, integer_types + (Integer,)):
        # Arguments were given as (ring, dimension); swap them.
        n, R = R, n
    if R is None:
        R = ZZ  # default is the integers
    if names is None:
        if n != 0:
            raise TypeError("you must specify the variables names of the coordinate ring")
        names = ''
    names = normalize_names(n, names)
    if R in _Fields:
        if is_FiniteField(R):
            return AffineSpace_finite_field(n, R, names)
        return AffineSpace_field(n, R, names)
    return AffineSpace_generic(n, R, names)
class AffineSpace_generic(AmbientSpace, AffineScheme):
"""
Affine space of dimension `n` over the ring `R`.
EXAMPLES::
sage: X.<x,y,z> = AffineSpace(3, QQ)
sage: X.base_scheme()
Spectrum of Rational Field
sage: X.base_ring()
Rational Field
sage: X.category()
Category of schemes over Rational Field
sage: X.structure_morphism()
Scheme morphism:
From: Affine Space of dimension 3 over Rational Field
To: Spectrum of Rational Field
Defn: Structure map
Loading and saving::
sage: loads(X.dumps()) == X
True
We create several other examples of affine spaces::
sage: AffineSpace(5, PolynomialRing(QQ, 'z'), 'Z')
Affine Space of dimension 5 over Univariate Polynomial Ring in z over Rational Field
sage: AffineSpace(RealField(), 3, 'Z')
Affine Space of dimension 3 over Real Field with 53 bits of precision
sage: AffineSpace(Qp(7), 2, 'x')
Affine Space of dimension 2 over 7-adic Field with capped relative precision 20
Even 0-dimensional affine spaces are supported::
sage: AffineSpace(0)
Affine Space of dimension 0 over Integer Ring
"""
    def __init__(self, n, R, names):
        """
        Initialize affine `n`-space over the ring ``R`` with the given
        variable ``names``.

        EXAMPLES::

            sage: AffineSpace(3, Zp(5), 'y')
            Affine Space of dimension 3 over 5-adic Ring with capped relative precision 20
        """
        AmbientSpace.__init__(self, n, R)
        self._assign_names(names)
        # AffineScheme is initialized last: coordinate_ring() relies on
        # the variable names assigned just above.
        AffineScheme.__init__(self, self.coordinate_ring(), R)
    def __iter__(self):
        """
        Return iterator over the elements of this affine space when defined over a finite field.

        EXAMPLES::

            sage: FF = FiniteField(3)
            sage: AA = AffineSpace(FF, 0)
            sage: [ x for x in AA ]
            [()]
            sage: AA = AffineSpace(FF, 1, 'Z')
            sage: [ x for x in AA ]
            [(0), (1), (2)]
            sage: AA.<z,w> = AffineSpace(FF, 2)
            sage: [ x for x in AA ]
            [(0, 0), (1, 0), (2, 0), (0, 1), (1, 1), (2, 1), (0, 2), (1, 2), (2, 2)]

        AUTHOR:

        - <NAME>
        """
        n = self.dimension_relative()
        R = self.base_ring()
        zero = R(0)
        # Start from the all-zero point.
        P = [ zero for _ in range(n) ]
        yield self(P)
        # One iterator over R per coordinate; points are enumerated
        # odometer-style: advance coordinate 0 until its iterator is
        # exhausted, then reset it and carry into the next coordinate.
        iters = [ iter(R) for _ in range(n) ]
        for x in iters: next(x) # put at zero
        i = 0
        while i < n:
            try:
                P[i] = next(iters[i])
                yield self(P)
                i = 0
            except StopIteration:
                iters[i] = iter(R) # reset
                next(iters[i]) # put at zero
                P[i] = zero
                i += 1
def ngens(self):
"""
Return the number of generators of self, i.e. the number of
variables in the coordinate ring of self.
EXAMPLES::
sage: AffineSpace(3, QQ).ngens()
3
sage: AffineSpace(7, ZZ).ngens()
7
"""
return self.dimension_relative()
def rational_points(self, F=None):
"""
Return the list of ``F``-rational points on the affine space self,
where ``F`` is a given finite field, or the base ring of self.
EXAMPLES::
sage: A = AffineSpace(1, GF(3))
sage: A.rational_points()
[(0), (1), (2)]
sage: A.rational_points(GF(3^2, 'b'))
[(0), (b), (b + 1), (2*b + 1), (2), (2*b), (2*b + 2), (b + 2), (1)]
sage: AffineSpace(2, ZZ).rational_points(GF(2))
[(0, 0), (1, 0), (0, 1), (1, 1)]
TESTS::
sage: AffineSpace(2, QQ).rational_points()
Traceback (most recent call last):
...
TypeError: base ring (= Rational Field) must be a finite field
sage: AffineSpace(1, GF(3)).rational_points(ZZ)
Traceback (most recent call last):
...
TypeError: second argument (= Integer Ring) must be a finite field
"""
if F is None:
if not is_FiniteField(self.base_ring()):
raise TypeError("base ring (= %s) must be a finite field"%self.base_ring())
return [ P for P in self ]
elif not is_FiniteField(F):
raise TypeError("second argument (= %s) must be a finite field"%F)
return [ P for P in self.base_extend(F) ]
def __eq__(self, right):
"""
Compare the space with ``right``.
EXAMPLES::
sage: AffineSpace(QQ, 3, 'a') == AffineSpace(ZZ, 3, 'a')
False
sage: AffineSpace(ZZ, 1, 'a') == AffineSpace(ZZ, 0, 'a')
False
sage: A = AffineSpace(ZZ, 1, 'x')
sage: loads(A.dumps()) == A
True
"""
if not isinstance(right, AffineSpace_generic):
return False
return (self.dimension_relative() == right.dimension_relative() and
self.coordinate_ring() == right.coordinate_ring())
def __ne__(self, other):
"""
Check whether the space is not equal to ``other``.
EXAMPLES::
sage: AffineSpace(QQ, 3, 'a') != AffineSpace(ZZ, 3, 'a')
True
sage: AffineSpace(ZZ, 1, 'a') != AffineSpace(ZZ, 0, 'a')
True
"""
return not (self == other)
def _latex_(self):
r"""
Return a LaTeX representation of this affine space.
EXAMPLES::
sage: print(latex(AffineSpace(1, ZZ, 'x')))
\mathbf{A}_{\Bold{Z}}^1
TESTS::
sage: AffineSpace(3, Zp(5), 'y')._latex_()
'\\mathbf{A}_{\\ZZ_{5}}^3'
"""
return "\\mathbf{A}_{%s}^%s"%(latex(self.base_ring()), self.dimension_relative())
    def _morphism(self, *args, **kwds):
        """
        Construct a morphism determined by action on points of this affine space.

        INPUT:

        Same as for
        :class:`~sage.schemes.affine.affine_morphism.SchemeMorphism_polynomial_affine_space`.

        OUTPUT:

        A new instance of
        :class:`~sage.schemes.affine.affine_morphism.SchemeMorphism_polynomial_affine_space`.

        EXAMPLES::

            sage: AA = AffineSpace(QQ, 3, 'a')
            sage: AA.inject_variables()
            Defining a0, a1, a2
            sage: EndAA = AA.Hom(AA)
            sage: AA._morphism(EndAA, [a0*a1, a1*a2, a0*a2])
            Scheme endomorphism of Affine Space of dimension 3 over Rational Field
              Defn: Defined on coordinates by sending (a0, a1, a2) to
                    (a0*a1, a1*a2, a0*a2)
        """
        # Factory hook: all arguments are forwarded unchanged to the
        # affine polynomial-morphism class.
        return SchemeMorphism_polynomial_affine_space(*args, **kwds)
    def _point_homset(self, *args, **kwds):
        """
        Construct a Hom-set for this affine space.

        INPUT:

        Same as for
        :class:`~sage.schemes.affine.affine_homset.SchemeHomset_points_affine`.

        OUTPUT:

        A new instance of
        :class:`~sage.schemes.affine.affine_homset.SchemeHomset_points_affine`.

        EXAMPLES::

            sage: AA = AffineSpace(QQ, 3, 'a')
            sage: AA._point_homset(Spec(QQ), AA)
            Set of rational points of Affine Space of dimension 3 over Rational Field
        """
        # Factory hook: all arguments are forwarded unchanged to the
        # affine point Hom-set class.
        return SchemeHomset_points_affine(*args, **kwds)
    def _point(self, *args, **kwds):
        r"""
        Construct a point of affine space.

        INPUT:

        Same as for
        :class:`~sage.schemes.affine.affine_point.SchemeMorphism_point_affine`.

        OUTPUT:

        A new instance of
        :class:`~sage.schemes.affine.affine_point.SchemeMorphism_point_affine`.

        TESTS::

            sage: AA = AffineSpace(QQ, 3, 'a')
            sage: AA._point(AA.point_homset(), [0, 1, 2])
            (0, 1, 2)
        """
        # Factory hook: all arguments are forwarded unchanged to the
        # affine point class.
        return SchemeMorphism_point_affine(*args, **kwds)
def _repr_(self):
"""
Return a string representation of this affine space.
EXAMPLES::
sage: AffineSpace(1, ZZ, 'x')
Affine Space of dimension 1 over Integer Ring
TESTS::
sage: AffineSpace(3, Zp(5), 'y')._repr_()
'Affine Space of dimension 3 over 5-adic Ring with capped relative precision 20'
"""
return "Affine Space of dimension %s over %s"%(self.dimension_relative(), self.base_ring())
def _repr_generic_point(self, polys=None):
"""
Return a string representation of the generic point
corresponding to the list of polys on this affine space.
If polys is None, the representation of the generic point of
the affine space is returned.
EXAMPLES::
sage: A.<x, y> = AffineSpace(2, ZZ)
sage: A._repr_generic_point([y-x^2])
'(-x^2 + y)'
sage: A._repr_generic_point()
'(x, y)'
"""
if polys is None:
polys = self.gens()
return '(%s)'%(", ".join([str(f) for f in polys]))
def _latex_generic_point(self, v=None):
"""
Return a LaTeX representation of the generic point
corresponding to the list of polys ``v`` on this affine space.
If ``v`` is None, the representation of the generic point of
the affine space is returned.
EXAMPLES::
sage: A.<x, y> = AffineSpace(2, ZZ)
sage: A._latex_generic_point([y-x^2])
'\\left(- x^{2} + y\\right)'
sage: A._latex_generic_point()
'\\left(x, y\\right)'
"""
if v is None:
v = self.gens()
return '\\left(%s\\right)'%(", ".join([str(latex(f)) for f in v]))
def _check_satisfies_equations(self, v):
"""
Return True if ``v`` defines a point on the scheme self; raise a
TypeError otherwise.
EXAMPLES::
sage: A = AffineSpace(3, ZZ)
sage: A._check_satisfies_equations([1, 1, 0])
True
sage: A._check_satisfies_equations((0, 1, 0))
True
sage: A._check_satisfies_equations([0, 0, 0])
True
sage: A._check_satisfies_equations([1, 2, 3, 4, 5])
Traceback (most recent call last):
...
TypeError: the list v=[1, 2, 3, 4, 5] must have 3 components
sage: A._check_satisfies_equations([1/2, 1, 1])
Traceback (most recent call last):
...
TypeError: the components of v=[1/2, 1, 1] must be elements of Integer Ring
sage: A._check_satisfies_equations(5)
Traceback (most recent call last):
...
TypeError: the argument v=5 must be a list or tuple
"""
if not isinstance(v, (list, tuple)):
raise TypeError('the argument v=%s must be a list or tuple'%v)
n = self.ngens()
if not len(v) == n:
raise TypeError('the list v=%s must have %s components'%(v, n))
R = self.base_ring()
from sage.structure.sequence import Sequence
if not Sequence(v).universe() == R:
raise TypeError('the components of v=%s must be elements of %s'%(v, R))
return True
def __pow__(self, m):
"""
Return the Cartesian power of this space.
INPUT:
- ``m`` -- integer.
OUTPUT:
- affine ambient space.
EXAMPLES::
sage: A = AffineSpace(1, QQ, 'x')
sage: A5 = A^5; A5
Affine Space of dimension 5 over Rational Field
sage: A5.variable_names()
('x0', 'x1', 'x2', 'x3', 'x4')
sage: A2 = AffineSpace(2, QQ, "x, y")
sage: A4 = A2^2; A4
Affine Space of dimension 4 over Rational Field
sage: A4.variable_names()
('x0', 'x1', 'x2', 'x3')
As you see, custom variable names are not preserved by power operator,
since there is no natural way to make new ones in general.
"""
mm = int(m)
if mm != m:
raise ValueError("m must be an integer")
return AffineSpace(self.dimension_relative() * mm, self.base_ring())
    def __mul__(self, right):
        r"""
        Create the product of affine spaces.

        INPUT:

        - ``right`` - an affine space or subscheme.

        OUTPUT: an affine space.= or subscheme.

        EXAMPLES::

            sage: A1 = AffineSpace(QQ, 1, 'x')
            sage: A2 = AffineSpace(QQ, 2, 'y')
            sage: A3 = A1*A2; A3
            Affine Space of dimension 3 over Rational Field
            sage: A3.variable_names()
            ('x', 'y0', 'y1')

        ::

            sage: A2 = AffineSpace(ZZ, 2, 't')
            sage: A3 = AffineSpace(ZZ, 3, 'x')
            sage: A3.inject_variables()
            Defining x0, x1, x2
            sage: X = A3.subscheme([x0*x2 - x1])
            sage: A2*X
            Closed subscheme of Affine Space of dimension 5 over Integer Ring defined by:
              x0*x2 - x1

        ::

            sage: S = ProjectiveSpace(QQ, 3, 'x')
            sage: T = AffineSpace(2, QQ, 'y')
            sage: T*S
            Traceback (most recent call last):
            ...
            TypeError: Projective Space of dimension 3 over Rational Field
            must be an affine space or affine subscheme
        """
        if self.base_ring() != right.base_ring():
            raise ValueError ('Must have the same base ring')

        from sage.schemes.generic.algebraic_scheme import AlgebraicScheme_subscheme
        if isinstance(right, AffineSpace_generic):
            # Product of a space with itself is just the Cartesian power.
            if self is right:
                return self.__pow__(2)
            # Otherwise: a fresh affine space of the combined dimension,
            # carrying the variable names of both factors.
            return AffineSpace(self.dimension_relative() + right.dimension_relative(),\
                    self.base_ring(), self.variable_names() + right.variable_names())
        elif isinstance(right, AlgebraicScheme_subscheme):
            # Product with a subscheme: build the ambient product space,
            # then re-express the defining polynomials of both factors in
            # its coordinate ring via the two inclusion homomorphisms.
            AS = self*right.ambient_space()
            CR = AS.coordinate_ring()
            n = self.ambient_space().coordinate_ring().ngens()

            # phi maps our variables to the first n generators of CR,
            # psi maps the subscheme's variables to the remaining ones.
            phi = self.ambient_space().coordinate_ring().hom(list(CR.gens()[:n]), CR)
            psi = right.ambient_space().coordinate_ring().hom(list(CR.gens()[n:]), CR)
            return AS.subscheme([phi(t) for t in self.defining_polynomials()] + [psi(t) for t in right.defining_polynomials()])
        else:
            raise TypeError('%s must be an affine space or affine subscheme'%right)
def change_ring(self, R):
r"""
Return an affine space over ring ``R`` and otherwise the same as this space.
INPUT:
- ``R`` -- commutative ring or morphism.
OUTPUT:
- affine space over ``R``.
.. NOTE::
There is no need to have any relation between `R` and the base ring
of this space, if you want to have such a relation, use
``self.base_extend(R)`` instead.
EXAMPLES::
sage: A.<x,y,z> = AffineSpace(3, ZZ)
sage: AQ = A.change_ring(QQ); AQ
Affine Space of dimension 3 over Rational Field
sage: AQ.change_ring(GF(5))
Affine Space of dimension 3 over Finite Field of size 5
::
sage: K.<w> = QuadraticField(5)
sage: A = AffineSpace(K,2,'t')
sage: A.change_ring(K.embeddings(CC)[1])
Affine Space of dimension 2 over Complex Field with 53 bits of precision
"""
if isinstance(R, Map):
return AffineSpace(self.dimension_relative(), R.codomain(), self.variable_names())
else:
return AffineSpace(self.dimension_relative(), R, self.variable_names())
    def coordinate_ring(self):
        """
        Return the coordinate ring of this scheme, if defined.

        EXAMPLES::

            sage: R = AffineSpace(2, GF(9,'alpha'), 'z').coordinate_ring(); R
            Multivariate Polynomial Ring in z0, z1 over Finite Field in alpha of size 3^2
            sage: AffineSpace(3, R, 'x').coordinate_ring()
            Multivariate Polynomial Ring in x0, x1, x2 over Multivariate Polynomial Ring
            in z0, z1 over Finite Field in alpha of size 3^2
        """
        try:
            return self._coordinate_ring
        except AttributeError:
            # Built lazily on first access and cached on the instance.
            # (The AffineSpace factory may also pre-set _coordinate_ring.)
            self._coordinate_ring = PolynomialRing(self.base_ring(),
                               self.dimension_relative(), names=self.variable_names())
            return self._coordinate_ring
def _validate(self, polynomials):
"""
If ``polynomials`` is a tuple of valid polynomial functions on the affine space,
return ``polynomials``, otherwise raise TypeError.
Since this is an affine space, all polynomials are valid.
INPUT:
- ``polynomials`` -- tuple of polynomials in the coordinate ring of
this space.
OUTPUT:
- tuple of polynomials in the coordinate ring of this space.
EXAMPLES::
sage: A.<x, y, z> = AffineSpace(3, ZZ)
sage: A._validate((x*y - z, 1))
(x*y - z, 1)
"""
return polynomials
def projective_embedding(self, i=None, PP=None):
"""
Returns a morphism from this space into an ambient projective space
of the same dimension.
INPUT:
- ``i`` -- integer (default: dimension of self = last
coordinate) determines which projective embedding to compute. The
embedding is that which has a 1 in the i-th coordinate, numbered
from 0.
- ``PP`` -- (default: None) ambient projective space, i.e.,
codomain of morphism; this is constructed if it is not
given.
EXAMPLES::
sage: AA = AffineSpace(2, QQ, 'x')
sage: pi = AA.projective_embedding(0); pi
Scheme morphism:
From: Affine Space of dimension 2 over Rational Field
To: Projective Space of dimension 2 over Rational Field
Defn: Defined on coordinates by sending (x0, x1) to
(1 : x0 : x1)
sage: z = AA(3, 4)
sage: pi(z)
(1/4 : 3/4 : 1)
sage: pi(AA(0,2))
(1/2 : 0 : 1)
sage: pi = AA.projective_embedding(1); pi
Scheme morphism:
From: Affine Space of dimension 2 over Rational Field
To: Projective Space of dimension 2 over Rational Field
Defn: Defined on coordinates by sending (x0, x1) to
(x0 : 1 : x1)
sage: pi(z)
(3/4 : 1/4 : 1)
sage: pi = AA.projective_embedding(2)
sage: pi(z)
(3 : 4 : 1)
::
sage: A.<x,y> = AffineSpace(ZZ, 2)
sage: A.projective_embedding(2).codomain().affine_patch(2) == A
True
"""
n = self.dimension_relative()
if i is None:
try:
i = self._default_embedding_index
except AttributeError:
i = int(n)
else:
i = int(i)
try:
phi = self.__projective_embedding[i]
#assume that if you've passed in a new codomain you want to override
#the existing embedding
if PP is None or phi.codomain() == PP:
return(phi)
except AttributeError:
self.__projective_embedding = {}
except KeyError:
pass
#if no i-th embedding exists, we may still be here with PP==None
if PP is None:
from sage.schemes.projective.projective_space import ProjectiveSpace
PP = ProjectiveSpace(n, self.base_ring())
elif PP.dimension_relative() != n:
raise ValueError("projective Space must be of dimension %s"%(n))
R = self.coordinate_ring()
v = list(R.gens())
if n < 0 or n >self.dimension_relative():
raise ValueError("argument i (=%s) must be between 0 and %s, inclusive"%(i,n))
v.insert(i, R(1))
phi = self.hom(v, PP)
self.__projective_embedding[i] = phi
#make affine patch and projective embedding match
PP.affine_patch(i,self)
return phi
    def subscheme(self, X):
        """
        Return the closed subscheme defined by ``X``.

        INPUT:

        - ``X`` - a list or tuple of equations.

        EXAMPLES::

            sage: A.<x,y> = AffineSpace(QQ, 2)
            sage: X = A.subscheme([x, y^2, x*y^2]); X
            Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
              x,
              y^2,
              x*y^2

        ::

            sage: X.defining_polynomials ()
            (x, y^2, x*y^2)
            sage: I = X.defining_ideal(); I
            Ideal (x, y^2, x*y^2) of Multivariate Polynomial Ring in x, y over Rational Field
            sage: I.groebner_basis()
            [y^2, x]
            sage: X.dimension()
            0
            sage: X.base_ring()
            Rational Field
            sage: X.base_scheme()
            Spectrum of Rational Field
            sage: X.structure_morphism()
            Scheme morphism:
              From: Closed subscheme of Affine Space of dimension 2 over Rational Field defined by:
              x,
              y^2,
              x*y^2
              To:   Spectrum of Rational Field
              Defn: Structure map
            sage: X.dimension()
            0
        """
        # Local import; NOTE(review): presumably deferred to avoid an
        # import cycle with the subscheme module -- confirm before hoisting.
        from sage.schemes.affine.affine_subscheme import AlgebraicScheme_subscheme_affine
        return AlgebraicScheme_subscheme_affine(self, X)
def _an_element_(self):
r"""
Returns an element of this affine space,used both for illustration and testing purposes.
OUTPUT: A point in the affine space.
EXAMPLES::
sage: AffineSpace(ZZ, 2, 'x').an_element()
(5, 4)
sage: AffineSpace(Qp(5), 2, 'x').an_element()
(5^2 + O(5^22), 4*5 + O(5^21))
"""
n = self.dimension_relative()
R = self.base_ring()
return self([(5 - i) * R.an_element() for i in range(n)])
def chebyshev_polynomial(self, n, kind='first'):
"""
Generates an endomorphism of this affine line by a Chebyshev polynomial.
Chebyshev polynomials are a sequence of recursively defined orthogonal
polynomials. Chebyshev of the first kind are defined as `T_0(x) = 1`,
`T_1(x) = x`, and `T_{n+1}(x) = 2xT_n(x) - T_{n-1}(x)`. Chebyshev of
the second kind are defined as `U_0(x) = 1`,
`U_1(x) = 2x`, and `U_{n+1}(x) = 2xU_n(x) - U_{n-1}(x)`.
INPUT:
- ``n`` -- a non-negative integer.
- ``kind`` -- ``first`` or ``second`` specifying which kind of chebyshev the user would like
to generate. Defaults to ``first``.
OUTPUT: :class:`DynamicalSystem_affine`
EXAMPLES::
sage: A.<x> = AffineSpace(QQ, 1)
sage: A.chebyshev_polynomial(5, 'first')
Dynamical System of Affine Space of dimension 1 over Rational Field
Defn: Defined on coordinates by sending (x) to
(16*x^5 - 20*x^3 + 5*x)
::
sage: A.<x> = AffineSpace(QQ, 1)
sage: A.chebyshev_polynomial(3, 'second')
Dynamical System of Affine Space of dimension 1 over Rational Field
Defn: Defined on coordinates by sending (x) to
(8*x^3 - 4*x)
::
sage: A.<x> = AffineSpace(QQ, 1)
sage: A.chebyshev_polynomial(3, 2)
Traceback (most recent call last):
...
ValueError: keyword 'kind' must have a value of either 'first' or 'second'
::
sage: A.<x> = AffineSpace(QQ, 1)
sage: A.chebyshev_polynomial(-4, 'second')
Traceback (most recent call last):
...
ValueError: first parameter 'n' must be a non-negative integer
::
sage: A = AffineSpace(QQ, 2, 'x')
sage: A.chebyshev_polynomial(2)
Traceback (most recent call last):
...
TypeError: affine space must be of dimension 1
"""
if self.dimension_relative() != 1:
raise TypeError("affine space must be of dimension 1")
n = ZZ(n)
if (n < 0):
raise ValueError("first parameter 'n' must be a non-negative integer")
from sage.dynamics.arithmetic_dynamics.affine_ds import DynamicalSystem_affine
if kind == 'first':
return DynamicalSystem_affine([chebyshev_T(n, self.gen(0))], domain=self)
elif kind == 'second':
return DynamicalSystem_affine([chebyshev_U(n, self.gen(0))], domain=self)
else:
raise ValueError("keyword 'kind' must have a value of either 'first' or 'second'")
class AffineSpace_field(AffineSpace_generic):
    # Affine space over a field: supplies the field-specific point/morphism
    # classes and adds bounded-height point enumeration, Weil restriction
    # and curve construction.

    def _point(self, *args, **kwds):
        """
        Construct a point.

        For internal use only. See :mod:`morphism` for details.

        TESTS::

            sage: P2.<x,y,z> = AffineSpace(3, GF(3))
            sage: point_homset = P2._point_homset(Spec(GF(3)), P2)
            sage: P2._point(point_homset, [1, 2, 3])
            (1, 2, 0)
        """
        return SchemeMorphism_point_affine_field(*args, **kwds)

    def _morphism(self, *args, **kwds):
        """
        Construct a morphism.

        For internal use only. See :mod:`morphism` for details.

        TESTS::

            sage: P2.<x,y,z> = AffineSpace(3, GF(3))
            sage: P2._morphism(P2.Hom(P2), [x, y, z])
            Scheme endomorphism of Affine Space of dimension 3 over Finite Field of size 3
              Defn: Defined on coordinates by sending (x, y, z) to
                    (x, y, z)
        """
        return SchemeMorphism_polynomial_affine_space_field(*args, **kwds)

    def points_of_bounded_height(self, **kwds):
        r"""
        Returns an iterator of the points in this affine space of
        absolute height of at most the given bound.

        Bound check is strict for the rational field.
        Requires this space to be affine space over a number field. Uses the
        Doyle-Krumm algorithm 4 (algorithm 5 for imaginary quadratic) for
        computing algebraic numbers up to a given height [Doyle-Krumm]_.

        The algorithm requires floating point arithmetic, so the user is
        allowed to specify the precision for such calculations.
        Additionally, due to floating point issues, points
        slightly larger than the bound may be returned. This can be controlled
        by lowering the tolerance.

        INPUT:

        kwds:

        - ``bound`` - a real number

        - ``tolerance`` - a rational number in (0,1] used in doyle-krumm algorithm-4

        - ``precision`` - the precision to use for computing the elements of bounded height of number fields

        OUTPUT:

        - an iterator of points in self

        EXAMPLES::

            sage: A.<x,y> = AffineSpace(QQ, 2)
            sage: list(A.points_of_bounded_height(bound=3))
            [(0, 0), (1, 0), (-1, 0), (1/2, 0), (-1/2, 0), (2, 0), (-2, 0), (0, 1),
            (1, 1), (-1, 1), (1/2, 1), (-1/2, 1), (2, 1), (-2, 1), (0, -1), (1, -1),
            (-1, -1), (1/2, -1), (-1/2, -1), (2, -1), (-2, -1), (0, 1/2), (1, 1/2),
            (-1, 1/2), (1/2, 1/2), (-1/2, 1/2), (2, 1/2), (-2, 1/2), (0, -1/2), (1, -1/2),
            (-1, -1/2), (1/2, -1/2), (-1/2, -1/2), (2, -1/2), (-2, -1/2), (0, 2), (1, 2),
            (-1, 2), (1/2, 2), (-1/2, 2), (2, 2), (-2, 2), (0, -2), (1, -2), (-1, -2), (1/2, -2),
            (-1/2, -2), (2, -2), (-2, -2)]

        ::

            sage: u = QQ['u'].0
            sage: A.<x,y> = AffineSpace(NumberField(u^2 - 2, 'v'), 2)
            sage: len(list(A.points_of_bounded_height(bound=2, tolerance=0.1)))
            529
        """
        if (is_RationalField(self.base_ring())):
            ftype = False # stores whether field is a number field or the rational field
        elif (self.base_ring() in NumberFields()): # true for rational field as well, so check is_RationalField first
            ftype = True
        else:
            raise NotImplementedError("self must be affine space over a number field.")
        bound = kwds.pop('bound')
        B = bound**self.base_ring().absolute_degree() # convert to relative height
        n = self.dimension_relative()
        R = self.base_ring()
        zero = R(0)
        P = [ zero for _ in range(n) ]
        # The origin is always produced first.
        yield self(P)
        if not ftype:
            iters = [ R.range_by_height(B) for _ in range(n) ]
        else:
            tol = kwds.pop('tolerance', 1e-2)
            prec = kwds.pop('precision', 53)
            iters = [ R.elements_of_bounded_height(bound=B, tolerance=tol, precision=prec) for _ in range(n) ]
        for x in iters: next(x) # put at zero
        i = 0
        # Odometer-style enumeration: advance coordinate i; after each
        # successful step restart from coordinate 0, and on exhaustion
        # reset coordinate i to zero and carry to coordinate i+1.
        while i < n:
            try:
                P[i] = next(iters[i])
                yield self(P)
                i = 0
            except StopIteration:
                if not ftype:
                    iters[i] = R.range_by_height(B) # reset
                else:
                    iters[i] = R.elements_of_bounded_height(bound=B, tolerance=tol, precision=prec)
                next(iters[i]) # put at zero
                P[i] = zero
                i += 1

    def weil_restriction(self):
        r"""
        Compute the Weil restriction of this affine space over some extension
        field.

        If the field is a finite field, then this computes
        the Weil restriction to the prime subfield.

        OUTPUT: Affine space of dimension ``d * self.dimension_relative()``
        over the base field of ``self.base_ring()``.

        EXAMPLES::

            sage: R.<x> = QQ[]
            sage: K.<w> = NumberField(x^5-2)
            sage: AK.<x,y> = AffineSpace(K, 2)
            sage: AK.weil_restriction()
            Affine Space of dimension 10 over Rational Field
            sage: R.<x> = K[]
            sage: L.<v> = K.extension(x^2+1)
            sage: AL.<x,y> = AffineSpace(L, 2)
            sage: AL.weil_restriction()
            Affine Space of dimension 4 over Number Field in w with defining
            polynomial x^5 - 2
        """
        # The result is cached on the instance after the first computation.
        try:
            X = self.__weil_restriction
        except AttributeError:
            L = self.base_ring()
            if L.is_finite():
                d = L.degree()
                K = L.prime_subfield()
            else:
                d = L.relative_degree()
                K = L.base_field()
            if d == 1:
                # Trivial extension: the restriction is the space itself.
                X = self
            else:
                X = AffineSpace(K, d*self.dimension_relative(), 'z')
            self.__weil_restriction = X
        return X

    def curve(self,F):
        r"""
        Return a curve defined by ``F`` in this affine space.

        INPUT:

        - ``F`` -- a polynomial, or a list or tuple of polynomials in
          the coordinate ring of this affine space.

        EXAMPLES::

            sage: A.<x,y,z> = AffineSpace(QQ, 3)
            sage: A.curve([y - x^4, z - y^5])
            Affine Curve over Rational Field defined by -x^4 + y, -y^5 + z
        """
        from sage.schemes.curves.constructor import Curve
        return Curve(F, self)
class AffineSpace_finite_field(AffineSpace_field):
    # Affine space over a finite field: overrides the point and morphism
    # factories to use the finite-field specific classes.

    def _point(self, *args, **kwds):
        """
        Construct a point of this affine space.

        For internal use only; see :mod:`morphism` for details.
        """
        return SchemeMorphism_point_affine_finite_field(*args, **kwds)

    def _morphism(self, *args, **kwds):
        """
        Construct a morphism of this affine space.

        For internal use only; see :mod:`morphism` for details.
        """
        return SchemeMorphism_polynomial_affine_space_finite_field(*args, **kwds)
#fix the pickles from moving affine_space.py
# Old pickles reference sage.schemes.generic.affine_space.AffineSpace_generic;
# map that dotted path onto the class at its new location so they still load.
from sage.misc.persist import register_unpickle_override
register_unpickle_override('sage.schemes.generic.affine_space',
                           'AffineSpace_generic',
                           AffineSpace_generic)
|
[
"sage.schemes.affine.affine_point.SchemeMorphism_point_affine",
"sage.structure.category_object.normalize_names",
"sage.schemes.generic.ambient_space.AmbientSpace.__init__",
"sage.schemes.affine.affine_subscheme.AlgebraicScheme_subscheme_affine",
"sage.schemes.affine.affine_homset.SchemeHomset_points_affine",
"sage.schemes.curves.constructor.Curve",
"sage.misc.persist.register_unpickle_override",
"sage.schemes.affine.affine_point.SchemeMorphism_point_affine_finite_field",
"sage.schemes.affine.affine_point.SchemeMorphism_point_affine_field",
"sage.schemes.affine.affine_morphism.SchemeMorphism_polynomial_affine_space_field",
"sage.rings.polynomial.multi_polynomial_ring.is_MPolynomialRing",
"sage.rings.finite_rings.finite_field_constructor.is_FiniteField",
"sage.structure.sequence.Sequence",
"sage.rings.polynomial.polynomial_ring.is_PolynomialRing",
"sage.misc.all.latex",
"sage.schemes.affine.affine_morphism.SchemeMorphism_polynomial_affine_space",
"sage.categories.number_fields.NumberFields",
"sage.rings.all.ZZ",
"sage.schemes.affine.affine_morphism.SchemeMorphism_polynomial_affine_space_finite_field",
"sage.categories.fields.Fields"
] |
[((920, 928), 'sage.categories.fields.Fields', 'Fields', ([], {}), '()\n', (926, 928), False, 'from sage.categories.fields import Fields\n'), ((36739, 36850), 'sage.misc.persist.register_unpickle_override', 'register_unpickle_override', (['"""sage.schemes.generic.affine_space"""', '"""AffineSpace_generic"""', 'AffineSpace_generic'], {}), "('sage.schemes.generic.affine_space',\n 'AffineSpace_generic', AffineSpace_generic)\n", (36765, 36850), False, 'from sage.misc.persist import register_unpickle_override\n'), ((3923, 3948), 'sage.structure.category_object.normalize_names', 'normalize_names', (['n', 'names'], {}), '(n, names)\n', (3938, 3948), False, 'from sage.structure.category_object import normalize_names\n'), ((3981, 3998), 'sage.rings.finite_rings.finite_field_constructor.is_FiniteField', 'is_FiniteField', (['R'], {}), '(R)\n', (3995, 3998), False, 'from sage.rings.finite_rings.finite_field_constructor import is_FiniteField\n'), ((5648, 5681), 'sage.schemes.generic.ambient_space.AmbientSpace.__init__', 'AmbientSpace.__init__', (['self', 'n', 'R'], {}), '(self, n, R)\n', (5669, 5681), False, 'from sage.schemes.generic.ambient_space import AmbientSpace\n'), ((10901, 10954), 'sage.schemes.affine.affine_morphism.SchemeMorphism_polynomial_affine_space', 'SchemeMorphism_polynomial_affine_space', (['*args'], {}), '(*args, **kwds)\n', (10939, 10954), False, 'from sage.schemes.affine.affine_morphism import SchemeMorphism_polynomial_affine_space, SchemeMorphism_polynomial_affine_space_field, SchemeMorphism_polynomial_affine_space_finite_field\n'), ((11534, 11575), 'sage.schemes.affine.affine_homset.SchemeHomset_points_affine', 'SchemeHomset_points_affine', (['*args'], {}), '(*args, **kwds)\n', (11560, 11575), False, 'from sage.schemes.affine.affine_homset import SchemeHomset_points_affine\n'), ((12083, 12125), 'sage.schemes.affine.affine_point.SchemeMorphism_point_affine', 'SchemeMorphism_point_affine', (['*args'], {}), '(*args, **kwds)\n', (12110, 12125), False, 'from 
sage.schemes.affine.affine_point import SchemeMorphism_point_affine, SchemeMorphism_point_affine_field, SchemeMorphism_point_affine_finite_field\n'), ((25822, 25863), 'sage.schemes.affine.affine_subscheme.AlgebraicScheme_subscheme_affine', 'AlgebraicScheme_subscheme_affine', (['self', 'X'], {}), '(self, X)\n', (25854, 25863), False, 'from sage.schemes.affine.affine_subscheme import AlgebraicScheme_subscheme_affine\n'), ((28584, 28589), 'sage.rings.all.ZZ', 'ZZ', (['n'], {}), '(n)\n', (28586, 28589), False, 'from sage.rings.all import PolynomialRing, ZZ, Integer\n'), ((29548, 29596), 'sage.schemes.affine.affine_point.SchemeMorphism_point_affine_field', 'SchemeMorphism_point_affine_field', (['*args'], {}), '(*args, **kwds)\n', (29581, 29596), False, 'from sage.schemes.affine.affine_point import SchemeMorphism_point_affine, SchemeMorphism_point_affine_field, SchemeMorphism_point_affine_finite_field\n'), ((30085, 30144), 'sage.schemes.affine.affine_morphism.SchemeMorphism_polynomial_affine_space_field', 'SchemeMorphism_polynomial_affine_space_field', (['*args'], {}), '(*args, **kwds)\n', (30129, 30144), False, 'from sage.schemes.affine.affine_morphism import SchemeMorphism_polynomial_affine_space, SchemeMorphism_polynomial_affine_space_field, SchemeMorphism_polynomial_affine_space_finite_field\n'), ((35577, 35591), 'sage.schemes.curves.constructor.Curve', 'Curve', (['F', 'self'], {}), '(F, self)\n', (35582, 35591), False, 'from sage.schemes.curves.constructor import Curve\n'), ((36025, 36080), 'sage.schemes.affine.affine_point.SchemeMorphism_point_affine_finite_field', 'SchemeMorphism_point_affine_finite_field', (['*args'], {}), '(*args, **kwds)\n', (36065, 36080), False, 'from sage.schemes.affine.affine_point import SchemeMorphism_point_affine, SchemeMorphism_point_affine_field, SchemeMorphism_point_affine_finite_field\n'), ((36569, 36635), 'sage.schemes.affine.affine_morphism.SchemeMorphism_polynomial_affine_space_finite_field', 
'SchemeMorphism_polynomial_affine_space_finite_field', (['*args'], {}), '(*args, **kwds)\n', (36620, 36635), False, 'from sage.schemes.affine.affine_morphism import SchemeMorphism_polynomial_affine_space, SchemeMorphism_polynomial_affine_space_field, SchemeMorphism_polynomial_affine_space_finite_field\n'), ((3418, 3439), 'sage.rings.polynomial.multi_polynomial_ring.is_MPolynomialRing', 'is_MPolynomialRing', (['n'], {}), '(n)\n', (3436, 3439), False, 'from sage.rings.polynomial.multi_polynomial_ring import is_MPolynomialRing\n'), ((3443, 3463), 'sage.rings.polynomial.polynomial_ring.is_PolynomialRing', 'is_PolynomialRing', (['n'], {}), '(n)\n', (3460, 3463), False, 'from sage.rings.polynomial.polynomial_ring import is_PolynomialRing\n'), ((8507, 8524), 'sage.rings.finite_rings.finite_field_constructor.is_FiniteField', 'is_FiniteField', (['F'], {}), '(F)\n', (8521, 8524), False, 'from sage.rings.finite_rings.finite_field_constructor import is_FiniteField\n'), ((32336, 32350), 'sage.categories.number_fields.NumberFields', 'NumberFields', ([], {}), '()\n', (32348, 32350), False, 'from sage.categories.number_fields import NumberFields\n'), ((13893, 13901), 'sage.misc.all.latex', 'latex', (['f'], {}), '(f)\n', (13898, 13901), False, 'from sage.misc.all import latex\n'), ((15310, 15321), 'sage.structure.sequence.Sequence', 'Sequence', (['v'], {}), '(v)\n', (15318, 15321), False, 'from sage.structure.sequence import Sequence\n')]
|
# Copyright 2017 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Implementation of ini add / remove for devstack. We don't use the
# python ConfigFile parser because that ends up rewriting the entire
# file and doesn't ensure comments remain.
import fixtures
import testtools
from devstack import dsconf
# Starting fixture: a local.conf with a [[local|localrc]] section plus two
# post-config sections.
BASIC = """
[[local|localrc]]
a=b
c=d
f=1
[[post-config|$NEUTRON_CONF]]
[DEFAULT]
global_physnet_mtu=1450
[[post-config|$NOVA_CONF]]
[upgrade_levels]
compute = auto
"""
# Variant fixture with no [[local|localrc]] section at all.
BASIC_NO_LOCAL = """
[[post-config|$NEUTRON_CONF]]
[DEFAULT]
global_physnet_mtu=1450
[[post-config|$NOVA_CONF]]
[upgrade_levels]
compute = auto
"""
# Expected result after set_local("g=2") on BASIC: new var appended to localrc.
RESULT1 = """
[[local|localrc]]
a=b
c=d
f=1
g=2
[[post-config|$NEUTRON_CONF]]
[DEFAULT]
global_physnet_mtu=1450
[[post-config|$NOVA_CONF]]
[upgrade_levels]
compute = auto
"""
# Expected result after set_local("a=2") on BASIC: assignment appended even
# though "a" already exists earlier in the section.
RESULT2 = """
[[local|localrc]]
a=b
c=d
f=1
a=2
[[post-config|$NEUTRON_CONF]]
[DEFAULT]
global_physnet_mtu=1450
[[post-config|$NOVA_CONF]]
[upgrade_levels]
compute = auto
"""
# Expected result after appending two raw enable_plugin lines to BASIC.
RESULT3 = """
[[local|localrc]]
a=b
c=d
f=1
enable_plugin foo http://foo branch
enable_plugin bar http://foo branch
[[post-config|$NEUTRON_CONF]]
[DEFAULT]
global_physnet_mtu=1450
[[post-config|$NOVA_CONF]]
[upgrade_levels]
compute = auto
"""
# Expected result after setting a=b and c=d on BASIC_NO_LOCAL: a localrc
# section is created on demand.
RESULT_NO_LOCAL = """
[[local|localrc]]
a=b
c=d
[[post-config|$NEUTRON_CONF]]
[DEFAULT]
global_physnet_mtu=1450
[[post-config|$NOVA_CONF]]
[upgrade_levels]
compute = auto
"""
class TestLcSet(testtools.TestCase):
    """Tests for LocalConf.set_local on a file that has a localrc section."""

    def setUp(self):
        super(TestLcSet, self).setUp()
        tmpdir = self.useFixture(fixtures.TempDir()).path
        self._path = tmpdir + "/local.conf"
        with open(self._path, "w") as f:
            f.write(BASIC)

    def _read_back(self):
        # Helper: return the current contents of the file under test.
        with open(self._path) as f:
            return f.read()

    def test_set_new(self):
        # A brand-new variable is appended to the localrc section.
        dsconf.LocalConf(self._path).set_local("g=2")
        self.assertEqual(self._read_back(), RESULT1)

    def test_set_existing(self):
        # Setting an already-present variable appends a new assignment line.
        dsconf.LocalConf(self._path).set_local("a=2")
        self.assertEqual(self._read_back(), RESULT2)

    def test_set_raw(self):
        # Raw (non key=value) lines such as enable_plugin are kept verbatim.
        conf = dsconf.LocalConf(self._path)
        conf.set_local("enable_plugin foo http://foo branch")
        conf.set_local("enable_plugin bar http://foo branch")
        self.assertEqual(self._read_back(), RESULT3)

    def test_set_raw_multiline(self):
        # A multi-line raw value is handled the same as separate calls.
        conf = dsconf.LocalConf(self._path)
        conf.set_local("enable_plugin foo http://foo branch"
                       "\nenable_plugin bar http://foo branch")
        self.assertEqual(self._read_back(), RESULT3)
class TestNoLocal(testtools.TestCase):
    """Tests for LocalConf.set_local when no localrc section exists yet."""

    def setUp(self):
        super(TestNoLocal, self).setUp()
        tmpdir = self.useFixture(fixtures.TempDir()).path
        self._path = tmpdir + "/local.conf"
        with open(self._path, "w") as f:
            f.write(BASIC_NO_LOCAL)

    def test_set_new(self):
        # Setting variables creates the [[local|localrc]] section on demand.
        conf = dsconf.LocalConf(self._path)
        conf.set_local("a=b")
        conf.set_local("c=d")
        with open(self._path) as f:
            written = f.read()
        self.assertEqual(written, RESULT_NO_LOCAL)
|
[
"devstack.dsconf.LocalConf",
"fixtures.TempDir"
] |
[((2212, 2240), 'devstack.dsconf.LocalConf', 'dsconf.LocalConf', (['self._path'], {}), '(self._path)\n', (2228, 2240), False, 'from devstack import dsconf\n'), ((2434, 2462), 'devstack.dsconf.LocalConf', 'dsconf.LocalConf', (['self._path'], {}), '(self._path)\n', (2450, 2462), False, 'from devstack import dsconf\n'), ((2651, 2679), 'devstack.dsconf.LocalConf', 'dsconf.LocalConf', (['self._path'], {}), '(self._path)\n', (2667, 2679), False, 'from devstack import dsconf\n'), ((2972, 3000), 'devstack.dsconf.LocalConf', 'dsconf.LocalConf', (['self._path'], {}), '(self._path)\n', (2988, 3000), False, 'from devstack import dsconf\n'), ((3563, 3591), 'devstack.dsconf.LocalConf', 'dsconf.LocalConf', (['self._path'], {}), '(self._path)\n', (3579, 3591), False, 'from devstack import dsconf\n'), ((2039, 2057), 'fixtures.TempDir', 'fixtures.TempDir', ([], {}), '()\n', (2055, 2057), False, 'import fixtures\n'), ((3381, 3399), 'fixtures.TempDir', 'fixtures.TempDir', ([], {}), '()\n', (3397, 3399), False, 'import fixtures\n')]
|
from flask_restplus import Namespace, Resource, fields
from flask_login import current_user
api = Namespace("checkout", description="Checkout related operations")
# Marshalling model for a single cart line.  Most fields are resolved off
# the related variant/product objects via the ``attribute`` dotted path.
cart = api.model(
    "CartLine",
    {
        "id": fields.Integer(required=True, description="The checkout cartline id"),
        "quantity": fields.Integer(required=True, description="The cart item num"),
        "title": fields.String(
            description="The cart item title", attribute="variant.product.title"
        ),
        "variant": fields.String(
            description="The cart item variant", attribute="variant.title"
        ),
        "product_id": fields.Integer(
            description="The cart item product", attribute="variant.product.id"
        ),
        "price": fields.Float(
            description="The cart item price", attribute="variant.price"
        ),
        "first_img": fields.String(
            description="The cart item image", attribute="variant.product.first_img"
        ),
    },
)
@api.route("/cart")
class CartIndex(Resource):
    @api.doc("list_products")
    @api.marshal_list_with(cart)
    def get(self):
        """Return the current user's cart lines, marshalled as CartLine."""
        return current_user.cart.lines
|
[
"flask_restplus.fields.Float",
"flask_restplus.fields.String",
"flask_restplus.Namespace",
"flask_restplus.fields.Integer"
] |
[((100, 164), 'flask_restplus.Namespace', 'Namespace', (['"""checkout"""'], {'description': '"""Checkout related operations"""'}), "('checkout', description='Checkout related operations')\n", (109, 164), False, 'from flask_restplus import Namespace, Resource, fields\n'), ((220, 289), 'flask_restplus.fields.Integer', 'fields.Integer', ([], {'required': '(True)', 'description': '"""The checkout cartline id"""'}), "(required=True, description='The checkout cartline id')\n", (234, 289), False, 'from flask_restplus import Namespace, Resource, fields\n'), ((311, 373), 'flask_restplus.fields.Integer', 'fields.Integer', ([], {'required': '(True)', 'description': '"""The cart item num"""'}), "(required=True, description='The cart item num')\n", (325, 373), False, 'from flask_restplus import Namespace, Resource, fields\n'), ((392, 480), 'flask_restplus.fields.String', 'fields.String', ([], {'description': '"""The cart item title"""', 'attribute': '"""variant.product.title"""'}), "(description='The cart item title', attribute=\n 'variant.product.title')\n", (405, 480), False, 'from flask_restplus import Namespace, Resource, fields\n'), ((518, 595), 'flask_restplus.fields.String', 'fields.String', ([], {'description': '"""The cart item variant"""', 'attribute': '"""variant.title"""'}), "(description='The cart item variant', attribute='variant.title')\n", (531, 595), False, 'from flask_restplus import Namespace, Resource, fields\n'), ((641, 729), 'flask_restplus.fields.Integer', 'fields.Integer', ([], {'description': '"""The cart item product"""', 'attribute': '"""variant.product.id"""'}), "(description='The cart item product', attribute=\n 'variant.product.id')\n", (655, 729), False, 'from flask_restplus import Namespace, Resource, fields\n'), ((765, 839), 'flask_restplus.fields.Float', 'fields.Float', ([], {'description': '"""The cart item price"""', 'attribute': '"""variant.price"""'}), "(description='The cart item price', attribute='variant.price')\n", (777, 839), False, 
'from flask_restplus import Namespace, Resource, fields\n'), ((884, 976), 'flask_restplus.fields.String', 'fields.String', ([], {'description': '"""The cart item image"""', 'attribute': '"""variant.product.first_img"""'}), "(description='The cart item image', attribute=\n 'variant.product.first_img')\n", (897, 976), False, 'from flask_restplus import Namespace, Resource, fields\n')]
|
from __future__ import unicode_literals
import collections
from django.utils.encoding import force_text
from rest_framework.compat import OrderedDict, unicode_to_repr
class ReturnDict(OrderedDict):
    """
    Dict returned from ``serializer.data`` for the ``Serializer`` class.

    Carries a ``.serializer`` backlink so that renderers can access richer
    field information when they need it.
    """

    def __init__(self, *args, **kwargs):
        # The backlink is supplied as a required keyword argument.
        self.serializer = kwargs.pop('serializer')
        super(ReturnDict, self).__init__(*args, **kwargs)

    def copy(self):
        # Copies keep the same serializer backlink.
        return ReturnDict(self, serializer=self.serializer)

    def __repr__(self):
        # Render as a plain dict, hiding the OrderedDict machinery.
        return dict.__repr__(self)

    def __reduce__(self):
        # Pickle as a plain dict: the raw data survives, while the
        # serializer backlink is deliberately dropped.
        return (dict, (dict(self),))
class ReturnList(list):
    """
    List returned from ``serializer.data`` for the list serializer classes.

    Carries a ``.serializer`` backlink so that renderers can access richer
    field information when they need it.
    """

    def __init__(self, *args, **kwargs):
        # The backlink is supplied as a required keyword argument.
        self.serializer = kwargs.pop('serializer')
        super(ReturnList, self).__init__(*args, **kwargs)

    def __repr__(self):
        return list.__repr__(self)

    def __reduce__(self):
        # Pickle as a plain list: the raw data survives, while the
        # serializer backlink is deliberately dropped.
        return (list, (list(self),))
class BoundField(object):
    """
    A field object that also exposes ``.value`` and ``.errors``.

    Yielded when iterating over a serializer instance, giving an API
    similar to Django forms and form fields.
    """

    def __init__(self, field, value, errors, prefix=''):
        self._field = field
        self._prefix = prefix
        self.value = value
        self.errors = errors
        # field_name resolves on the wrapped field via __getattr__ below.
        self.name = prefix + self.field_name

    def __getattr__(self, attr_name):
        # Any attribute not found here is delegated to the wrapped field.
        return getattr(self._field, attr_name)

    @property
    def _proxy_class(self):
        return self._field.__class__

    def __repr__(self):
        return unicode_to_repr('<%s value=%s errors=%s>' % (
            self.__class__.__name__, self.value, self.errors
        ))

    def as_form_field(self):
        # Render None/False as the empty string, mirroring HTML form inputs.
        if self.value is None or self.value is False:
            rendered = ''
        else:
            rendered = force_text(self.value)
        return self.__class__(self._field, rendered, self.errors, self._prefix)
class NestedBoundField(BoundField):
    """
    A `BoundField` that additionally implements ``__iter__`` and
    ``__getitem__`` in order to support nested bound fields.  This is the
    kind of `BoundField` used for serializer fields.
    """

    def __init__(self, field, value, errors, prefix=''):
        # Normalise a missing value to an empty mapping so child lookups work.
        if value is None:
            value = {}
        super(NestedBoundField, self).__init__(field, value, errors, prefix)

    def __iter__(self):
        for child in self.fields.values():
            yield self[child.field_name]

    def __getitem__(self, key):
        child = self.fields[key]
        value = self.value.get(key) if self.value else None
        error = self.errors.get(key) if self.errors else None
        child_prefix = self.name + '.'
        if hasattr(child, 'fields'):
            # Nested serializer field: recurse with another NestedBoundField.
            return NestedBoundField(child, value, error, prefix=child_prefix)
        return BoundField(child, value, error, prefix=child_prefix)

    def as_form_field(self):
        values = {}
        for key, value in self.value.items():
            if isinstance(value, (list, dict)):
                # Composite values are passed through untouched.
                values[key] = value
            else:
                values[key] = '' if value is None else force_text(value)
        return self.__class__(self._field, values, self.errors, self._prefix)
class BindingDict(collections.MutableMapping):
    """
    Dict-like container used to store fields on a serializer.

    Whenever a field is added we call ``field.bind()`` so that its
    ``field_name`` and ``parent`` attributes are set correctly.
    """

    def __init__(self, serializer):
        self.serializer = serializer
        self.fields = OrderedDict()

    def __setitem__(self, key, field):
        self.fields[key] = field
        # Bind immediately so the field knows its name and parent.
        field.bind(field_name=key, parent=self.serializer)

    def __getitem__(self, key):
        return self.fields[key]

    def __delitem__(self, key):
        del self.fields[key]

    def __iter__(self):
        return iter(self.fields)

    def __len__(self):
        return len(self.fields)

    def __repr__(self):
        # Render the underlying mapping as a plain dict.
        return dict.__repr__(self.fields)
[
"rest_framework.compat.OrderedDict",
"rest_framework.compat.unicode_to_repr",
"django.utils.encoding.force_text"
] |
[((2131, 2231), 'rest_framework.compat.unicode_to_repr', 'unicode_to_repr', (["('<%s value=%s errors=%s>' % (self.__class__.__name__, self.value, self.errors)\n )"], {}), "('<%s value=%s errors=%s>' % (self.__class__.__name__, self.\n value, self.errors))\n", (2146, 2231), False, 'from rest_framework.compat import OrderedDict, unicode_to_repr\n'), ((4115, 4128), 'rest_framework.compat.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4126, 4128), False, 'from rest_framework.compat import OrderedDict, unicode_to_repr\n'), ((2350, 2372), 'django.utils.encoding.force_text', 'force_text', (['self.value'], {}), '(self.value)\n', (2360, 2372), False, 'from django.utils.encoding import force_text\n'), ((3622, 3639), 'django.utils.encoding.force_text', 'force_text', (['value'], {}), '(value)\n', (3632, 3639), False, 'from django.utils.encoding import force_text\n')]
|
# --------------------------------------------------------
# Flow-Guided Feature Aggregation
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""
given a imagenet vid imdb, compute mAP
"""
import numpy as np
import os
import cPickle
def parse_vid_rec(filename, classhash, img_ids, defaultIOUthr=0.5, pixelTolerance=10):
    """
    Parse an ImageNet VID annotation xml file into a record dictionary.

    :param filename: xml annotation file path
    :param classhash: dict mapping class names (as found in the xml) to
        integer labels
    :param img_ids: image id stored alongside the parsed objects
    :param defaultIOUthr: upper bound for the per-object IoU threshold
    :param pixelTolerance: border tolerance that relaxes the IoU threshold
        for small objects
    :return: dict with keys 'bbox', 'label', 'thr' (numpy arrays over the
        objects in the file) and 'img_ids'
    """
    import xml.etree.ElementTree as ET
    tree = ET.parse(filename)
    objects = []
    for obj in tree.findall('object'):
        bbox = obj.find('bndbox')
        coords = [float(bbox.find(tag).text)
                  for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
        # Small boxes get a threshold below defaultIOUthr: the pixel
        # tolerance enlarges the box in the denominator, shrinking the ratio.
        gt_w = coords[2] - coords[0] + 1
        gt_h = coords[3] - coords[1] + 1
        thr = (gt_w * gt_h) / ((gt_w + pixelTolerance) * (gt_h + pixelTolerance))
        objects.append({'label': classhash[obj.find('name').text],
                        'bbox': coords,
                        # builtin min: no need to build an ndarray per object
                        'thr': min(thr, defaultIOUthr)})
    return {'bbox' : np.array([x['bbox'] for x in objects]),
            'label': np.array([x['label'] for x in objects]),
            'thr'  : np.array([x['thr'] for x in objects]),
            'img_ids': img_ids}
def vid_ap(rec, prec):
    """
    Average precision: precision integrated over recall.

    :param rec: recall values
    :param prec: precision values at the corresponding recalls
    :return: average precision
    """
    # Sentinel values at both ends so the envelope is well defined.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Make precision non-increasing via a right-to-left running maximum.
    for k in range(mpre.size - 1, 0, -1):
        mpre[k - 1] = np.maximum(mpre[k - 1], mpre[k])
    # Indices where recall changes; accumulate (delta recall) * precision.
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
def vid_eval(multifiles, detpath, annopath, imageset_file, classname_map, annocache, ovthresh=0.5):
"""
imagenet vid evaluation
:param detpath: detection results detpath.format(classname)
:param annopath: annotations annopath.format(classname)
:param imageset_file: text file containing list of images
:param annocache: caching annotations
:param ovthresh: overlap threshold
:return: rec, prec, ap
"""
with open(imageset_file, 'r') as f:
lines = [x.strip().split(' ') for x in f.readlines()]
img_basenames = [x[0] for x in lines]
gt_img_ids = [int(x[1]) for x in lines]
classhash = dict(zip(classname_map, range(0,len(classname_map))))
# load annotations from cache
if not os.path.isfile(annocache):
recs = []
for ind, image_filename in enumerate(img_basenames):
recs.append(parse_vid_rec(annopath.format('VID/' + image_filename), classhash, gt_img_ids[ind]))
if ind % 100 == 0:
print('reading annotations for {:d}/{:d}'.format(ind + 1, len(img_basenames)))
print('saving annotations cache to {:s}'.format(annocache))
with open(annocache, 'wb') as f:
cPickle.dump(recs, f, protocol=cPickle.HIGHEST_PROTOCOL)
else:
with open(annocache, 'rb') as f:
recs = cPickle.load(f)
# extract objects in :param classname:
npos = np.zeros(len(classname_map))
for rec in recs:
rec_labels = rec['label']
for x in rec_labels:
npos[x] += 1
# read detections
splitlines = []
if (multifiles == False):
with open(detpath, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
else:
for det in detpath:
with open(det, 'r') as f:
lines = f.readlines()
splitlines += [x.strip().split(' ') for x in lines]
img_ids = np.array([int(x[0]) for x in splitlines])
obj_labels = np.array([int(x[1]) for x in splitlines])
obj_confs = np.array([float(x[2]) for x in splitlines])
obj_bboxes = np.array([[float(z) for z in x[3:]] for x in splitlines])
# sort by confidence
if obj_bboxes.shape[0] > 0:
sorted_inds = np.argsort(img_ids)
img_ids = img_ids[sorted_inds]
obj_labels = obj_labels[sorted_inds]
obj_confs = obj_confs[sorted_inds]
obj_bboxes = obj_bboxes[sorted_inds, :]
num_imgs = max(max(gt_img_ids),max(img_ids)) + 1
obj_labels_cell = [None] * num_imgs
obj_confs_cell = [None] * num_imgs
obj_bboxes_cell = [None] * num_imgs
start_i = 0
id = img_ids[0]
for i in range(0, len(img_ids)):
if i == len(img_ids)-1 or img_ids[i+1] != id:
conf = obj_confs[start_i:i+1]
label = obj_labels[start_i:i+1]
bbox = obj_bboxes[start_i:i+1, :]
sorted_inds = np.argsort(-conf)
obj_labels_cell[id] = label[sorted_inds]
obj_confs_cell[id] = conf[sorted_inds]
obj_bboxes_cell[id] = bbox[sorted_inds, :]
if i < len(img_ids)-1:
id = img_ids[i+1]
start_i = i+1
# go down detections and mark true positives and false positives
tp_cell = [None] * num_imgs
fp_cell = [None] * num_imgs
for rec in recs:
id = rec['img_ids']
gt_labels = rec['label']
gt_bboxes = rec['bbox']
gt_thr = rec['thr']
num_gt_obj = len(gt_labels)
gt_detected = np.zeros(num_gt_obj)
labels = obj_labels_cell[id]
bboxes = obj_bboxes_cell[id]
num_obj = 0 if labels is None else len(labels)
tp = np.zeros(num_obj)
fp = np.zeros(num_obj)
for j in range(0,num_obj):
bb = bboxes[j, :]
ovmax = -1
kmax = -1
for k in range(0,num_gt_obj):
if labels[j] != gt_labels[k]:
continue
if gt_detected[k] > 0:
continue
bbgt = gt_bboxes[k, :]
bi=[np.max((bb[0],bbgt[0])), np.max((bb[1],bbgt[1])), np.min((bb[2],bbgt[2])), np.min((bb[3],bbgt[3]))]
iw=bi[2]-bi[0]+1
ih=bi[3]-bi[1]+1
if iw>0 and ih>0:
# compute overlap as area of intersection / area of union
ua = (bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) + \
(bbgt[2] - bbgt[0] + 1.) * \
(bbgt[3] - bbgt[1] + 1.) - iw*ih
ov=iw*ih/ua
# makes sure that this object is detected according
# to its individual threshold
if ov >= gt_thr[k] and ov > ovmax:
ovmax=ov
kmax=k
if kmax >= 0:
tp[j] = 1
gt_detected[kmax] = 1
else:
fp[j] = 1
tp_cell[id] = tp
fp_cell[id] = fp
tp_all = np.concatenate([x for x in np.array(tp_cell)[gt_img_ids] if x is not None])
fp_all = np.concatenate([x for x in np.array(fp_cell)[gt_img_ids] if x is not None])
obj_labels = np.concatenate([x for x in np.array(obj_labels_cell)[gt_img_ids] if x is not None])
confs = np.concatenate([x for x in np.array(obj_confs_cell)[gt_img_ids] if x is not None])
sorted_inds = np.argsort(-confs)
tp_all = tp_all[sorted_inds]
fp_all = fp_all[sorted_inds]
obj_labels = obj_labels[sorted_inds]
ap = np.zeros(len(classname_map))
for c in range(1, len(classname_map)):
# compute precision recall
fp = np.cumsum(fp_all[obj_labels == c])
tp = np.cumsum(tp_all[obj_labels == c])
rec = tp / float(npos[c])
# avoid division by zero in case first detection matches a difficult ground ruth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap[c] = vid_ap(rec, prec)
ap = ap[1:]
return ap
|
[
"xml.etree.ElementTree.parse",
"numpy.sum",
"numpy.maximum",
"numpy.zeros",
"cPickle.load",
"numpy.argsort",
"numpy.cumsum",
"numpy.min",
"numpy.where",
"numpy.array",
"os.path.isfile",
"cPickle.dump",
"numpy.max",
"numpy.finfo",
"numpy.concatenate"
] |
[((619, 637), 'xml.etree.ElementTree.parse', 'ET.parse', (['filename'], {}), '(filename)\n', (627, 637), True, 'import xml.etree.ElementTree as ET\n'), ((1799, 1834), 'numpy.concatenate', 'np.concatenate', (['([0.0], rec, [1.0])'], {}), '(([0.0], rec, [1.0]))\n', (1813, 1834), True, 'import numpy as np\n'), ((1844, 1880), 'numpy.concatenate', 'np.concatenate', (['([0.0], prec, [0.0])'], {}), '(([0.0], prec, [0.0]))\n', (1858, 1880), True, 'import numpy as np\n'), ((2143, 2188), 'numpy.sum', 'np.sum', (['((mrec[i + 1] - mrec[i]) * mpre[i + 1])'], {}), '((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n', (2149, 2188), True, 'import numpy as np\n'), ((7628, 7646), 'numpy.argsort', 'np.argsort', (['(-confs)'], {}), '(-confs)\n', (7638, 7646), True, 'import numpy as np\n'), ((1271, 1299), 'numpy.min', 'np.min', (['[thr, defaultIOUthr]'], {}), '([thr, defaultIOUthr])\n', (1277, 1299), True, 'import numpy as np\n'), ((1354, 1392), 'numpy.array', 'np.array', (["[x['bbox'] for x in objects]"], {}), "([x['bbox'] for x in objects])\n", (1362, 1392), True, 'import numpy as np\n'), ((1416, 1455), 'numpy.array', 'np.array', (["[x['label'] for x in objects]"], {}), "([x['label'] for x in objects])\n", (1424, 1455), True, 'import numpy as np\n'), ((1479, 1516), 'numpy.array', 'np.array', (["[x['thr'] for x in objects]"], {}), "([x['thr'] for x in objects])\n", (1487, 1516), True, 'import numpy as np\n'), ((1987, 2019), 'numpy.maximum', 'np.maximum', (['mpre[i - 1]', 'mpre[i]'], {}), '(mpre[i - 1], mpre[i])\n', (1997, 2019), True, 'import numpy as np\n'), ((2065, 2096), 'numpy.where', 'np.where', (['(mrec[1:] != mrec[:-1])'], {}), '(mrec[1:] != mrec[:-1])\n', (2073, 2096), True, 'import numpy as np\n'), ((2951, 2976), 'os.path.isfile', 'os.path.isfile', (['annocache'], {}), '(annocache)\n', (2965, 2976), False, 'import os\n'), ((4462, 4481), 'numpy.argsort', 'np.argsort', (['img_ids'], {}), '(img_ids)\n', (4472, 4481), True, 'import numpy as np\n'), ((5728, 5748), 'numpy.zeros', 
'np.zeros', (['num_gt_obj'], {}), '(num_gt_obj)\n', (5736, 5748), True, 'import numpy as np\n'), ((5893, 5910), 'numpy.zeros', 'np.zeros', (['num_obj'], {}), '(num_obj)\n', (5901, 5910), True, 'import numpy as np\n'), ((5924, 5941), 'numpy.zeros', 'np.zeros', (['num_obj'], {}), '(num_obj)\n', (5932, 5941), True, 'import numpy as np\n'), ((7884, 7918), 'numpy.cumsum', 'np.cumsum', (['fp_all[obj_labels == c]'], {}), '(fp_all[obj_labels == c])\n', (7893, 7918), True, 'import numpy as np\n'), ((7932, 7966), 'numpy.cumsum', 'np.cumsum', (['tp_all[obj_labels == c]'], {}), '(tp_all[obj_labels == c])\n', (7941, 7966), True, 'import numpy as np\n'), ((3413, 3469), 'cPickle.dump', 'cPickle.dump', (['recs', 'f'], {'protocol': 'cPickle.HIGHEST_PROTOCOL'}), '(recs, f, protocol=cPickle.HIGHEST_PROTOCOL)\n', (3425, 3469), False, 'import cPickle\n'), ((3540, 3555), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (3552, 3555), False, 'import cPickle\n'), ((5115, 5132), 'numpy.argsort', 'np.argsort', (['(-conf)'], {}), '(-conf)\n', (5125, 5132), True, 'import numpy as np\n'), ((6297, 6321), 'numpy.max', 'np.max', (['(bb[0], bbgt[0])'], {}), '((bb[0], bbgt[0]))\n', (6303, 6321), True, 'import numpy as np\n'), ((6322, 6346), 'numpy.max', 'np.max', (['(bb[1], bbgt[1])'], {}), '((bb[1], bbgt[1]))\n', (6328, 6346), True, 'import numpy as np\n'), ((6347, 6371), 'numpy.min', 'np.min', (['(bb[2], bbgt[2])'], {}), '((bb[2], bbgt[2]))\n', (6353, 6371), True, 'import numpy as np\n'), ((6372, 6396), 'numpy.min', 'np.min', (['(bb[3], bbgt[3])'], {}), '((bb[3], bbgt[3]))\n', (6378, 6396), True, 'import numpy as np\n'), ((7275, 7292), 'numpy.array', 'np.array', (['tp_cell'], {}), '(tp_cell)\n', (7283, 7292), True, 'import numpy as np\n'), ((7364, 7381), 'numpy.array', 'np.array', (['fp_cell'], {}), '(fp_cell)\n', (7372, 7381), True, 'import numpy as np\n'), ((7457, 7482), 'numpy.array', 'np.array', (['obj_labels_cell'], {}), '(obj_labels_cell)\n', (7465, 7482), True, 'import numpy as np\n'), 
((7553, 7577), 'numpy.array', 'np.array', (['obj_confs_cell'], {}), '(obj_confs_cell)\n', (7561, 7577), True, 'import numpy as np\n'), ((8130, 8150), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (8138, 8150), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
#
# SPDX-License-Identifier: BSD-2-Clause
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
import unittest
from capdl import ELF
from tests import CapdlTestCase
class TestElf(CapdlTestCase):
def test_elf(self):
elf = ELF('resources/arm-hello.bin')
assert elf.get_arch() in [40, 'EM_ARM', 'ARM']
elf.get_spec()
def test_ia32_elf(self):
elf = ELF('resources/ia32-hello.bin')
assert elf.get_arch() == 'x86'
elf.get_spec()
def test_symbol_lookup(self):
elf = ELF('resources/unstripped.bin')
assert elf.get_arch() == 'x86'
# Confirm that the address concurs with the one we get from objdump.
assert elf.get_symbol_vaddr('_start') == 0x08048d48
elf = ELF('resources/stripped.bin')
assert elf.get_arch() == 'x86'
# We shouldn't be able to get the symbol from the stripped binary.
try:
vaddr = elf.get_symbol_vaddr('_start')
assert not ('Symbol lookup on a stripped binary returned _start == 0x%0.8x' % vaddr)
except:
# Expected
pass
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"capdl.ELF"
] |
[((1270, 1285), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1283, 1285), False, 'import unittest\n'), ((354, 384), 'capdl.ELF', 'ELF', (['"""resources/arm-hello.bin"""'], {}), "('resources/arm-hello.bin')\n", (357, 384), False, 'from capdl import ELF\n'), ((507, 538), 'capdl.ELF', 'ELF', (['"""resources/ia32-hello.bin"""'], {}), "('resources/ia32-hello.bin')\n", (510, 538), False, 'from capdl import ELF\n'), ((651, 682), 'capdl.ELF', 'ELF', (['"""resources/unstripped.bin"""'], {}), "('resources/unstripped.bin')\n", (654, 682), False, 'from capdl import ELF\n'), ((875, 904), 'capdl.ELF', 'ELF', (['"""resources/stripped.bin"""'], {}), "('resources/stripped.bin')\n", (878, 904), False, 'from capdl import ELF\n')]
|
# lshash/lshash.py
# Copyright 2012 <NAME> (a.k.a <NAME>) and contributors (see CONTRIBUTORS.txt)
#
# This module is part of lshash and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# wiki:https://yongyuan.name/blog/ann-search.html
import os
import json
import numpy as np
from storage import storage
try:
from bitarray import bitarray
except ImportError:
bitarray = None
class LSHash(object):
""" LSHash implments locality sensitive hashing using random projection for
input vectors of dimension `input_dim`.
Attributes:
:param hash_size:
The length of the resulting binary hash in integer. E.g., 32 means the
resulting binary hash will be 32-bit long.
:param input_dim:
The dimension of the input vector. E.g., a grey-scale picture of 30x30
pixels will have an input dimension of 900.
:param num_hashtables:
(optional) The number of hash tables used for multiple lookups.
:param storage_config:
(optional) A dictionary of the form `{backend_name: config}` where
`backend_name` is the either `dict` or `redis`, and `config` is the
configuration used by the backend. For `redis` it should be in the
format of `{"redis": {"host": hostname, "port": port_num}}`, where
`hostname` is normally `localhost` and `port` is normally 6379.
:param matrices_filename:
(optional) Specify the path to the compressed numpy file ending with
extension `.npz`, where the uniform random planes are stored, or to be
stored if the file does not exist yet.
:param overwrite:
(optional) Whether to overwrite the matrices file if it already exist
"""
def __init__(self, hash_size, input_dim, num_hashtables=1,
storage_config=None, matrices_filename=None, overwrite=False):
self.hash_size = hash_size
self.input_dim = input_dim
self.num_hashtables = num_hashtables
if storage_config is None:
storage_config = {'dict': None}
self.storage_config = storage_config
if matrices_filename and not matrices_filename.endswith('.npz'):
raise ValueError("The specified file name must end with .npz")
self.matrices_filename = matrices_filename
self.overwrite = overwrite
self._init_uniform_planes()
self._init_hashtables()
def _init_uniform_planes(self):
""" Initialize uniform planes used to calculate the hashes
if file `self.matrices_filename` exist and `self.overwrite` is
selected, save the uniform planes to the specified file.
if file `self.matrices_filename` exist and `self.overwrite` is not
selected, load the matrix with `np.load`.
if file `self.matrices_filename` does not exist and regardless of
`self.overwrite`, only set `self.uniform_planes`.
"""
if "uniform_planes" in self.__dict__:
return
if self.matrices_filename:
file_exist = os.path.isfile(self.matrices_filename)
if file_exist and not self.overwrite:
try:
npzfiles = np.load(self.matrices_filename)
except IOError:
print("Cannot load specified file as a numpy array")
raise
else:
npzfiles = sorted(npzfiles.items(), key=lambda x: x[0])
self.uniform_planes = [t[1] for t in npzfiles]
else:
self.uniform_planes = [self._generate_uniform_planes()
for _ in range(self.num_hashtables)]
try:
np.savez_compressed(self.matrices_filename,
*self.uniform_planes)
except IOError:
print("IOError when saving matrices to specificed path")
raise
else:
# 生成num_hashtable个随机划分表,每个表维度:[hash_size, dim]
self.uniform_planes = [self._generate_uniform_planes()
for _ in range(self.num_hashtables)]
def _init_hashtables(self):
""" Initialize the hash tables such that each record will be in the
form of "[storage1, storage2, ...]" """
# 初始化num_hashtable个hash表
self.hash_tables = [storage(self.storage_config, i)
for i in range(self.num_hashtables)]
def _generate_uniform_planes(self):
""" Generate uniformly distributed hyperplanes and return it as a 2D
numpy array.
"""
# 随机矩阵[hash_size, input_dim], 矩阵每行代表一条直线的法向量,Ax>0即代表与所有平面的法向量的夹角<90的所有点x的集合
return np.random.randn(self.hash_size, self.input_dim)
def _hash(self, planes, input_point):
""" Generates the binary hash for `input_point` and returns it.
:param planes:
The planes are random uniform planes with a dimension of
`hash_size` * `input_dim`.
:param input_point:
A Python tuple or list object that contains only numbers.
The dimension needs to be 1 * `input_dim`.
"""
try:
input_point = np.array(input_point) # for faster dot product
# planes:[hash_size, dim]
# input_point:[dim], 亦可理解为input_point: [dim,1]
# projections:[hash_size] = planes*input_point
# 矩阵planes与input_points相乘的几何意义就是判断input_points是否与planes每行的直线法向量的乘积
projections = np.dot(planes, input_point)
except TypeError as e:
print("""The input point needs to be an array-like object with
numbers only elements""")
raise
except ValueError as e:
print("""The input point needs to be of the same dimension as
`input_dim` when initializing this LSHash instance""", e)
raise
else:
# 比如可能是'001'
return "".join(['1' if i > 0 else '0' for i in projections])
def _as_np_array(self, json_or_tuple):
""" Takes either a JSON-serialized data structure or a tuple that has
the original input points stored, and returns the original input point
in numpy array format.
"""
if isinstance(json_or_tuple, str):
# JSON-serialized in the case of Redis
try:
# Return the point stored as list, without the extra data
tuples = json.loads(json_or_tuple)[0]
except TypeError:
print("The value stored is not JSON-serilizable")
raise
else:
# If extra_data exists, `tuples` is the entire
# (point:tuple, extra_data). Otherwise (i.e., extra_data=None),
# return the point stored as a tuple
tuples = json_or_tuple
if isinstance(tuples[0], tuple):
# in this case extra data exists
return np.asarray(tuples[0])
elif isinstance(tuples, (tuple, list)):
try:
return np.asarray(tuples)
except ValueError as e:
print("The input needs to be an array-like object", e)
raise
else:
raise TypeError("query data is not supported")
def index(self, input_point, extra_data=None):
""" Index a single input point by adding it to the selected storage.
If `extra_data` is provided, it will become the value of the dictionary
{input_point: extra_data}, which in turn will become the value of the
hash table. `extra_data` needs to be JSON serializable if in-memory
dict is not used as storage.
:param input_point:
A list, or tuple, or numpy ndarray object that contains numbers
only. The dimension needs to be 1 * `input_dim`.
This object will be converted to Python tuple and stored in the
selected storage.
:param extra_data:
(optional) Needs to be a JSON-serializable object: list, dicts and
basic types such as strings and integers.
"""
if isinstance(input_point, np.ndarray):
input_point = input_point.tolist()
if extra_data:
value = (tuple(input_point), extra_data)
else:
value = tuple(input_point)
# 每个hash表均要存一下hash串+原始point
for i, table in enumerate(self.hash_tables):
# 生成num_hashtable个随机划分表,每个表维度:[hash_size, dim]
hash_code = self._hash(self.uniform_planes[i], input_point)
# 之所以有多个表,就是为了增大召回,即只要有一个划分方式一致就可以召回
table.append_val(key=hash_code, val=value)
def query(self, query_point, num_results=None, distance_func_for_hash=None):
""" Takes `query_point` which is either a tuple or a list of numbers,
returns `num_results` of results as a list of tuples that are ranked
based on the supplied metric function `distance_func`.
:param query_point:
A list, or tuple, or numpy ndarray that only contains numbers.
The dimension needs to be 1 * `input_dim`.
Used by :meth:`._hash`.
:param num_results:
(optional) Integer, specifies the max amount of results to be
returned. If not specified all candidates will be returned as a
list in ranked order.
:param distance_func_for_hash:
(optional) The distance function to be used. Currently it needs to
be one of ("hamming", "euclidean", "true_euclidean",
"centred_euclidean", "cosine", "l1norm"). By default "euclidean"
will used.
"""
candidates = set()
if not distance_func_for_hash:
distance_func_for_hash = "euclidean"
if distance_func_for_hash == "hamming": # 此时实际上就是multi-probe的方式进行探测
if not bitarray:
raise ImportError(" Bitarray is required for hamming distance")
for i, table in enumerate(self.hash_tables):
binary_hash_for_query = self._hash(self.uniform_planes[i], query_point)
for key in table.keys():
distance = LSHash.hamming_dist(key, binary_hash_for_query)
# 所有hamming距离<2的全都加入候选集合,注意,不一定是相同hash_key下的所有候选point
if distance < 2:
# 将该hash_key下所有的原始值全加入set
candidates.update(table.get_list(key))
d_func_for_rank = LSHash.euclidean_dist_square
else: # euclidean
if distance_func_for_hash == "euclidean":
d_func_for_rank = LSHash.euclidean_dist_square
elif distance_func_for_hash == "true_euclidean":
d_func_for_rank = LSHash.euclidean_dist
elif distance_func_for_hash == "centred_euclidean":
d_func_for_rank = LSHash.euclidean_dist_centred
elif distance_func_for_hash == "cosine":
d_func_for_rank = LSHash.cosine_dist
elif distance_func_for_hash == "l1norm":
d_func_for_rank = LSHash.l1norm_dist
else:
raise ValueError("The distance function name is invalid.")
# 只有hash值相同的才认为是候选集合,只要有一个hash表认为是候选就加入候选
for i, table in enumerate(self.hash_tables):
binary_hash_for_query = self._hash(self.uniform_planes[i], query_point)
candidates.update(table.get_list(binary_hash_for_query))
# rank candidates by distance function
# 计算query与每个候选集原始值的距离
# [(candidate_point, distance),... ]
candidates = [(candidate_point, d_func_for_rank(query_point, self._as_np_array(candidate_point)))
for candidate_point in candidates]
candidates.sort(key=lambda x: x[1]) # 按距离升序排序
# 选出距离最近的topK
return candidates[:num_results] if num_results else candidates
### distance functions
# 海明距离是直接异或么?直接数不同的位数的个数
@staticmethod
def hamming_dist(bitarray1, bitarray2):
xor_result = bitarray(bitarray1) ^ bitarray(bitarray2)
return xor_result.count()
@staticmethod
def euclidean_dist(x, y):
""" This is a hot function, hence some optimizations are made. """
return np.sqrt(LSHash.euclidean_dist_square(x,y))
@staticmethod
def euclidean_dist_square(x, y):
""" This is a hot function, hence some optimizations are made. """
diff = np.array(x) - y
return np.dot(diff, diff)
@staticmethod
def euclidean_dist_centred(x, y):
""" This is a hot function, hence some optimizations are made. """
diff = np.mean(x) - np.mean(y)
return np.dot(diff, diff)
@staticmethod
def l1norm_dist(x, y):
return sum(abs(x - y))
@staticmethod
def cosine_dist(x, y):
return 1 - np.dot(x, y) / ((np.dot(x, x) * np.dot(y, y)) ** 0.5)
if __name__ == '__main__':
lsh = LSHash(hash_size=6, input_dim=8, num_hashtables=3)
# 给数据建立hash索引
lsh.index(input_point=[1, 2, 3, 4, 5, 6, 7, 8])
lsh.index(input_point=[2, 3, 4, 5, 6, 7, 8, 9])
lsh.index(input_point=[1, 2, 3, 4, 4, 6, 7, 8])
lsh.index(input_point=[1, 2, 3, 3, 5, 6, 7, 8])
lsh.index(input_point=[1, 2, 3, 4, 5, 6, 7, 9])
lsh.index(input_point=[2, 2, 3, 4, 5, 6, 7, 9])
lsh.index(input_point=[2, -2, 3, 4, 5, 6, 7, 9])
lsh.index(input_point=[-1, 2, 3, 4, 5, 6, 7, 9])
lsh.index(input_point=[10, 12, 99, 1, 5, 31, 2, 3])
# 查询
res = lsh.query(query_point=[1, 2, 3, 4, 5, 6, 7, 7], num_results=4)
print(res)
|
[
"storage.storage",
"numpy.load",
"json.loads",
"numpy.random.randn",
"numpy.asarray",
"os.path.isfile",
"numpy.mean",
"numpy.array",
"numpy.savez_compressed",
"numpy.dot",
"bitarray.bitarray"
] |
[((4758, 4805), 'numpy.random.randn', 'np.random.randn', (['self.hash_size', 'self.input_dim'], {}), '(self.hash_size, self.input_dim)\n', (4773, 4805), True, 'import numpy as np\n'), ((12575, 12593), 'numpy.dot', 'np.dot', (['diff', 'diff'], {}), '(diff, diff)\n', (12581, 12593), True, 'import numpy as np\n'), ((12780, 12798), 'numpy.dot', 'np.dot', (['diff', 'diff'], {}), '(diff, diff)\n', (12786, 12798), True, 'import numpy as np\n'), ((3065, 3103), 'os.path.isfile', 'os.path.isfile', (['self.matrices_filename'], {}), '(self.matrices_filename)\n', (3079, 3103), False, 'import os\n'), ((4411, 4442), 'storage.storage', 'storage', (['self.storage_config', 'i'], {}), '(self.storage_config, i)\n', (4418, 4442), False, 'from storage import storage\n'), ((5258, 5279), 'numpy.array', 'np.array', (['input_point'], {}), '(input_point)\n', (5266, 5279), True, 'import numpy as np\n'), ((5567, 5594), 'numpy.dot', 'np.dot', (['planes', 'input_point'], {}), '(planes, input_point)\n', (5573, 5594), True, 'import numpy as np\n'), ((7015, 7036), 'numpy.asarray', 'np.asarray', (['tuples[0]'], {}), '(tuples[0])\n', (7025, 7036), True, 'import numpy as np\n'), ((12140, 12159), 'bitarray.bitarray', 'bitarray', (['bitarray1'], {}), '(bitarray1)\n', (12148, 12159), False, 'from bitarray import bitarray\n'), ((12162, 12181), 'bitarray.bitarray', 'bitarray', (['bitarray2'], {}), '(bitarray2)\n', (12170, 12181), False, 'from bitarray import bitarray\n'), ((12544, 12555), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (12552, 12555), True, 'import numpy as np\n'), ((12741, 12751), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (12748, 12751), True, 'import numpy as np\n'), ((12754, 12764), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (12761, 12764), True, 'import numpy as np\n'), ((12941, 12953), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (12947, 12953), True, 'import numpy as np\n'), ((3206, 3237), 'numpy.load', 'np.load', (['self.matrices_filename'], {}), 
'(self.matrices_filename)\n', (3213, 3237), True, 'import numpy as np\n'), ((3740, 3805), 'numpy.savez_compressed', 'np.savez_compressed', (['self.matrices_filename', '*self.uniform_planes'], {}), '(self.matrices_filename, *self.uniform_planes)\n', (3759, 3805), True, 'import numpy as np\n'), ((6529, 6554), 'json.loads', 'json.loads', (['json_or_tuple'], {}), '(json_or_tuple)\n', (6539, 6554), False, 'import json\n'), ((7126, 7144), 'numpy.asarray', 'np.asarray', (['tuples'], {}), '(tuples)\n', (7136, 7144), True, 'import numpy as np\n'), ((12958, 12970), 'numpy.dot', 'np.dot', (['x', 'x'], {}), '(x, x)\n', (12964, 12970), True, 'import numpy as np\n'), ((12973, 12985), 'numpy.dot', 'np.dot', (['y', 'y'], {}), '(y, y)\n', (12979, 12985), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import unittest
from network import TargetNetwork, Network
class TargetNetworkTest(unittest.TestCase):
def test_init(self):
np.random.seed(seed=0)
target_network = TargetNetwork()
for i in range(10):
input_values, target_values = target_network.get_training_pair()
self.assertEqual(input_values.shape, (30, ))
self.assertEqual(target_values.shape, (10, ))
print(target_values)
"""
def test_save(self):
np.random.seed(seed=0)
target_network = TargetNetwork()
target_network.save("tmp")
target_network.load("tmp")
"""
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"network.TargetNetwork",
"numpy.random.seed"
] |
[((829, 844), 'unittest.main', 'unittest.main', ([], {}), '()\n', (842, 844), False, 'import unittest\n'), ((292, 314), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(0)'}), '(seed=0)\n', (306, 314), True, 'import numpy as np\n'), ((340, 355), 'network.TargetNetwork', 'TargetNetwork', ([], {}), '()\n', (353, 355), False, 'from network import TargetNetwork, Network\n')]
|
# Python3 program to print DFS traversal
from collections import defaultdict
# This class represents a directed graph using
# adjacency list representation
class Graph:
# Constructor
def __init__(self):
# default dictionary to store graph
self.graph = defaultdict(list)
def addEdge(self, u, v):
self.graph[u].append(v)
# A function used by DFS
def DFSUtil(self, v, visited):
# Mark the current node as visited
# and print it
visited[v] = True
print(v, end = ' ')
# Recur for all the vertices
# adjacent to this vertex
for i in self.graph[v]:
if visited[i] == False:
self.DFSUtil(i, visited)
# The function to do DFS traversal. It uses
# recursive DFSUtil()
def DFS(self, v):
# Mark all the vertices as not visited
visited = [False] * (len(self.graph))
# Call the recursive helper function
# to print DFS traversal
self.DFSUtil(v, visited)
# Driver code
# Create a graph given
# in the above diagram
g = Graph()
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(2, 3)
g.addEdge(3, 3)
print("Following is DFS from (starting from vertex 2)")
g.DFS(2)
|
[
"collections.defaultdict"
] |
[((295, 312), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (306, 312), False, 'from collections import defaultdict\n')]
|
from mpvr.datamodule.manager import Manager as dm
from mpvr.utils.process import *
from scipy.signal import savgol_filter
import numpy as np
import pandas as pd
dm = dm.from_config(dm.section_list()[1])
max_val = 0
for scenario in dm.get_scenarios():
dm.set_scenario(scenario)
df = dm.get_processed_data('mpe')
mpe = df['MPEntropy'].values
if np.max(np.abs(mpe)) > max_val:
max_val = np.max(np.abs(mpe))
print(max_val)
for scenario in dm.get_scenarios():
print(scenario)
dm.set_scenario(scenario)
df = dm.get_processed_data('mpe')
time, mpe = df['Time'].values, df['MPEntropy'].values
acr = absolute_category_rating(mpe, grid)
incidence = dm.get_incidence_data()
fig, axes = dm.fig_setup(2, ['ACR', 'Incidence'], np.arange(0, len(time)/3, 2), times = time, width = 3, height=3)
axes[0].plot(time, acr)
axes[1].bar(time, incidence, width=0.2)
axes[0].set_yticks(np.arange(1, 6, 1))
axes[1].set_yticks(np.arange(0, 5, 1))
dm.fig_finalize(tag='mpe', remark_dir='acr/')
|
[
"numpy.abs",
"mpvr.datamodule.manager.Manager.fig_finalize",
"mpvr.datamodule.manager.Manager.get_incidence_data",
"numpy.arange",
"mpvr.datamodule.manager.Manager.get_scenarios",
"mpvr.datamodule.manager.Manager.set_scenario",
"mpvr.datamodule.manager.Manager.section_list",
"mpvr.datamodule.manager.Manager.get_processed_data"
] |
[((233, 251), 'mpvr.datamodule.manager.Manager.get_scenarios', 'dm.get_scenarios', ([], {}), '()\n', (249, 251), True, 'from mpvr.datamodule.manager import Manager as dm\n'), ((463, 481), 'mpvr.datamodule.manager.Manager.get_scenarios', 'dm.get_scenarios', ([], {}), '()\n', (479, 481), True, 'from mpvr.datamodule.manager import Manager as dm\n'), ((257, 282), 'mpvr.datamodule.manager.Manager.set_scenario', 'dm.set_scenario', (['scenario'], {}), '(scenario)\n', (272, 282), True, 'from mpvr.datamodule.manager import Manager as dm\n'), ((292, 320), 'mpvr.datamodule.manager.Manager.get_processed_data', 'dm.get_processed_data', (['"""mpe"""'], {}), "('mpe')\n", (313, 320), True, 'from mpvr.datamodule.manager import Manager as dm\n'), ((507, 532), 'mpvr.datamodule.manager.Manager.set_scenario', 'dm.set_scenario', (['scenario'], {}), '(scenario)\n', (522, 532), True, 'from mpvr.datamodule.manager import Manager as dm\n'), ((543, 571), 'mpvr.datamodule.manager.Manager.get_processed_data', 'dm.get_processed_data', (['"""mpe"""'], {}), "('mpe')\n", (564, 571), True, 'from mpvr.datamodule.manager import Manager as dm\n'), ((692, 715), 'mpvr.datamodule.manager.Manager.get_incidence_data', 'dm.get_incidence_data', ([], {}), '()\n', (713, 715), True, 'from mpvr.datamodule.manager import Manager as dm\n'), ((999, 1044), 'mpvr.datamodule.manager.Manager.fig_finalize', 'dm.fig_finalize', ([], {'tag': '"""mpe"""', 'remark_dir': '"""acr/"""'}), "(tag='mpe', remark_dir='acr/')\n", (1014, 1044), True, 'from mpvr.datamodule.manager import Manager as dm\n'), ((182, 199), 'mpvr.datamodule.manager.Manager.section_list', 'dm.section_list', ([], {}), '()\n', (197, 199), True, 'from mpvr.datamodule.manager import Manager as dm\n'), ((932, 950), 'numpy.arange', 'np.arange', (['(1)', '(6)', '(1)'], {}), '(1, 6, 1)\n', (941, 950), True, 'import numpy as np\n'), ((975, 993), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (984, 993), True, 'import numpy as np\n'), ((368, 
379), 'numpy.abs', 'np.abs', (['mpe'], {}), '(mpe)\n', (374, 379), True, 'import numpy as np\n'), ((417, 428), 'numpy.abs', 'np.abs', (['mpe'], {}), '(mpe)\n', (423, 428), True, 'import numpy as np\n')]
|
"""
Read agg.pt
"""
import sys
import torch
if __name__ == '__main__':
if len(sys.argv) != 2:
quit("Usage: python read_results.py <log_directory>")
fn = 'logs/' + sys.argv[1] + '/agg.pt'
d = torch.load(fn)
print(len(d['test_mll_epoch']))
for i, epoch in enumerate(d['test_mll_epoch']):
valid_mll = d['valid_mll'][d['valid_mll_epoch'].index(epoch)]
test_mll = d['test_mll'][i]
temp = [epoch, valid_mll, test_mll]
if valid_mll == max(d['valid_mll']):
temp.append("Best!")
print(temp)
###
|
[
"torch.load"
] |
[((201, 215), 'torch.load', 'torch.load', (['fn'], {}), '(fn)\n', (211, 215), False, 'import torch\n')]
|
# -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2006-2017 NV Access Limited, <NAME>, <NAME>
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import locale
from collections import OrderedDict
import threading
import time
import os
from ctypes import *
import comtypes.client
from comtypes import COMError
import winreg
import audioDucking
import NVDAHelper
import globalVars
import speech
from synthDriverHandler import SynthDriver, VoiceInfo, synthIndexReached, synthDoneSpeaking
import config
import nvwave
from logHandler import log
import weakref
from speech.commands import (
IndexCommand,
CharacterModeCommand,
LangChangeCommand,
BreakCommand,
PitchCommand,
RateCommand,
VolumeCommand,
PhonemeCommand,
SpeechCommand,
)
# SPAudioState enumeration
# Values passed to ISpAudio::SetState to control the SAPI audio stream
# (used below for fast stop/pause instead of SAPI's slow defaults).
SPAS_CLOSED=0
SPAS_STOP=1
SPAS_PAUSE=2
SPAS_RUN=3
class FunctionHooker(object):
	"""Patches one entry in a DLL's import table so calls to an imported
	function are redirected to a Python-provided replacement.

	The hook is installed for the lifetime of the object and removed when
	the object is garbage collected. Raises RuntimeError if hooking fails.
	"""

	def __init__(
		self,
		targetDll: str,
		importDll: str,
		funcName: str,
		newFunction # result of ctypes.WINFUNCTYPE
	):
		# dllImportTableHooks_hookSingle expects byte strings.
		try:
			self._hook=NVDAHelper.localLib.dllImportTableHooks_hookSingle(
				targetDll.encode("mbcs"),
				importDll.encode("mbcs"),
				funcName.encode("mbcs"),
				newFunction
			)
		except UnicodeEncodeError:
			# A name not representable in the ANSI code page cannot be hooked;
			# fall through to the failure path below.
			log.error("Error encoding FunctionHooker input parameters", exc_info=True)
			self._hook = None
		if self._hook:
			log.debug(f"Hooked {funcName}")
		else:
			log.error(f"Could not hook {funcName}")
			raise RuntimeError(f"Could not hook {funcName}")

	def __del__(self):
		# Restore the original import table entry when the hook object dies.
		if self._hook:
			NVDAHelper.localLib.dllImportTableHooks_unhookSingle(self._hook)
# Maps an open waveOut handle to the AudioDucker enabled for it, so the
# ducker can be dropped when the handle is closed (see waveOutClose below).
_duckersByHandle={}
# Replacement for winmm's waveOutOpen, installed into sapi.dll's import
# table: on each successful open it enables audio ducking for that device.
@WINFUNCTYPE(windll.winmm.waveOutOpen.restype,*windll.winmm.waveOutOpen.argtypes,use_errno=False,use_last_error=False)
def waveOutOpen(pWaveOutHandle,deviceID,wfx,callback,callbackInstance,flags):
	try:
		# Delegate to the real API; normalize a falsy result to 0 (MMSYSERR_NOERROR).
		res=windll.winmm.waveOutOpen(pWaveOutHandle,deviceID,wfx,callback,callbackInstance,flags) or 0
	except WindowsError as e:
		res=e.winerror
	if res==0 and pWaveOutHandle:
		# Success: remember a ducker keyed by the new handle so waveOutClose
		# can release it.
		h=pWaveOutHandle.contents.value
		d=audioDucking.AudioDucker()
		d.enable()
		_duckersByHandle[h]=d
	return res
# Replacement for winmm's waveOutClose: drops the ducker that waveOutOpen
# registered for this handle once the device is successfully closed.
@WINFUNCTYPE(c_long,c_long)
def waveOutClose(waveOutHandle):
	try:
		res=windll.winmm.waveOutClose(waveOutHandle) or 0
	except WindowsError as e:
		res=e.winerror
	if res==0 and waveOutHandle:
		# pop with a default: the handle may not have been tracked.
		_duckersByHandle.pop(waveOutHandle,None)
	return res
# Keeps the installed hooks alive; non-empty means hooking already happened.
_waveOutHooks=[]
def ensureWaveOutHooks():
	"""Install the waveOut import-table hooks into sapi.dll once,
	and only when audio ducking is supported on this system."""
	if not _waveOutHooks and audioDucking.isAudioDuckingSupported():
		sapiPath=os.path.join(os.path.expandvars("$SYSTEMROOT"),"system32","speech","common","sapi.dll")
		_waveOutHooks.append(FunctionHooker(sapiPath,"WINMM.dll","waveOutOpen",waveOutOpen))
		_waveOutHooks.append(FunctionHooker(sapiPath,"WINMM.dll","waveOutClose",waveOutClose))
class constants:
	"""SAPI5 flag values used by this driver."""
	# From the SpeechVoiceSpeakFlags enum, passed to ISpVoice.Speak.
	SVSFlagsAsync = 1
	SVSFPurgeBeforeSpeak = 2
	SVSFIsXML = 8
	# From the SpeechVoiceEvents enum: https://msdn.microsoft.com/en-us/library/ms720886(v=vs.85).aspx
	SVEEndInputStream = 4
	SVEBookmark = 16
class SapiSink(object):
	"""Handles SAPI event notifications.
	See https://msdn.microsoft.com/en-us/library/ms723587(v=vs.85).aspx

	Holds only a weak reference to the driver so the event sink does not
	keep a terminated driver alive.
	"""

	def __init__(self, synthRef: weakref.ReferenceType):
		self.synthRef = synthRef

	def Bookmark(self, streamNum, pos, bookmark, bookmarkId):
		"""Fired when speech reaches a bookmark; forwards it as an index notification."""
		synth = self.synthRef()
		if synth is None:
			log.debugWarning("Called Bookmark method on SapiSink while driver is dead")
			return
		synthIndexReached.notify(synth=synth, index=bookmarkId)

	def EndStream(self, streamNum, pos):
		"""Fired when SAPI finishes speaking a stream; notifies that speech is done."""
		synth = self.synthRef()
		if synth is None:
			# Fixed copy/paste in the log message: this is EndStream, not Bookmark.
			log.debugWarning("Called EndStream method on SapiSink while driver is dead")
			return
		synthDoneSpeaking.notify(synth=synth)
class SynthDriver(SynthDriver):
	"""NVDA synthesizer driver for Microsoft Speech API version 5."""

	supportedSettings=(SynthDriver.VoiceSetting(),SynthDriver.RateSetting(),SynthDriver.PitchSetting(),SynthDriver.VolumeSetting())
	supportedCommands = {
		IndexCommand,
		CharacterModeCommand,
		LangChangeCommand,
		BreakCommand,
		PitchCommand,
		RateCommand,
		VolumeCommand,
		PhonemeCommand,
	}
	supportedNotifications = {synthIndexReached, synthDoneSpeaking}
	COM_CLASS = "SAPI.SPVoice"
	name="sapi5"
	description="Microsoft Speech API version 5"

	@classmethod
	def check(cls):
		"""Return True if the SAPI5 COM class is registered on this system."""
		try:
			r=winreg.OpenKey(winreg.HKEY_CLASSES_ROOT,cls.COM_CLASS)
			r.Close()
			return True
		except OSError:
			# Key not found: SAPI5 is unavailable. (Was a bare except, which
			# also swallowed KeyboardInterrupt and programming errors.)
			return False

	ttsAudioStream=None #: Holds the ISPAudio interface for the current voice, to aid in stopping and pausing audio

	def __init__(self,_defaultVoiceToken=None):
		"""
		@param _defaultVoiceToken: an optional sapi voice token which should be used as the default voice (only useful for subclasses)
		@type _defaultVoiceToken: ISpeechObjectToken
		"""
		ensureWaveOutHooks()
		self._pitch=50
		self._initTts(_defaultVoiceToken)

	def terminate(self):
		# Drop the COM references; the event connection must go first so no
		# further events fire into a dead driver.
		self._eventsConnection = None
		self.tts = None

	def _getAvailableVoices(self):
		"""Enumerate installed SAPI5 voices as an OrderedDict of ID -> VoiceInfo."""
		voices=OrderedDict()
		v=self._getVoiceTokens()
		# #2629: Iterating uses IEnumVARIANT and GetBestInterface doesn't work on tokens returned by some token enumerators.
		# Therefore, fetch the items by index, as that method explicitly returns the correct interface.
		for i in range(len(v)):
			try:
				ID=v[i].Id
				name=v[i].GetDescription()
				try:
					language=locale.windows_locale[int(v[i].getattribute('language').split(';')[0],16)]
				except KeyError:
					language=None
			except COMError:
				log.warning("Could not get the voice info. Skipping...")
				# Fix: without this continue, ID/name/language from a previous
				# iteration (or nothing at all, raising NameError on the first
				# voice) would be used for the entry below.
				continue
			voices[ID]=VoiceInfo(ID,name,language)
		return voices

	def _getVoiceTokens(self):
		"""Provides a collection of sapi5 voice tokens. Can be overridden by subclasses if tokens should be looked for in some other registry location."""
		return self.tts.getVoices()

	def _get_rate(self):
		# SAPI rate is -10..10; map to NVDA's 0..100.
		return (self.tts.rate*5)+50

	def _get_pitch(self):
		return self._pitch

	def _get_volume(self):
		return self.tts.volume

	def _get_voice(self):
		return self.tts.voice.Id

	def _get_lastIndex(self):
		bookmark=self.tts.status.LastBookmark
		if bookmark!="" and bookmark is not None:
			return int(bookmark)
		else:
			return None

	def _percentToRate(self, percent):
		# Inverse of _get_rate: NVDA 0..100 -> SAPI -10..10.
		return (percent - 50) // 5

	def _set_rate(self,rate):
		self.tts.Rate = self._percentToRate(rate)

	def _set_pitch(self,value):
		# pitch is really controlled with xml around speak commands
		self._pitch=value

	def _set_volume(self,value):
		self.tts.Volume = value

	def _initTts(self, voice=None):
		"""Create the SAPI COM object and wire up audio output and events.

		@param voice: optional voice token to select before configuring audio.
		"""
		self.tts=comtypes.client.CreateObject(self.COM_CLASS)
		if voice:
			# #749: It seems that SAPI 5 doesn't reset the audio parameters when the voice is changed,
			# but only when the audio output is changed.
			# Therefore, set the voice before setting the audio output.
			# Otherwise, we will get poor speech quality in some cases.
			self.tts.voice = voice
		outputDeviceID=nvwave.outputDeviceNameToID(config.conf["speech"]["outputDevice"], True)
		if outputDeviceID>=0:
			self.tts.audioOutput=self.tts.getAudioOutputs()[outputDeviceID]
		# Weak reference so the event sink never keeps the driver alive.
		self._eventsConnection = comtypes.client.GetEvents(self.tts, SapiSink(weakref.ref(self)))
		self.tts.EventInterests = constants.SVEBookmark | constants.SVEEndInputStream
		from comInterfaces.SpeechLib import ISpAudio
		try:
			self.ttsAudioStream=self.tts.audioOutputStream.QueryInterface(ISpAudio)
		except COMError:
			log.debugWarning("SAPI5 voice does not support ISPAudio")
			self.ttsAudioStream=None

	def _set_voice(self,value):
		tokens = self._getVoiceTokens()
		# #2629: Iterating uses IEnumVARIANT and GetBestInterface doesn't work on tokens returned by some token enumerators.
		# Therefore, fetch the items by index, as that method explicitly returns the correct interface.
		for i in range(len(tokens)):
			voice=tokens[i]
			if value==voice.Id:
				break
		else:
			# Voice not found.
			return
		self._initTts(voice=voice)

	def _percentToPitch(self, percent):
		return percent // 2 - 25

	IPA_TO_SAPI = {
		u"θ": u"th",
		u"s": u"s",
	}

	def _convertPhoneme(self, ipa):
		"""Convert an IPA string to SAPI phoneme symbols.

		@raise LookupError: if the voice is not US English or a phoneme is unknown.
		"""
		# We only know about US English phonemes.
		# Rather than just ignoring unknown phonemes, SAPI throws an exception.
		# Therefore, don't bother with any other language.
		if self.tts.voice.GetAttribute("language") != "409":
			raise LookupError("No data for this language")
		out = []
		outAfter = None
		for ipaChar in ipa:
			if ipaChar == u"ˈ":
				# Primary stress applies to the phoneme that follows it.
				outAfter = u"1"
				continue
			out.append(self.IPA_TO_SAPI[ipaChar])
			if outAfter:
				out.append(outAfter)
				outAfter = None
		if outAfter:
			out.append(outAfter)
		return u" ".join(out)

	def speak(self, speechSequence):
		"""Render an NVDA speech sequence as SAPI XML and speak it asynchronously."""
		textList = []
		# NVDA SpeechCommands are linear, but XML is hierarchical.
		# Therefore, we track values for non-empty tags.
		# When a tag changes, we close all previously opened tags and open new ones.
		tags = {}
		# We have to use something mutable here because it needs to be changed by the inner function.
		tagsChanged = [True]
		openedTags = []
		def outputTags():
			if not tagsChanged[0]:
				return
			for tag in reversed(openedTags):
				textList.append("</%s>" % tag)
			del openedTags[:]
			for tag, attrs in tags.items():
				textList.append("<%s" % tag)
				for attr, val in attrs.items():
					textList.append(' %s="%s"' % (attr, val))
				textList.append(">")
				openedTags.append(tag)
			tagsChanged[0] = False
		pitch = self._pitch
		# Pitch must always be specified in the markup.
		tags["pitch"] = {"absmiddle": self._percentToPitch(pitch)}
		rate = self.rate
		volume = self.volume
		for item in speechSequence:
			if isinstance(item, str):
				outputTags()
				# Fix: escape "<" as "&lt;" — the text is spoken with SVSFIsXML,
				# so a literal "<" would corrupt the XML. The previous
				# replace("<", "<") was a no-op.
				textList.append(item.replace("<", "&lt;"))
			elif isinstance(item, IndexCommand):
				textList.append('<Bookmark Mark="%d" />' % item.index)
			elif isinstance(item, CharacterModeCommand):
				if item.state:
					tags["spell"] = {}
				else:
					try:
						del tags["spell"]
					except KeyError:
						pass
				tagsChanged[0] = True
			elif isinstance(item, BreakCommand):
				textList.append('<silence msec="%d" />' % item.time)
			elif isinstance(item, PitchCommand):
				tags["pitch"] = {"absmiddle": self._percentToPitch(int(pitch * item.multiplier))}
				tagsChanged[0] = True
			elif isinstance(item, VolumeCommand):
				if item.multiplier == 1:
					# Multiplier 1 means "back to default": drop the tag entirely.
					try:
						del tags["volume"]
					except KeyError:
						pass
				else:
					tags["volume"] = {"level": int(volume * item.multiplier)}
				tagsChanged[0] = True
			elif isinstance(item, RateCommand):
				if item.multiplier == 1:
					try:
						del tags["rate"]
					except KeyError:
						pass
				else:
					tags["rate"] = {"absspeed": self._percentToRate(int(rate * item.multiplier))}
				tagsChanged[0] = True
			elif isinstance(item, PhonemeCommand):
				try:
					textList.append(u'<pron sym="%s">%s</pron>'
						% (self._convertPhoneme(item.ipa), item.text or u""))
				except LookupError:
					log.debugWarning("Couldn't convert character in IPA string: %s" % item.ipa)
					if item.text:
						textList.append(item.text)
			elif isinstance(item, SpeechCommand):
				# NOTE(review): LangChangeCommand is advertised in supportedCommands
				# but lands here and is only logged — confirm this is intentional.
				log.debugWarning("Unsupported speech command: %s" % item)
			else:
				log.error("Unknown speech: %s" % item)
		# Close any tags that are still open.
		tags.clear()
		tagsChanged[0] = True
		outputTags()
		text = "".join(textList)
		flags = constants.SVSFIsXML | constants.SVSFlagsAsync
		self.tts.Speak(text, flags)

	def cancel(self):
		# SAPI5's default means of stopping speech can sometimes lag at end of speech, especially with Win8 / Win 10 Microsoft Voices.
		# Therefore instruct the underlying audio interface to stop first, before interupting and purging any remaining speech.
		if self.ttsAudioStream:
			self.ttsAudioStream.setState(SPAS_STOP,0)
		self.tts.Speak(None, 1|constants.SVSFPurgeBeforeSpeak)

	def pause(self,switch):
		if switch:
			self.cancel()
		# SAPI5's default means of pausing in most cases is either extremely slow (e.g. takes more than half a second) or does not work at all.
		# Therefore instruct the underlying audio interface to pause instead.
		if self.ttsAudioStream:
			self.ttsAudioStream.setState(SPAS_PAUSE if switch else SPAS_RUN,0)

	def isSpeaking(self):
		"""Return True/False for the SAPI running state, or None if unknown."""
		running=None
		if self.tts.Status.RunningState == 2: # SRSEIsSpeaking
			running=True
		elif self.tts.Status.RunningState == 1: # SRSEDone
			running=False
		return running
|
[
"logHandler.log.warning",
"logHandler.log.debug",
"synthDriverHandler.VoiceInfo",
"nvwave.outputDeviceNameToID",
"audioDucking.isAudioDuckingSupported",
"NVDAHelper.localLib.dllImportTableHooks_unhookSingle",
"synthDriverHandler.synthIndexReached.notify",
"weakref.ref",
"synthDriverHandler.SynthDriver.RateSetting",
"synthDriverHandler.SynthDriver.PitchSetting",
"synthDriverHandler.SynthDriver.VolumeSetting",
"os.path.expandvars",
"synthDriverHandler.synthDoneSpeaking.notify",
"logHandler.log.error",
"winreg.OpenKey",
"audioDucking.AudioDucker",
"synthDriverHandler.SynthDriver.VoiceSetting",
"logHandler.log.debugWarning",
"collections.OrderedDict"
] |
[((2203, 2229), 'audioDucking.AudioDucker', 'audioDucking.AudioDucker', ([], {}), '()\n', (2227, 2229), False, 'import audioDucking\n'), ((2615, 2653), 'audioDucking.isAudioDuckingSupported', 'audioDucking.isAudioDuckingSupported', ([], {}), '()\n', (2651, 2653), False, 'import audioDucking\n'), ((3591, 3646), 'synthDriverHandler.synthIndexReached.notify', 'synthIndexReached.notify', ([], {'synth': 'synth', 'index': 'bookmarkId'}), '(synth=synth, index=bookmarkId)\n', (3615, 3646), False, 'from synthDriverHandler import SynthDriver, VoiceInfo, synthIndexReached, synthDoneSpeaking\n'), ((3831, 3868), 'synthDriverHandler.synthDoneSpeaking.notify', 'synthDoneSpeaking.notify', ([], {'synth': 'synth'}), '(synth=synth)\n', (3855, 3868), False, 'from synthDriverHandler import SynthDriver, VoiceInfo, synthIndexReached, synthDoneSpeaking\n'), ((3925, 3951), 'synthDriverHandler.SynthDriver.VoiceSetting', 'SynthDriver.VoiceSetting', ([], {}), '()\n', (3949, 3951), False, 'from synthDriverHandler import SynthDriver, VoiceInfo, synthIndexReached, synthDoneSpeaking\n'), ((3952, 3977), 'synthDriverHandler.SynthDriver.RateSetting', 'SynthDriver.RateSetting', ([], {}), '()\n', (3975, 3977), False, 'from synthDriverHandler import SynthDriver, VoiceInfo, synthIndexReached, synthDoneSpeaking\n'), ((3978, 4004), 'synthDriverHandler.SynthDriver.PitchSetting', 'SynthDriver.PitchSetting', ([], {}), '()\n', (4002, 4004), False, 'from synthDriverHandler import SynthDriver, VoiceInfo, synthIndexReached, synthDoneSpeaking\n'), ((4005, 4032), 'synthDriverHandler.SynthDriver.VolumeSetting', 'SynthDriver.VolumeSetting', ([], {}), '()\n', (4030, 4032), False, 'from synthDriverHandler import SynthDriver, VoiceInfo, synthIndexReached, synthDoneSpeaking\n'), ((5093, 5106), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5104, 5106), False, 'from collections import OrderedDict\n'), ((7054, 7126), 'nvwave.outputDeviceNameToID', 'nvwave.outputDeviceNameToID', 
(["config.conf['speech']['outputDevice']", '(True)'], {}), "(config.conf['speech']['outputDevice'], True)\n", (7081, 7126), False, 'import nvwave\n'), ((1508, 1539), 'logHandler.log.debug', 'log.debug', (['f"""Hooked {funcName}"""'], {}), "(f'Hooked {funcName}')\n", (1517, 1539), False, 'from logHandler import log\n'), ((1553, 1592), 'logHandler.log.error', 'log.error', (['f"""Could not hook {funcName}"""'], {}), "(f'Could not hook {funcName}')\n", (1562, 1592), False, 'from logHandler import log\n'), ((1691, 1755), 'NVDAHelper.localLib.dllImportTableHooks_unhookSingle', 'NVDAHelper.localLib.dllImportTableHooks_unhookSingle', (['self._hook'], {}), '(self._hook)\n', (1743, 1755), False, 'import NVDAHelper\n'), ((2680, 2713), 'os.path.expandvars', 'os.path.expandvars', (['"""$SYSTEMROOT"""'], {}), "('$SYSTEMROOT')\n", (2698, 2713), False, 'import os\n'), ((3501, 3576), 'logHandler.log.debugWarning', 'log.debugWarning', (['"""Called Bookmark method on SapiSink while driver is dead"""'], {}), "('Called Bookmark method on SapiSink while driver is dead')\n", (3517, 3576), False, 'from logHandler import log\n'), ((3740, 3816), 'logHandler.log.debugWarning', 'log.debugWarning', (['"""Called Bookmark method on EndStream while driver is dead"""'], {}), "('Called Bookmark method on EndStream while driver is dead')\n", (3756, 3816), False, 'from logHandler import log\n'), ((4423, 4478), 'winreg.OpenKey', 'winreg.OpenKey', (['winreg.HKEY_CLASSES_ROOT', 'cls.COM_CLASS'], {}), '(winreg.HKEY_CLASSES_ROOT, cls.COM_CLASS)\n', (4437, 4478), False, 'import winreg\n'), ((5678, 5707), 'synthDriverHandler.VoiceInfo', 'VoiceInfo', (['ID', 'name', 'language'], {}), '(ID, name, language)\n', (5687, 5707), False, 'from synthDriverHandler import SynthDriver, VoiceInfo, synthIndexReached, synthDoneSpeaking\n'), ((1389, 1463), 'logHandler.log.error', 'log.error', (['"""Error encoding FunctionHooker input parameters"""'], {'exc_info': '(True)'}), "('Error encoding FunctionHooker input 
parameters', exc_info=True)\n", (1398, 1463), False, 'from logHandler import log\n'), ((7293, 7310), 'weakref.ref', 'weakref.ref', (['self'], {}), '(self)\n', (7304, 7310), False, 'import weakref\n'), ((7550, 7607), 'logHandler.log.debugWarning', 'log.debugWarning', (['"""SAPI5 voice does not support ISPAudio"""'], {}), "('SAPI5 voice does not support ISPAudio')\n", (7566, 7607), False, 'from logHandler import log\n'), ((5606, 5662), 'logHandler.log.warning', 'log.warning', (['"""Could not get the voice info. Skipping..."""'], {}), "('Could not get the voice info. Skipping...')\n", (5617, 5662), False, 'from logHandler import log\n'), ((11350, 11407), 'logHandler.log.debugWarning', 'log.debugWarning', (["('Unsupported speech command: %s' % item)"], {}), "('Unsupported speech command: %s' % item)\n", (11366, 11407), False, 'from logHandler import log\n'), ((11423, 11461), 'logHandler.log.error', 'log.error', (["('Unknown speech: %s' % item)"], {}), "('Unknown speech: %s' % item)\n", (11432, 11461), False, 'from logHandler import log\n'), ((11173, 11248), 'logHandler.log.debugWarning', 'log.debugWarning', (['("Couldn\'t convert character in IPA string: %s" % item.ipa)'], {}), '("Couldn\'t convert character in IPA string: %s" % item.ipa)\n', (11189, 11248), False, 'from logHandler import log\n')]
|
"""
File: 349.py
Title: Intersection of Two Arrays
Difficulty: Easy
URL: https://leetcode.com/problems/intersection-of-two-arrays/
"""
import unittest
from typing import List
class Solution:
    def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Return the distinct values present in both input lists.

        Duplicates collapse because both inputs become sets; the result
        order is unspecified.
        """
        common = set(nums1) & set(nums2)
        return list(common)
class SolutionTestCase(unittest.TestCase):
    """Checks Solution.intersection against the two LeetCode examples."""

    def test_example1(self):
        # Input
        nums1 = [1, 2, 2, 1]
        nums2 = [2, 2]
        solution = Solution()
        # assertCountEqual ignores ordering: the problem accepts the result
        # in any order, and the iteration order of a set-derived list is an
        # implementation detail (assertEqual made this test fragile).
        self.assertCountEqual(solution.intersection(nums1, nums2), [2])

    def test_example2(self):
        # Input
        nums1 = [4, 9, 5]
        nums2 = [9, 4, 9, 8, 4]
        solution = Solution()
        self.assertCountEqual(solution.intersection(nums1, nums2), [9, 4])
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main"
] |
[((905, 920), 'unittest.main', 'unittest.main', ([], {}), '()\n', (918, 920), False, 'import unittest\n')]
|
from __future__ import absolute_import
from ..base import BaseObj, FieldMeta, Context
import six
import copy
class BaseObj_v1_2(BaseObj):
    """Common base for all Swagger 1.2 spec objects; pins the spec version."""
    __swagger_version__ = '1.2'
class Items(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ Items Object

    Describes the element type of an array-typed field.
    """
    # Declarative field list consumed by the FieldMeta metaclass; keys are
    # the spec's JSON property names, values are the defaults.
    __swagger_fields__ = {
        '$ref': None,
        'type': None,
        'format': None,
    }
    
class ItemsContext(Context):
    """ Context of Items Object
    """
    # Tells the parsing Context which object class to instantiate.
    __swagger_ref_object__ = Items
class DataTypeObj(BaseObj_v1_2):
    """ Data Type Fields

    Mixin carrying the Swagger 1.2 data-type properties shared by
    Parameter, Operation and Property.
    """
    __swagger_fields__ = {
        'type': None,
        '$ref': None,
        'format': None,
        'defaultValue': None,
        'enum': None,
        'items': None,
        'minimum': None,
        'maximum': None,
        'uniqueItems': None,
    }
    def __init__(self, ctx):
        # Items Object, too lazy to create a Context for DataTypeObj
        # to wrap this child.
        items_data = ctx._obj.get('items', None)
        if items_data:
            # Parse the nested Items object in place via its own context.
            with ItemsContext(ctx._obj, 'items') as items_ctx:
                items_ctx.parse(items_data)
        else:
            # No 'items' in the raw dict: still create the private slot so
            # attribute access stays uniform.
            setattr(self, self.get_private_name('items'), None)
        super(DataTypeObj, self).__init__(ctx)
class Scope(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ Scope Object
    """
    # For this and the following declarative classes, __swagger_fields__ is
    # consumed by FieldMeta: keys are the spec's property names, values the
    # defaults used when a property is absent.
    __swagger_fields__ = {
        'scope': None,
        'description': None,
    }
class LoginEndpoint(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ LoginEndpoint Object
    """
    __swagger_fields__ = {
        'url': None,
    }
class Implicit(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ Implicit Object
    """
    __swagger_fields__ = {
        'loginEndpoint': None,
        'tokenName': None,
    }
class TokenRequestEndpoint(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ TokenRequestEndpoint Object
    """
    __swagger_fields__ = {
        'url': None,
        'clientIdName': None,
        'clientSecretName': None,
    }
class TokenEndpoint(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ TokenEndpoint Object
    """
    __swagger_fields__ = {
        'url': None,
        'tokenName': None,
    }
class AuthorizationCode(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ AuthorizationCode Object
    """
    __swagger_fields__ = {
        'tokenRequestEndpoint': None,
        'tokenEndpoint': None,
    }
class GrantType(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ GrantType Object
    """
    __swagger_fields__ = {
        'implicit': None,
        'authorization_code': None,
    }
class Authorizations(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ Authorizations Object
    """
    __swagger_fields__ = {
        'scope': None,
        'description': None,
    }
class Authorization(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ Authorization Object
    """
    __swagger_fields__ = {
        'type': None,
        'passAs': None,
        'keyname': None,
        'scopes': None,
        'grantTypes': None,
    }
    def get_name(self, path):
        # Name is the third '/'-separated component of the JSON path,
        # e.g. '/authorizations/oauth2/...' -> 'oauth2'.
        return path.split('/', 3)[2]
class ResponseMessage(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ ResponseMessage Object
    """
    __swagger_fields__ = {
        'code': None,
        'message': None,
        'responseModel': None,
    }
class Parameter(six.with_metaclass(FieldMeta, DataTypeObj)):
    """ Parameter Object

    Inherits the shared data-type fields from DataTypeObj.
    """
    __swagger_fields__ = {
        'paramType': None,
        'name': None,
        'required': None,
        'allowMultiple': None,
        'description': None,
    }
class Operation(six.with_metaclass(FieldMeta, DataTypeObj)):
    """ Operation Object
    """
    __swagger_fields__ = {
        'method': None,
        'nickname': None,
        'authorizations': None,
        'parameters': None,
        'responseMessages': None,
        'produces': None,
        'consumes': None,
        'deprecated': None,
        'summary': None,
        'notes': None,
    }
    # Fields not present in the spec, filled in during post-processing.
    __internal_fields__ = {
        # path from Api object, concated with Resource object
        'path': None,
    }
    def get_name(self, path):
        # Operations are keyed by nickname (unique within a Resource; see
        # Resource.__init__ below, which flattens Api objects by nickname).
        return self.nickname
class Api(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ Api Object
    """
    __swagger_fields__ = {
        'path': None,
        'operations': None,
        'description': None,
    }
class Property(six.with_metaclass(FieldMeta, DataTypeObj)):
    """ Property Object
    """
    __swagger_fields__ = {
        'description': None,
    }
class Model(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ Model Object
    """
    __swagger_fields__ = {
        'id': None,
        'required': [],
        'properties': None,
        'subTypes': None,
        'discriminator': None,
        'description': None,
    }
    __internal_fields__ = {
        # for model inheritance
        '_extends_': None,
    }
    def get_name(self, path):
        # Models are keyed by their spec id.
        return self.id
class Resource(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ Resource Object

    Represents a Swagger 1.2 API Declaration. After construction, 'apis'
    holds a flat dict of nickname -> Operation instead of a list of Api
    objects (see __init__).
    """
    __swagger_fields__ = {
        'swaggerVersion': None,
        'apiVersion': None,
        'apis': None,
        'basePath': None,
        'resourcePath': None,
        'models': None,
        'produces': None,
        'consumes': None,
        'authorizations': None,
        'description': None,
    }
    def __init__(self, ctx):
        """ The original structure of API object is very bad
        for seeking nickname for operations. Since nickname is unique
        in one Resource, we can just make it flat.

        :raises ValueError: if two operations share a nickname.
        """
        super(Resource, self).__init__(ctx)
        new_api = {}
        for api in ctx._obj['apis']:
            for op in api.operations:
                name = op.nickname
                if name in new_api.keys():
                    raise ValueError('duplication operation found: ' + name)
                # Operation objects now have 'path' attribute.
                op.update_field('path', api.path)
                # Operation objects' parent is now Resource object(API Declaration).
                op._parent__ = self
                new_api[name] = op
        # replace Api with Operations
        self.update_field('apis', new_api)
    def get_name(self, path):
        # Resource name is the third '/'-separated path component.
        return path.split('/', 3)[2]
class Info(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ Info Object
    """
    __swagger_fields__ = {
        'title': None,
        'termsOfServiceUrl': None,
        'contact': None,
        'license': None,
        'licenseUrl': None,
        'description': None,
    }
class ResourceList(six.with_metaclass(FieldMeta, BaseObj_v1_2)):
    """ Resource List Object

    The Swagger 1.2 Resource Listing: the root document of an API.
    """
    __swagger_fields__ = {
        'swaggerVersion': None,
        'apis': None,
        'apiVersion': None,
        'info': None,
        'authorizations': None,
    }
|
[
"six.with_metaclass"
] |
[((196, 239), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (214, 239), False, 'import six\n'), ((1291, 1334), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (1309, 1334), False, 'import six\n'), ((1484, 1527), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (1502, 1527), False, 'import six\n'), ((1648, 1691), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (1666, 1691), False, 'import six\n'), ((1857, 1900), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (1875, 1900), False, 'import six\n'), ((2099, 2142), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (2117, 2142), False, 'import six\n'), ((2300, 2343), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (2318, 2343), False, 'import six\n'), ((2518, 2561), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (2536, 2561), False, 'import six\n'), ((2726, 2769), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (2744, 2769), False, 'import six\n'), ((2928, 2971), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (2946, 2971), False, 'import six\n'), ((3276, 3319), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (3294, 3319), False, 'import six\n'), ((3502, 3544), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'DataTypeObj'], {}), '(FieldMeta, DataTypeObj)\n', (3520, 3544), False, 'import six\n'), ((3780, 3822), 
'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'DataTypeObj'], {}), '(FieldMeta, DataTypeObj)\n', (3798, 3822), False, 'import six\n'), ((4381, 4424), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (4399, 4424), False, 'import six\n'), ((4595, 4637), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'DataTypeObj'], {}), '(FieldMeta, DataTypeObj)\n', (4613, 4637), False, 'import six\n'), ((4763, 4806), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (4781, 4806), False, 'import six\n'), ((5217, 5260), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (5235, 5260), False, 'import six\n'), ((6601, 6644), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (6619, 6644), False, 'import six\n'), ((6909, 6952), 'six.with_metaclass', 'six.with_metaclass', (['FieldMeta', 'BaseObj_v1_2'], {}), '(FieldMeta, BaseObj_v1_2)\n', (6927, 6952), False, 'import six\n')]
|
from setuptools import setup, find_packages
import os


def _read_long_description():
    """Read README.md with an explicit encoding, closing the file promptly.

    The previous bare open().read() leaked the file handle (ResourceWarning)
    and decoded with the locale's default encoding, which breaks on systems
    where that is not UTF-8.
    """
    with open('README.md', encoding='utf-8') as readme:
        return readme.read()


setup(
    # Needed to silence warnings (and to be a worthwhile package)
    name='Livy-Submit',
    url='https://github.com/gdiepen/livy-submit',
    author='<NAME>',
    author_email='<EMAIL>',
    # Needed to actually package something
    packages=find_packages(),
    # Needed for dependencies
    install_requires=[],
    # *strongly* suggested for sharing
    version='1.1.0',
    # The license can be anything you like
    license='MIT',
    description='Livy-Submit enables you to send your python files to a spark cluster using Livy running on edge node',
    long_description=_read_long_description(),
    long_description_content_type='text/markdown',
    entry_points={'console_scripts': [
        'livy_submit = livysubmit.__main__:main'
    ]},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
|
[
"setuptools.find_packages"
] |
[((307, 322), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (320, 322), False, 'from setuptools import setup, find_packages\n')]
|