content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import pytest
from requests import get
from urllib.parse import urljoin
def test_valid_new_link_page(wait_for_api, login_user):
    """
    GIVEN a user has logged in (login_user)
    WHEN the '/links/new' page is requested with GET
    THEN the response succeeds and the new-link heading is rendered
    """
    session, base_url = wait_for_api
    response = session.get(urljoin(base_url, '/links/new'))
    assert response.status_code == 200
    assert '<h1>New link</h1>' in response.text
def test_invalid_new_link_page(wait_for_api):
    """
    GIVEN a user has not logged in
    WHEN the '/links/new' page is navigated to (GET)
    THEN check the user is told to log in via the flash message
    """
    request_session, api_url = wait_for_api
    response = request_session.get(urljoin(api_url, '/links/new'))
    # 200 here presumably means the redirect away from the protected page
    # was followed to completion -- confirm against the app's auth handling.
    assert response.status_code == 200
    assert '<div class="flash">Please login first</div>' in response.text
def test_valid_link_categories_page(wait_for_api, login_user):
    """
    GIVEN a user has logged in (login_user)
    WHEN the '/links/categories' page is requested with GET
    THEN the response succeeds and the categories heading is rendered
    """
    session, base_url = wait_for_api
    response = session.get(urljoin(base_url, '/links/categories'))
    assert response.status_code == 200
    assert '<h1>Link categories</h1>' in response.text
def test_invalid_link_categories_page(wait_for_api):
    """
    GIVEN a user has not logged in
    WHEN the '/links/categories' page is navigated to (GET)
    THEN check user is redirected to homepage because not authorized and flash message is correct
    """
    request_session, api_url = wait_for_api
    response = request_session.get(urljoin(api_url, '/links/categories'))
    # 200 is the final status after the redirect is followed -- presumably
    # the homepage; confirm against the app's unauthorized handling.
    assert response.status_code == 200
    assert '<div class="flash">Please login first</div>' in response.text
def test_valid_new_link_category_page(wait_for_api, login_user):
    """
    GIVEN a user has logged in (login_user)
    WHEN the '/links/category/new' page is navigated to (GET)
    THEN check the response is valid and page title is correct
    """
    request_session, api_url = wait_for_api
    response = request_session.get(urljoin(api_url, '/links/category/new'))
    assert response.status_code == 200
    assert '<h1>New link category</h1>' in response.text
def test_invalid_new_link_category_page(wait_for_api):
    """
    GIVEN a user has not logged in
    WHEN the '/links/category/new' page is navigated to (GET)
    THEN check the user is told to log in via the flash message
    """
    request_session, api_url = wait_for_api
    response = request_session.get(urljoin(api_url, '/links/category/new'))
    assert response.status_code == 200
    assert '<div class="flash">Please login first</div>' in response.text
def test_valid_new_link(wait_for_api, login_user):
    """
    GIVEN a user has logged in
    WHEN the '/links/new' page is navigated to (POST)
    THEN check the response is valid and flash message is correct
    """
    # Form payload; category=1 presumably references a pre-seeded link
    # category -- confirm against the test database fixtures.
    new_link = dict(category=1, name='Test link', url='http://www.example.com')
    request_session, api_url = wait_for_api
    response = request_session.post(urljoin(api_url, '/links/new'), data=new_link, allow_redirects=True)
    assert response.status_code == 200
    assert '<div class="flash">New link created</div>' in response.text
def test_valid_new_link_category(wait_for_api, login_user):
    """
    GIVEN a user has logged in
    WHEN the '/links/category/new' page is navigated to (POST)
    THEN check the response is valid and flash message is correct
    """
    new_category = dict(category_name='Test category')
    request_session, api_url = wait_for_api
    response = request_session.post(urljoin(api_url, '/links/category/new'), data=new_category, allow_redirects=True)
    assert response.status_code == 200
    assert '<div class="flash">New link category created</div>' in response.text
| [
11748,
12972,
9288,
198,
6738,
7007,
1330,
651,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
22179,
198,
198,
4299,
1332,
62,
12102,
62,
3605,
62,
8726,
62,
7700,
7,
17077,
62,
1640,
62,
15042,
11,
17594,
62,
7220,
2599,
198,
... | 2.902991 | 1,371 |
# Demo script: mines one block of "mechgCoin" by finding a proof-of-work
# for the latest block, recording a reward transaction, and appending the
# newly constructed block to the chain.
from blockchain import BlockChain
from block import Block

blockchain = BlockChain()

print("***Mining mechgCoin about to start***")
print(blockchain.chain)

# Derive the next proof from the proof number of the current chain tip.
last_block = blockchain.latest_block
last_proof_no = last_block.proof_no
proof_no = blockchain.proof_of_work(last_proof_no)

blockchain.new_data(
    sender="0",  # it implies that this node has created a new block
    recipient="Joziel CDB",  # let's send Joziel some coins!
    quantity=1,  # creating a new block (or identifying the proof number) is awarded with 1
)

# NOTE(review): `calculate_hash` is accessed without parentheses -- confirm
# it is a property on Block; if it is a plain method, this passes the bound
# method object instead of the hash string.
last_hash = last_block.calculate_hash
block = blockchain.construct_block(proof_no, last_hash)

print("***Mining mechgCoin has been successful***")
print(blockchain.chain)
6738,
11779,
1330,
9726,
35491,
201,
198,
6738,
2512,
1330,
9726,
201,
198,
201,
198,
9967,
7983,
796,
9726,
35491,
3419,
201,
198,
201,
198,
4798,
7203,
8162,
44,
3191,
36571,
70,
24387,
546,
284,
923,
8162,
4943,
201,
198,
4798,
7,
... | 2.931174 | 247 |
# -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2022, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions and classes used to enable more Discord oriented argument converters."""
from __future__ import annotations
__all__: list[str] = [
"ToChannel",
"ToEmoji",
"ToGuild",
"ToInvite",
"ToInviteWithMetadata",
"ToMember",
"ToPresence",
"ToRole",
"ToUser",
"ToVoiceState",
"from_datetime",
"parse_channel_id",
"parse_emoji_id",
"parse_role_id",
"parse_snowflake",
"parse_user_id",
"search_channel_ids",
"search_emoji_ids",
"search_role_ids",
"search_snowflakes",
"search_user_ids",
"to_bool",
"to_channel",
"to_color",
"to_colour",
"to_datetime",
"to_emoji",
"to_guild",
"to_invite",
"to_invite_with_metadata",
"to_member",
"to_presence",
"to_role",
"to_snowflake",
"to_user",
"to_voice_state",
]
import abc
import datetime
import logging
import operator
import re
import typing
import urllib.parse as urlparse
from collections import abc as collections
import hikari
from . import abc as tanjun_abc
from . import injecting
from .dependencies import async_cache
if typing.TYPE_CHECKING:
from . import parsing
_ArgumentT = typing.Union[str, int, float]
_ValueT = typing.TypeVar("_ValueT")
_LOGGER = logging.getLogger("hikari.tanjun.conversion")
class BaseConverter(typing.Generic[_ValueT], abc.ABC):
    """Base class for the standard converters.
    .. warning::
        Inheriting from this is completely unnecessary and should be avoided
        for people using the library unless they know what they're doing.
    This is detail of the standard implementation and isn't guaranteed to work
    between implementations but will work for implementations which provide
    the standard dependency injection or special cased support for these.
    While it isn't necessary to subclass this to implement your own converters
    since dependency injection can be used to access fields like the current Context,
    this class introduces some niceties around stuff like state warnings.
    """

    __slots__ = ()
    # Hides these members from pdoc-generated documentation.
    # NOTE(review): this lists "async_cache" but the property below is named
    # "async_caches" -- confirm which name is intended.
    __pdoc__: typing.ClassVar[dict[str, bool]] = {
        "async_cache": False,
        "cache_components": False,
        "intents": False,
        "requires_cache": False,
        "__pdoc__": False,
    }

    @property
    @abc.abstractmethod
    def async_caches(self) -> collections.Sequence[typing.Any]:
        """Collection of the asynchronous caches that this converter relies on.
        This will only be necessary if the suggested intents or cache_components
        aren't enabled for a converter which requires cache.
        """

    @property
    @abc.abstractmethod
    def cache_components(self) -> hikari.CacheComponents:
        """Cache component(s) the converter takes advantage of.
        .. note::
            Unless `BaseConverter.requires_cache` is `True`, these cache components
            aren't necessary but simply avoid the converter from falling back to
            REST requests.
        This will be `hikari.CacheComponents.NONE` if the converter doesn't
        make cache calls.
        """

    @property
    @abc.abstractmethod
    def intents(self) -> hikari.Intents:
        """Gateway intents this converter takes advantage of.
        .. note::
            This field is supplementary to `BaseConverter.cache_components` and
            is used to detect when the relevant component might not actually be
            being kept up to date or filled by gateway events.
        Unless `BaseConverter.requires_cache` is `True`, these intents being
        disabled won't stop this converter from working as it'll still fall
        back to REST requests.
        """

    @property
    @abc.abstractmethod
    def requires_cache(self) -> bool:
        """Whether this converter relies on the relevant cache stores to work.
        If this is `True` then this converter will not function properly
        in an environment `BaseConverter.intents` or `BaseConverter.cache_components`
        isn't satisfied and will never fallback to REST requests.
        """

    def check_client(self, client: tanjun_abc.Client, parent_name: str, /) -> None:
        """Check that this converter will work with the given client.
        This never raises any errors but simply warns the user if the converter
        is not compatible with the given client.
        Parameters
        ----------
        client : tanjun.abc.Client
            The client to check against.
        parent_name : str
            The name of the converter's parent, used for warning messages.
        """
        # TODO: upgrade this stuff to the standard interface
        assert isinstance(client, injecting.InjectorClient)
        # Warn only when the client has no cache AND none of this converter's
        # async caches are registered as type dependencies.
        if not client.cache and any(
            client.get_type_dependency(cls) is injecting.UNDEFINED for cls in self.async_caches
        ):
            if self.requires_cache:
                _LOGGER.warning(
                    f"Converter {self!r} registered with {parent_name} will always fail with a stateless client.",
                )
            elif self.cache_components:
                _LOGGER.warning(
                    f"Converter {self!r} registered with {parent_name} may not perform optimally in a stateless client.",
                )
        # Deliberately disabled branch kept by the original author:
        # elif missing_components := (self.cache_components & ~client.cache.components):
        #     _LOGGER.warning(
        # Warn when the gateway connection is missing intents this converter uses.
        if client.shards and (missing_intents := self.intents & ~client.shards.intents):
            _LOGGER.warning(
                f"Converter {self!r} registered with {parent_name} may not perform as expected "
                f"without the following intents: {missing_intents}",
            )
_DmCacheT = typing.Optional[async_cache.SfCache[hikari.DMChannel]]
_GuildChannelCacheT = typing.Optional[async_cache.SfCache[hikari.PartialChannel]]
# TODO: GuildChannelConverter
class ToChannel(BaseConverter[hikari.PartialChannel]):
"""Standard converter for channels mentions/IDs.
For a standard instance of this see `to_channel`.
"""
__slots__ = ("_include_dms",)
def __init__(self, *, include_dms: bool = True) -> None:
"""Initialise a to channel converter.
Other Parameters
----------------
include_dms : bool
Whether to include DM channels in the results.
May lead to a lot of extra fallbacks to REST requests if
the client doesn't have a registered async cache for DMs.
Defaults to `True`.
"""
self._include_dms = include_dms
@property
@property
@property
@property
ChannelConverter = ToChannel
"""Deprecated alias of `ToChannel`."""
_EmojiCacheT = typing.Optional[async_cache.SfCache[hikari.KnownCustomEmoji]]
class ToEmoji(BaseConverter[hikari.KnownCustomEmoji]):
"""Standard converter for custom emojis.
For a standard instance of this see `to_emoji`.
.. note::
If you just want to convert inpute to a `hikari.Emoji`, `hikari.CustomEmoji`
or `hikari.UnicodeEmoji` without making any cache or REST calls then you
can just use the relevant `hikari.Emoji.parse`, `hikari.CustomEmoji.parse`
or `hikari.UnicodeEmoji.parse` methods.
"""
__slots__ = ()
@property
@property
@property
@property
EmojiConverter = ToEmoji
"""Deprecated alias of `ToEmoji`."""
_GuildCacheT = typing.Optional[async_cache.SfCache[hikari.Guild]]
class ToGuild(BaseConverter[hikari.Guild]):
"""Stanard converter for guilds.
For a standard instance of this see `to_guild`.
"""
__slots__ = ()
@property
@property
@property
@property
GuildConverter = ToGuild
"""Deprecated alias of `ToGuild`."""
_InviteCacheT = typing.Optional[async_cache.AsyncCache[str, hikari.InviteWithMetadata]]
class ToInvite(BaseConverter[hikari.Invite]):
"""Standard converter for invites."""
__slots__ = ()
@property
@property
@property
@property
InviteConverter = ToInvite
"""Deprecated alias of `ToInvite`."""
class ToInviteWithMetadata(BaseConverter[hikari.InviteWithMetadata]):
"""Standard converter for invites with metadata.
For a standard instance of this see `to_invite_with_metadata`.
.. note::
Unlike `InviteConverter`, this converter is cache dependent.
"""
__slots__ = ()
@property
@property
@property
@property
InviteWithMetadataConverter = ToInviteWithMetadata
"""Deprecated alias of `ToInviteWithMetadata`."""
_MemberCacheT = typing.Optional[async_cache.SfGuildBound[hikari.Member]]
class ToMember(BaseConverter[hikari.Member]):
"""Standard converter for guild members.
For a standard instance of this see `to_member`.
This converter allows both mentions, raw IDs and partial usernames/nicknames
and only works within a guild context.
"""
__slots__ = ()
@property
@property
@property
@property
MemberConverter = ToMember
"""Deprecated alias of `ToMember`."""
_PresenceCacheT = typing.Optional[async_cache.SfGuildBound[hikari.MemberPresence]]
class ToPresence(BaseConverter[hikari.MemberPresence]):
"""Standard converter for presences.
For a standard instance of this see `to_presence`.
This converter is cache dependent and only works in a guild context.
"""
__slots__ = ()
@property
@property
@property
@property
PresenceConverter = ToPresence
"""Deprecated alias of `ToPresence`."""
_RoleCacheT = typing.Optional[async_cache.SfCache[hikari.Role]]
class ToRole(BaseConverter[hikari.Role]):
"""Standard converter for guild roles.
For a standard instance of this see `to_role`.
"""
__slots__ = ()
@property
@property
@property
@property
RoleConverter = ToRole
"""Deprecated alias of `ToRole`."""
_UserCacheT = typing.Optional[async_cache.SfCache[hikari.User]]
class ToUser(BaseConverter[hikari.User]):
"""Standard converter for users.
For a standard instance of this see `to_user`.
"""
__slots__ = ()
@property
@property
@property
@property
UserConverter = ToUser
"""Deprecated alias of `ToUser`."""
_VoiceStateCacheT = typing.Optional[async_cache.SfGuildBound[hikari.VoiceState]]
class ToVoiceState(BaseConverter[hikari.VoiceState]):
"""Standard converter for voice states.
For a standard instance of this see `to_voice_state`.
.. note::
This converter is cache dependent and only works in a guild context.
"""
__slots__ = ()
@property
@property
@property
@property
VoiceStateConverter = ToVoiceState
"""Deprecated alias of `ToVoiceState`."""
_IDSearcherSig = collections.Callable[[_ArgumentT], list[hikari.Snowflake]]
_SNOWFLAKE_REGEX = re.compile(r"<[@&?!#a]{0,3}(?::\w+:)?(\d+)>")
parse_snowflake: _IDMatcherSig = _make_snowflake_parser(_SNOWFLAKE_REGEX)
"""Parse a snowflake from a string or int value.
Parameters
----------
value: str | int
The value to parse (this argument can only be passed positionally).
Other Parameters
----------------
message: str
The error message to raise if the value cannot be parsed.
Returns
-------
hikari.Snowflake
The parsed snowflake.
Raises
------
ValueError
If the value cannot be parsed.
"""
search_snowflakes: _IDSearcherSig = _make_snowflake_searcher(_SNOWFLAKE_REGEX)
"""Get the snowflakes in a string.
Parameters
----------
value: str | int
The value to parse (this argument can only be passed positionally).
Returns
-------
list[hikari.Snowflake]
List of the snowflakes in the string.
"""
_CHANNEL_ID_REGEX = re.compile(r"<#(\d+)>")
parse_channel_id: _IDMatcherSig = _make_snowflake_parser(_CHANNEL_ID_REGEX)
"""Parse a channel ID from a string or int value.
Parameters
----------
value: str | int
The value to parse (this argument can only be passed positionally).
Other Parameters
----------------
message: str
The error message to raise if the value cannot be parsed.
Returns
-------
hikari.Snowflake
The parsed channel ID.
Raises
------
ValueError
If the value cannot be parsed.
"""
search_channel_ids: _IDSearcherSig = _make_snowflake_searcher(_CHANNEL_ID_REGEX)
"""Get the channel IDs in a string.
Parameters
----------
value: str | int
The value to parse (this argument can only be passed positionally).
Returns
-------
list[hikari.Snowflake]
List of the channel IDs in the string.
"""
_EMOJI_ID_REGEX = re.compile(r"<a?:\w+:(\d+)>")
parse_emoji_id: _IDMatcherSig = _make_snowflake_parser(_EMOJI_ID_REGEX)
"""Parse an Emoji ID from a string or int value.
Parameters
----------
value: str | int
The value to parse (this argument can only be passed positionally).
Other Parameters
----------------
message: str
The error message to raise if the value cannot be parsed.
Returns
-------
hikari.Snowflake
The parsed Emoji ID.
Raises
------
ValueError
If the value cannot be parsed.
"""
search_emoji_ids: _IDSearcherSig = _make_snowflake_searcher(_EMOJI_ID_REGEX)
"""Get the emoji IDs in a string.
Parameters
----------
value: str | int
The value to parse (this argument can only be passed positionally).
Returns
-------
list[hikari.Snowflake]
List of the emoji IDs in the string.
"""
_ROLE_ID_REGEX = re.compile(r"<@&(\d+)>")
parse_role_id: _IDMatcherSig = _make_snowflake_parser(_ROLE_ID_REGEX)
"""Parse a role ID from a string or int value.
Parameters
----------
value: str | int
The value to parse (this argument can only be passed positionally).
Other Parameters
----------------
message: str
The error message to raise if the value cannot be parsed.
Returns
-------
hikari.Snowflake
The parsed role ID.
Raises
------
ValueError
If the value cannot be parsed.
"""
search_role_ids: _IDSearcherSig = _make_snowflake_searcher(_ROLE_ID_REGEX)
"""Get the role IDs in a string.
Parameters
----------
value: str | int
The value to parse (this argument can only be passed positionally).
Returns
-------
list[hikari.Snowflake]
List of the role IDs in the string.
"""
_USER_ID_REGEX = re.compile(r"<@!?(\d+)>")
parse_user_id: _IDMatcherSig = _make_snowflake_parser(_USER_ID_REGEX)
"""Parse a user ID from a string or int value.
Parameters
----------
value: str | int
The value to parse (this argument can only be passed positionally).
Other Parameters
----------------
message: str
The error message to raise if the value cannot be parsed.
Returns
-------
hikari.Snowflake
The parsed user ID.
Raises
------
ValueError
If the value cannot be parsed.
"""
search_user_ids: _IDSearcherSig = _make_snowflake_searcher(_USER_ID_REGEX)
"""Get the user IDs in a string.
Parameters
----------
value: str | int
The value to parse (this argument can only be passed positionally).
Returns
-------
list[hikari.Snowflake]
List of the user IDs in the string.
"""
defragment_url: collections.Callable[[str], urlparse.DefragResult] = _build_url_parser(urlparse.urldefrag)
"""Convert an argument to a defragmented URL.
Parameters
----------
value: str
The value to parse (this argument can only be passed positionally).
Returns
-------
urllib.parse.DefragResult
The parsed URL.
Raises
------
ValueError
If the argument couldn't be parsed.
"""
parse_url: collections.Callable[[str], urlparse.ParseResult] = _build_url_parser(urlparse.urlparse)
"""Convert an argument to a parsed URL.
Parameters
----------
value: str
The value to parse (this argument can only be passed positionally).
Returns
-------
urllib.parse.ParseResult
The parsed URL.
Raises
------
ValueError
If the argument couldn't be parsed.
"""
split_url: collections.Callable[[str], urlparse.SplitResult] = _build_url_parser(urlparse.urlsplit)
"""Convert an argument to a split URL.
Parameters
----------
value: str
The value to parse (this argument can only be passed positionally).
Returns
-------
urllib.parse.SplitResult
The split URL.
Raises
------
ValueError
If the argument couldn't be parsed.
"""
_DATETIME_REGEX = re.compile(r"<-?t:(\d+)(?::\w)?>")


def to_datetime(value: str, /) -> datetime.datetime:
    """Convert a Discord formatted timestamp to a timezone-aware datetime.

    Details of the `<t:{unix}:{style}>` markup this parses can be found at
    https://discord.com/developers/docs/reference#message-formatting-timestamp-styles

    Parameters
    ----------
    value: str
        The raw string to parse (positional-only).

    Returns
    -------
    datetime.datetime
        The UTC datetime encoded by the first timestamp found in `value`.

    Raises
    ------
    ValueError
        If `value` contains no Discord timestamp.
    """
    match = _DATETIME_REGEX.search(value)
    if match is None:
        raise ValueError("Not a valid datetime") from None
    unix_time = int(match.group(1))
    return datetime.datetime.fromtimestamp(unix_time, tz=datetime.timezone.utc)
_VALID_DATETIME_STYLES = frozenset(("t", "T", "d", "D", "f", "F", "R"))


def from_datetime(value: datetime.datetime, /, *, style: str = "f") -> str:
    """Render a timezone-aware datetime as Discord's timestamp markup.

    The produced `<t:{unix}:{style}>` format is documented at
    https://discord.com/developers/docs/reference#message-formatting-timestamp-styles

    Parameters
    ----------
    value: datetime.datetime
        The timezone-aware datetime to render.

    Other Parameters
    ----------------
    style: str
        One of Discord's timestamp styles, as listed at
        https://discord.com/developers/docs/reference#message-formatting-formats
        Defaults to `"f"`.

    Returns
    -------
    str
        The Discord timestamp markup.

    Raises
    ------
    ValueError
        If an invalid style is provided, or if `value` is timezone naive.
    """
    if style not in _VALID_DATETIME_STYLES:
        raise ValueError(f"Invalid style: {style}")
    if value.tzinfo is None:
        raise ValueError("Cannot convert naive datetimes, please specify a timezone.")
    unix_time = round(value.timestamp())
    return f"<t:{unix_time}:{style}>"
_YES_VALUES = frozenset(("y", "yes", "t", "true", "on", "1"))
_NO_VALUES = frozenset(("n", "no", "f", "false", "off", "0"))


def to_bool(value: str, /) -> bool:
    """Convert user string input into a boolean value.

    Parameters
    ----------
    value: str
        The value to convert; case and surrounding whitespace are ignored.

    Returns
    -------
    bool
        `True` for affirmative inputs (e.g. "yes", "on", "1"), `False` for
        negative ones (e.g. "no", "off", "0").

    Raises
    ------
    ValueError
        If the value matches neither the affirmative nor negative sets.
    """
    cleaned = value.strip().lower()
    if cleaned in _YES_VALUES:
        return True
    if cleaned in _NO_VALUES:
        return False
    raise ValueError(f"Invalid bool value `{cleaned}`")
def to_color(argument: _ArgumentT, /) -> hikari.Color:
    """Convert user input to a `hikari.colors.Color` object."""
    # Non-string input (ints/floats) is handed straight to hikari.
    if not isinstance(argument, str):
        return hikari.Color.of(argument)
    parts = argument.split(" ")
    if all(part.isdigit() for part in parts):
        # Either a single raw integer or space-separated RGB components.
        if len(parts) == 1:
            return hikari.Color.of(int(parts[0]))
        return hikari.Color.of([int(part) for part in parts])
    if len(parts) == 1:
        # A single non-numeric token (e.g. a hex code) -- let hikari parse it.
        return hikari.Color.of(parts[0])
    raise ValueError("Not a valid color representation")
_TYPE_OVERRIDES: dict[collections.Callable[..., typing.Any], collections.Callable[[str], typing.Any]] = {
bool: to_bool,
bytes: lambda d: bytes(d, "utf-8"),
bytearray: lambda d: bytearray(d, "utf-8"),
datetime.datetime: to_datetime,
hikari.Snowflake: parse_snowflake,
urlparse.DefragResult: defragment_url,
urlparse.ParseResult: parse_url,
urlparse.SplitResult: split_url,
}
to_channel: typing.Final[ToChannel] = ToChannel()
"""Convert user input to a `hikari.PartialChannel` object."""
to_colour: typing.Final[collections.Callable[[_ArgumentT], hikari.Color]] = to_color
"""Convert user input to a `hikari.Color` object."""
to_emoji: typing.Final[ToEmoji] = ToEmoji()
"""Convert user input to a cached `hikari.KnownCustomEmoji` object.
.. note::
If you just want to convert inpute to a `hikari.Emoji`, `hikari.CustomEmoji`
or `hikari.UnicodeEmoji` without making any cache or REST calls then you
can just use the relevant `hikari.Emoji.parse`, `hikari.CustomEmoji.parse`
or `hikari.UnicodeEmoji.parse` methods.
"""
to_guild: typing.Final[ToGuild] = ToGuild()
"""Convert user input to a `hikari.Guild` object."""
to_invite: typing.Final[ToInvite] = ToInvite()
"""Convert user input to a cached `hikari.InviteWithMetadata` object."""
to_invite_with_metadata: typing.Final[ToInviteWithMetadata] = ToInviteWithMetadata()
"""Convert user input to a `hikari.Invite` object."""
to_member: typing.Final[ToMember] = ToMember()
"""Convert user input to a `hikari.Member` object."""
to_presence: typing.Final[ToPresence] = ToPresence()
"""Convert user input to a cached `hikari.MemberPresence`."""
to_role: typing.Final[ToRole] = ToRole()
"""Convert user input to a `hikari.Role` object."""
to_snowflake: typing.Final[collections.Callable[[_ArgumentT], hikari.Snowflake]] = parse_snowflake
"""Convert user input to a `hikari.Snowflake`.
.. note::
This also range validates the input.
"""
to_user: typing.Final[ToUser] = ToUser()
"""Convert user input to a `hikari.User` object."""
to_voice_state: typing.Final[ToVoiceState] = ToVoiceState()
"""Convert user input to a cached `hikari.VoiceState`."""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
3075,
400,
261,
25,
3303,
62,
5715,
28,
18,
198,
2,
347,
10305,
513,
12,
2601,
682,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
12131,
12,
1238,
1828,
11,
38996,
... | 2.762002 | 8,332 |
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Auth import Auth
from PLC.Nodes import Node, Nodes
class DeleteNode(Method):
    """
    Mark an existing node as deleted.
    PIs and techs may only delete nodes at their own sites. Admins may
    delete nodes at any site.
    Returns 1 if successful, faults otherwise.
    """

    # Roles allowed to invoke this method; the per-site restriction for
    # 'pi' and 'tech' is presumably enforced by the call machinery -- confirm.
    roles = ['admin', 'pi', 'tech']

    # Target node may be identified by either numeric ID or hostname.
    accepts = [
        Auth(),
        Mixed(Node.fields['node_id'],
              Node.fields['hostname'])
    ]

    returns = Parameter(int, '1 if successful')
| [
6738,
350,
5639,
13,
37,
13185,
1330,
1635,
198,
6738,
350,
5639,
13,
17410,
1330,
11789,
198,
6738,
350,
5639,
13,
36301,
1330,
25139,
2357,
11,
35250,
198,
6738,
350,
5639,
13,
30515,
1330,
26828,
198,
6738,
350,
5639,
13,
45,
4147,... | 2.728111 | 217 |
import json
import urllib.request
import urllib.error
from enum import Enum
from flask import Flask, render_template
app = Flask(__name__)
STATUS = {}
@app.route("/")
@app.route("/happy")
| [
11748,
33918,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
2956,
297,
571,
13,
18224,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
... | 2.880597 | 67 |
import heterocl as hcl
import numpy as np
test_conv2D_lb_wb() | [
11748,
14445,
38679,
355,
289,
565,
198,
11748,
299,
32152,
355,
45941,
198,
198,
9288,
62,
42946,
17,
35,
62,
23160,
62,
39346,
3419
] | 2.583333 | 24 |
"""
On a staircase, the i-th step has some non-negative cost cost[i] assigned (0 indexed).
Once you pay the cost, you can either climb one or two steps. You need to find minimum cost to reach the top of the floor, and you can either start from the step with index 0, or the step with index 1.
Example 1:
Input: cost = [10, 15, 20]
Output: 15
Explanation: Cheapest is start on cost[1], pay that cost and go to the top.
Example 2:
Input: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
Output: 6
Explanation: Cheapest is start on cost[0], and only step on 1s, skipping cost[3].
Note:
cost will have a length in the range [2, 1000].
Every cost[i] will be an integer in the range [0, 999].
"""
| [
37811,
198,
2202,
257,
27656,
11,
262,
1312,
12,
400,
2239,
468,
617,
1729,
12,
31591,
1575,
1575,
58,
72,
60,
8686,
357,
15,
41497,
737,
198,
198,
7454,
345,
1414,
262,
1575,
11,
345,
460,
2035,
12080,
530,
393,
734,
4831,
13,
92... | 3.16055 | 218 |
"""Module for running all unit tests of panter."""
import numpy as np
from tests.background_subtraction import do_backgroundfittest
from tests.basicfit import do_histtestfit
from tests.filter import do_histtestfilter
from tests.hist_output import do_histtestout
from tests.histogram import do_histtestbasic
def run_all():
    """Running all unit tests for a global test of panter."""
    results = [
        do_histtestbasic(),
        do_histtestout(),
        do_histtestfilter(),
        do_histtestfit(),
        do_backgroundfittest(),
    ]
    print(results)
    # Each test is expected to report success as 1/True, so the sum of all
    # results must equal the number of tests run.
    assert np.asarray(results).sum() == len(results)
    print("GREAT SUCCESS. \nAll unit tests passed. \nVERY NICE. ")


if __name__ == "__main__":
    run_all()
| [
37811,
26796,
329,
2491,
477,
4326,
5254,
286,
3425,
353,
526,
15931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
5254,
13,
25249,
62,
7266,
83,
7861,
1330,
466,
62,
25249,
69,
715,
395,
198,
6738,
5254,
13,
35487,
11147,... | 2.716418 | 268 |
# Static-analysis test fixture for the "frappe-missing-translate-function-python"
# rule: lines tagged `ruleid:` are EXPECTED to be flagged (user-facing strings
# passed without the `_()` translate wrapper) and lines tagged `ok:` must not
# be. Do not "fix" the flagged calls -- they are the rule's positive cases.
import frappe
from frappe import msgprint, throw, _
# ruleid: frappe-missing-translate-function-python
throw("Error Occured")
# ruleid: frappe-missing-translate-function-python
frappe.throw("Error Occured")
# ruleid: frappe-missing-translate-function-python
frappe.msgprint("Useful message")
# ruleid: frappe-missing-translate-function-python
msgprint("Useful message")
# ok: frappe-missing-translate-function-python
translatedmessage = _("Hello")
# ok: frappe-missing-translate-function-python
throw(translatedmessage)
# ok: frappe-missing-translate-function-python
msgprint(translatedmessage)
# ok: frappe-missing-translate-function-python
msgprint(_("Helpful message"))
# ok: frappe-missing-translate-function-python
frappe.throw(_("Error occured"))
| [
11748,
5306,
27768,
198,
6738,
5306,
27768,
1330,
31456,
4798,
11,
3714,
11,
4808,
628,
198,
2,
3896,
312,
25,
5306,
27768,
12,
45688,
12,
7645,
17660,
12,
8818,
12,
29412,
198,
16939,
7203,
12331,
10775,
1522,
4943,
198,
198,
2,
3896... | 3.105691 | 246 |
import random
import string
import app
import json
import unittest
from datetime import datetime

# The Flask application under test.
App = app.app

# Test fixtures keyed by expected validity: under True are JSON payloads
# that should be accepted, under False are payloads that should be rejected
# -- presumably because the md5checksum does not match the record (note the
# "ffffff..." checksums); confirm against the validation logic in `app`.
# Payloads are stored as raw JSON strings (a list of records or a single
# record) to be fed to the endpoint as-is.
testSet = {
    True : [
        '''[{"date": "2015-05-12T14:36:00.451765",
        "md5checksum": "e8c83e232b64ce94fdd0e4539ad0d44f",
        "name": "John Doe",
        "uid": "1"},
        {"date": "2015-05-13T14:38:00.451765",
        "md5checksum": "b419795d50db2a35e94c8364978d898f",
        "name": "Jane Doe",
        "uid": "2"}]''',
        '''{"date": "2015-05-12T14:37:00.451765",
        "md5checksum": "e8c83e232b64ce94fdd0e4539ad0d44f",
        "name": "Carl Doe",
        "uid": "3"}'''
    ],
    False : [
        '''[{"date": "2015-05-12T14:36:00.451765",
        "md5checksum": "fffffff32b64ce94fdd0e4539ad0d44f",
        "name": "John Doe",
        "uid": "11"},
        {"date": "2015-05-13T14:38:00.451765",
        "md5checksum": "b419795d50db2a35e94c8364978d898f",
        "name": "Jane Doe",
        "uid": "12"}]''',
        '''{"date": "2015-05-12T14:37:00.451765",
        "md5checksum": "ffffff232b64ce94fdd0e4539ad0d44f",
        "name": "Carl Doe",
        "uid": "13"}''',
        '''{"date": "2015-05-14T14:37:00.451765",
        "md5checksum": "ffffff232b64ce94fdd0e4539ad0d44f",
        "name": "Rozalie Doe",
        "uid": "14"}'''
    ]
}

if __name__ == '__main__':
    unittest.main()
| [
11748,
4738,
198,
11748,
4731,
198,
11748,
598,
198,
11748,
33918,
198,
11748,
555,
715,
395,
198,
6738,
4818,
8079,
1330,
4818,
8079,
628,
198,
4677,
796,
598,
13,
1324,
198,
198,
9288,
7248,
796,
1391,
198,
220,
220,
220,
6407,
1058... | 1.632609 | 920 |
# Fits a thermal-conductivity model to tabulated data: reads a whitespace-
# delimited table from ThermalConductivity_temperature.dat (first line is
# column names), then curve-fits each conductivity component against
# temperature and pressure.
import numpy as np
import pandas as pd
from scipy import optimize
from sklearn.linear_model import LinearRegression  # NOTE(review): imported but unused here

filename = "ThermalConductivity_temperature.dat"
with open(filename, 'r') as f:
    lines = [k.strip() for k in f.readlines()]

rows = []
for i, line in enumerate(lines):
    line_args = [k.strip() for k in line.split()]
    if i == 0:
        # Header row supplies the column names.
        column_names = line_args
    else:
        row = [float(k) for k in line_args]
        rows.append(row)

# print column names
print(column_names)

# create a pandas dataframe
df = pd.DataFrame(data=rows, columns=column_names)

# Fit each conductivity tensor component (k11, k22, k33) as a function of
# temperature T and pressure P.
for k in ['k11', 'k22', 'k33']:
    print(80 * '-')
    print(k)
    y = df[k].values.tolist()
    x = df[['T', 'P']].values.tolist()
    # NOTE(review): thermal_conductivity_formula is not defined in this file
    # view -- presumably imported or defined elsewhere; confirm.
    popt, pcov = optimize.curve_fit(thermal_conductivity_formula, x, y)
    print(popt)
    print(pcov)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
629,
541,
88,
1330,
27183,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
44800,
8081,
2234,
198,
34345,
796,
366,
35048,
7617,
34,
12920,
3458,
62,
... | 2.394737 | 342 |
import os
prompt = ':prompt:'
int_if_name_in_openstack = 'infra-int'
reg = os.getenv('REG')
reg_port = os.getenv('REG_PORT')
reg_path = os.getenv('REG_PATH')
test_image = "kubernetespause"
source_folder = os.getenv('SOURCE_FOLDER')
vnf_id = os.getenv('STACK_ID')
cbam_py = "{}/scripts/cbam.py".format(source_folder)
registry_cert = '/etc/docker-registry/registry?.pem'
registry_key = '/etc/docker-registry/registry?-key.pem'
registry_cacert = '/etc/docker-registry/ca.pem'
ROBOT_LOG_PATH = "/tmp/"
registry = {'url': reg, 'port': reg_port}
int_sshd_config_name = "sshd_config_int"
ext_sshd_config_name = "sshd_config_ext"
sshd_port = "22"
dns_masq_port = "53"
kube_dns_port = "10053"
min_dns_replica = 1
max_dns_replica = 3
test_address1 = 'google.com'
test_address2 = 'tiller.kube-system.svc.rec.io'
crf_node_openstack_file_types = ["user_config.yaml"]
pressure_default_timeout = 600
# TC014 constant
INFLUXDB_URL = "http://influxdb.kube-system.svc.nokia.net:8086"
GRAFANA_URL = "http://monitoring-grafana.kube-system.svc.nokia.net:8080"
# TC016 constant
docker_size_quota = '2G'
# TC Fluentd
ELASTICSEARCH_URL = "http://elasticsearch-logging.kube-system.svc.nokia.net:9200"
USER_CONFIG_PATH = "/opt/nokia/userconfig/user_config.yaml"
ES_IDX_PREFIX = "caas"
test_chart = dict(name="busybox3", release_name="custom-oper-test", chart_version="3.3.3",
repo="default/",
kube_objects=[dict(obj_type="pod", obj_name="busybox3", obj_count="1",
namespace="kube-system")])
su_test_chart = dict(name="su-test", release_name="su-test", chart_version="1.1.1",
su_version="1.1.1", repo="default/",
kube_objects=[dict(obj_type="pod", obj_name="su-test", obj_count="10",
namespace="kube-system")])
su_test_chart1 = dict(name="su-test", release_name="su-test", chart_version="1.1.2",
su_version="1.1.1", repo="default/",
kube_objects=[dict(obj_type="pod", obj_name="su-test", obj_count="10",
namespace="kube-system")])
su_test_chart_f = dict(name="su-test_f", release_name="su-test", chart_version="1.1.4",
su_version="1.1.1", repo="default/",
kube_objects=[dict(obj_type="pod", obj_name="su-test_f", obj_count="10",
namespace="kube-system")])
pv_test_pod = dict(obj_type="pod", obj_name="pv-test-deployment", obj_count="2", namespace="default")
pv_test_pvc = dict(obj_type="pvc", obj_name="pvc", obj_count="1", namespace="default")
kube_controller_manager = dict(obj_type="pod", obj_name="kube-controller-manager", obj_count="3", namespace="kube-system")
influxdb_service = dict(obj_type="service", obj_name="influxdb", obj_count="1", namespace="kube-system")
influxdb_deployment = dict(obj_type="deployment", obj_name="influxdb", obj_count="1", namespace="kube-system")
grafana_service = dict(obj_type="service", obj_name="monitoring-grafana", obj_count="1", namespace="kube-system")
grafana_deployment = dict(obj_type="deployment", obj_name="monitoring-grafana", obj_count="1", namespace="kube-system")
danmnet_pods1 = dict(obj_type="pod", obj_name="danmnet-pods1", obj_count="4", namespace="default", ip_list=[])
danmnet_pods2 = dict(obj_type="pod", obj_name="danmnet-pods2", obj_count="3", namespace="default", ip_list=[])
danmnet_pods3 = dict(obj_type="pod", obj_name="danmnet-pods3", obj_count="4", namespace="default", ip_list=[])
danmnet_pods4 = dict(obj_type="pod", obj_name="danmnet-pods4", obj_count="5", namespace="kube-system",ip_list=[])
danmnet_pods5 = dict(obj_type="pod", obj_name="danmnet-pods5", obj_count="1", namespace="kube-system",ip_list=[])
danmnet_pods6 = dict(obj_type="pod", obj_name="danmnet-pods6", obj_count="6", namespace="default", ip_list=[])
danmnet_pods7 = dict(obj_type="pod", obj_name="danmnet-pods7", obj_count="5", namespace="default", ip_list=[])
danmnet_pods8 = dict(obj_type="pod", obj_name="danmnet-pods8", obj_count="1", namespace="default", ip_list=[])
danmnet_pods9 = dict(obj_type="pod", obj_name="danmnet-pods9", obj_count="1", namespace="kube-system",ip_list=[])
danmnet_pods10 = dict(obj_type="pod", obj_name="danmnet-pods10", obj_count="1", namespace="default", ip_list=[])
danmnet_pods11 = dict(obj_type="pod", obj_name="danmnet-pods11", obj_count="1", namespace="default", ip_list=[])
danmnet_pods12 = dict(obj_type="pod", obj_name="danmnet-pods12", obj_count="1", namespace="default", ip_list=[])
danmnet_pods13 = dict(obj_type="pod", obj_name="danmnet-pods13", obj_count="1", namespace="default", ip_list=[])
danmnet_pods14 = dict(obj_type="pod", obj_name="danmnet-pods14", obj_count="1", namespace="default", ip_list=[])
danmnet_pods_all = dict(obj_type="pod", obj_name="danmnet-pods", obj_count="0", namespace="default", ip_list=[])
php_apache_pod = dict(obj_type="pod", obj_name="php-apache", obj_count="1", namespace="default")
podinfo_pod = dict(obj_type="pod", obj_name="podinfo", obj_count="2", namespace="kube-system")
load_generator_for_apache = dict(obj_type="pod", obj_name="load-generator-for-apache", obj_count="1", namespace="default")
http_traffic_gen = dict(obj_type="pod", obj_name="http-traffic-gen", obj_count="1", namespace="default")
pods_skipped = ['load-generator-for-apache', 'php-apache-deployment', 'pv-test-deployment', 'danmnet-pods',
test_chart['kube_objects'][0]['obj_name'], 'registry-update', 'su-test', 'cpu-pooling', 'swift-update',
'su-test', 'podinfo', 'tennet-pod']
services_skipped = ['selinux-policy-migrate-local-changes', 'cloud-final.service', 'kdump.service',
'postfix.service']
danmnets_properties = {
'd_test-net1': {'name':"test-net1", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"201", 'routes':"", 'vxlan':"", 'vlan':""},
'd_test-net2': {'name':"test-net2", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"vx_test-net2", 'rt_tables':"11", 'routes':"10.0.0.0/32: 10.0.0.50", 'vxlan':"50", 'vlan':""},
'ks_test-net2': {'name':"test-net2", 'Validation':"true", 'NetworkType':"", 'namespace':"kube-system", 'host_if':"vx_test-net2", 'rt_tables':"11", 'routes':"10.1.1.0/32: 10.1.1.1", 'vxlan':"50", 'vlan':""},
'd_test-net4': {'name':"test-net4", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"13", 'routes':"", 'vxlan':"", 'vlan':""},
'd_test-net5': {'name':"test-net5", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"14", 'routes':"", 'vxlan':"", 'vlan':""},
'd_test-net6': {'name':"test-net6", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"vx_test-net6", 'rt_tables':"", 'routes':"", 'vxlan':"52", 'vlan':""},
'd_test-net7': {'name':"test-net7", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"vx_test-net7", 'rt_tables':"15", 'routes':"", 'vxlan':"53", 'vlan':""},
'd_test-net8': {'name':"test-net8", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"vx_test-net8", 'rt_tables':"15", 'routes':"10.10.0.0/32: 10.10.0.1", 'vxlan':"50", 'vlan':""},
'd_test-net13': {'name':"test-net13", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"vx_test-net13", 'rt_tables':"20", 'routes':"", 'vxlan':"56", 'vlan':""},
'd_test-net15': {'name':"test-net15", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"test-net15.1", 'rt_tables':"22", 'routes':"", 'vxlan':"", 'vlan':"1"},
'd_test-net16': {'name':"test-net16", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"test-net16.4094", 'rt_tables':"23", 'routes':"", 'vxlan':"", 'vlan':"4094"},
'd_test-net20': {'name':"test-net20", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"vx_test-net20", 'rt_tables':"27", 'routes':"", 'vxlan':"", 'vlan':""},
'd_test-net21': {'name':"test-net21", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"vx_test-net21", 'rt_tables':"28", 'routes':"", 'vxlan':"16777214", 'vlan':""},
'd_test-net23': {'name':"test-net23", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"vx_test-net23", 'rt_tables':"30", 'routes':"", 'vxlan':"", 'vlan':""},
'd_test-net24': {'name':"test-net24", 'Validation':"false", 'NetworkType':"flannel", 'namespace':"default", 'host_if':"", 'rt_tables':"31", 'routes':"", 'vxlan':"58", 'vlan':"57"},
'd_test-net25': {'name':"test-net25", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"test-net25.58", 'rt_tables':"10", 'routes':"10.10.0.0/32: 10.10.0.40", 'vxlan':"", 'vlan':"58"},
'd_test-net26': {'name':"test-net26", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"vx_test-net26", 'rt_tables':"10", 'routes':"", 'vxlan':"60", 'vlan':""},
'ks_test-net27': {'name':"test-net27", 'Validation':"true", 'NetworkType':"", 'namespace':"kube-system",'host_if':"vx_test-net27", 'rt_tables':"10", 'routes':"", 'vxlan':"61", 'vlan':""},
'd_test-net28': {'name':"test-net28", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"33", 'routes':"", 'vxlan':"50", 'vlan':""},
'ks_test-net29': {'name':"test-net29", 'Validation':"true", 'NetworkType':"", 'namespace':"kube-system",'host_if':"", 'rt_tables':"34", 'routes':"", 'vxlan':"50", 'vlan':""},
'd_test-net30': {'name':"test-net30", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"10", 'routes':"10.10.0.0/32: 10.10.0.40", 'vxlan':"", 'vlan':""},
}
danmnets_error = {
'd_test-net3': {'name':"test-net3", 'Validation':"false", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"12", 'routes':"", 'vxlan':"51", 'vlan':""},
'd_test-net9': {'name':"test-net9", 'Validation':"false", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"155", 'routes':"", 'vxlan':"55", 'vlan':""},
'd_test-net10': {'name':"test-net10", 'Validation':"false", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"18", 'routes':"", 'vxlan':"56", 'vlan':""},
'd_test-net11': {'name':"test-net11", 'Validation':"false", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"18", 'routes':"", 'vxlan':"55", 'vlan':""},
'd_test-net12': {'name':"test-net12", 'Validation':"false", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"19", 'routes':"", 'vxlan':"55", 'vlan':""},
'd_test-net14': {'name':"test-net14", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"21", 'routes':"", 'vxlan':"", 'vlan':""},
'd_test-net17': {'name':"test-net17", 'Validation':"false", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"24", 'routes':"", 'vxlan':"", 'vlan':"4095"},
'd_test-net18': {'name':"test-net18", 'Validation':"false", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"25", 'routes':"", 'vxlan':"", 'vlan':"4096"},
'd_test-net19': {'name':"test-net19", 'Validation':"true", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"26", 'routes':"", 'vxlan':"", 'vlan':""},
'd_test-net22': {'name':"test-net22", 'Validation':"false", 'NetworkType':"", 'namespace':"default", 'host_if':"", 'rt_tables':"29", 'routes':"", 'vxlan':"16777215", 'vlan':""},
}
cpu_pooling_pod1 = dict(obj_type="pod", obj_name="cpu-pooling-1", namespace="default", obj_count="1")
cpu_pooling_pod2 = dict(obj_type="pod", obj_name="cpu-pooling-2", namespace="default", obj_count="1")
cpu_pooling_pod3 = dict(obj_type="pod", obj_name="cpu-pooling-3", namespace="default", obj_count="1")
cpu_pooling_pod4 = dict(obj_type="pod", obj_name="cpu-pooling-4", namespace="default", obj_count="1")
cpu_pooling_pod5 = dict(obj_type="pod", obj_name="cpu-pooling-5", namespace="default", obj_count="1")
cpu_pooling_pod6 = dict(obj_type="pod", obj_name="cpu-pooling-6", namespace="default", obj_count="1")
cpu_pooling_pod7 = dict(obj_type="pod", obj_name="cpu-pooling-7", namespace="default", obj_count="1")
cpu_pooling_pod8 = dict(obj_type="pod", obj_name="cpu-pooling-8", namespace="default", obj_count="1")
cpu_pooling_pod9 = dict(obj_type="replicaset", obj_name="cpu-pooling-9", namespace="default", obj_count="1")
cpu_pooling_pod10 = dict(obj_type="replicaset", obj_name="cpu-pooling-10", namespace="default", obj_count="1")
cpu_pooling_pod11 = dict(obj_type="replicaset", obj_name="cpu-pooling-11", namespace="default", obj_count="1")
cpu_pooling_setter = dict(obj_type="pod", obj_name="cpu-setter", namespace="kube-system", obj_count="1")
cpu_pooling_cm_name = "cpu-pooler-configmap"
clusternetworks_properties = {
'cnet_01': {'name': 'cnet-01', 'NetworkType': 'ipvlan', 'host_if': '', 'iface_type': 'ext'},
'cnet_02': {'name': 'cnet-02', 'NetworkType': 'ipvlan', 'host_if': 'cnet02.502', 'iface_type': 'ext'},
'cnet_03': {'name': 'cnet-03', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet03', 'iface_type': 'int'},
'cnet_04': {'name': 'cnet-04', 'NetworkType': 'ipvlan', 'host_if': 'cnet04.504', 'iface_type': 'ext'},
'cnet_05': {'name': 'cnet-05', 'NetworkType': 'ipvlan', 'host_if': '', 'iface_type': 'ext'},
'cnet_06': {'name': 'cnet-06', 'NetworkType': 'ipvlan', 'host_if': 'cnet06.506', 'iface_type': 'ext'},
'cnet_07': {'name': 'cnet-07', 'NetworkType': 'ipvlan', 'host_if': '', 'iface_type': 'int'},
'cnet_08': {'name': 'cnet-08', 'NetworkType': 'ipvlan', 'host_if': '', 'iface_type': ''},
'cnet_09': {'name': 'cnet-09', 'NetworkType': 'ipvlan', 'host_if': '', 'iface_type': ''},
}
clusternetworks_error_properties = {
'cnet_invalid_01': {'name': 'cnet-invalid-01'},
'cnet_invalid_02_01': {'name': 'cnet-invalid-02-01'},
'cnet_invalid_02_02': {'name': 'cnet-invalid-02-02'},
'cnet_invalid_03': {'name': 'cnet-invalid-03'},
'cnet_invalid_04_01': {'name': 'cnet-invalid-04-01'},
'cnet_invalid_04_02': {'name': 'cnet-invalid-04-02'},
'cnet_invalid_05': {'name': 'cnet-invalid-05'},
'cnet_invalid_06': {'name': 'cnet-invalid-06'},
'cnet_invalid_07': {'name': 'cnet-invalid-07'},
'cnet_invalid_08': {'name': 'cnet-invalid-08'},
'cnet_invalid_09': {'name': 'cnet-invalid-09'},
'cnet_invalid_10': {'name': 'cnet-invalid-10'},
'cnet_invalid_11': {'name': 'cnet-invalid-11'},
'cnet_invalid_12': {'name': 'cnet-invalid-12'},
}
tenantconfig_properties = {
'tconf_01': {'name': "tconf-01"},
'tconf_02': {'name': "tconf-02"},
'tconf_03': {'name': "tconf-03"},
'tconf_04': {'name': "tconf-04"},
'tconf_05': {'name': "tconf-05"},
'tconf_06': {'name': "tconf-06"},
'tconf_07': {'name': "tconf-07"},
'tconf_08': {'name': "tconf-08"},
}
tenantconfig_error_properties = {
'tconf_invalid_01': {'name':"tconf-invalid-01"},
'tconf_invalid_02': {'name':"tconf-invalid-02"},
'tconf_invalid_03': {'name':"tconf-invalid-03"},
'tconf_invalid_04': {'name':"tconf-invalid-04"},
'tconf_invalid_05': {'name':"tconf-invalid-05"},
'tconf_invalid_06': {'name':"tconf-invalid-06"},
'tconf_invalid_07': {'name':"tconf-invalid-07"},
'tconf_invalid_08': {'name':"tconf-invalid-08"},
'tconf_invalid_09': {'name':"tconf-invalid-09"},
}
tenantnetwork_properties = {
'tennet_01': {'name': "tennet-01", 'NetworkType': 'ipvlan', 'host_if': 'vx_tnet', 'iface_type': 'ext'},
'tennet_02': {'name': "tennet-02", 'NetworkType': 'ipvlan', 'host_if': 'tnet02.1000', 'iface_type': 'int'},
'tennet_03': {'name': "tennet-03", 'NetworkType': 'ipvlan', 'host_if': 'tnet03.1001', 'iface_type': 'int'},
'tennet_04': {'name': "tennet-04", 'NetworkType': 'ipvlan', 'host_if': 'tnet04.2000', 'iface_type': 'storage'},
'tennet_05': {'name': "tennet-05", 'NetworkType': 'ipvlan', 'host_if': 'tnet05.1002', 'iface_type': 'int'},
'tennet_06': {'name': "tennet-06", 'NetworkType': 'ipvlan', 'host_if': 'tnet06.1003', 'iface_type': 'int'},
}
tenantnetwork_error_properties = {
'tennet_invalid_01': {'name': 'tennet-invalid-01'},
'tennet_invalid_02': {'name': 'tennet-invalid-02'},
'tennet_invalid_03_01': {'name': 'tennet-invalid-03-01'},
'tennet_invalid_03_02': {'name': 'tennet-invalid-03-02'},
'tennet_invalid_04_01': {'name': 'tennet-invalid-04-01'},
'tennet_invalid_04_02': {'name': 'tennet-invalid-04-02'},
'tennet_invalid_05': {'name': 'tennet-invalid-05'},
'tennet_invalid_06': {'name': 'tennet-invalid-06'},
'tennet_invalid_07_01': {'name': 'tennet-invalid-07-01'},
'tennet_invalid_07_02': {'name': 'tennet-invalid-07-02'},
'tennet_invalid_08': {'name': 'tennet-invalid-08'},
'tennet_invalid_09': {'name': 'tennet-invalid-09'},
'tennet_invalid_10': {'name': 'tennet-invalid-10'},
'tennet_invalid_11': {'name': 'tennet-invalid-11'},
}
network_attach_properties = {
'cnet_pod1': {'name': 'cnet-pod1', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod1', 'routes':"10.0.0.0/32: 10.5.1.1"},
'cnet_pod2': {'name': 'cnet-pod2', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod2'},
'cnet_pod3': {'name': 'cnet-pod3', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod3'},
'cnet_pod4': {'name': 'cnet-pod4', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod4'},
'cnet_pod5': {'name': 'cnet-pod5', 'NetworkType': 'ipvlan', 'host_if': ''},
'cnet_pod6': {'name': 'cnet-pod6', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod6'},
'cnet_pod7': {'name': 'cnet-pod7', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod7'},
}
tenantnetwork_attach_properties = {
'tennet_attach_01': {'name': 'tennet-attach-01', 'namespace': 'default', 'NetworkType': 'ipvlan', 'host_if': '', 'routes': "10.10.1.0/24: 10.240.1.100"},
'tennet_attach_02': {'name': 'tennet-attach-02', 'namespace': 'kube-system', 'NetworkType': 'ipvlan', 'host_if': '', 'routes':"10.10.2.0/24: 10.240.2.1"},
'tennet_attach_03': {'name': 'tennet-attach-03', 'namespace': 'default', 'NetworkType': 'ipvlan', 'host_if': ''},
'tennet_attach_04': {'name': 'tennet-attach-04', 'namespace': 'default', 'NetworkType': 'ipvlan', 'host_if': '', 'flannel_pool': {'start': '10.244.0.1', 'end': '10.244.255.254'}},
'tennet_attach_05': {'name': 'tennet-attach-05', 'namespace': 'default', 'NetworkType': 'ipvlan', 'host_if': ''},
'tennet_attach_06': {'name': 'tennet-attach-06', 'namespace': 'default', 'NetworkType': 'ipvlan', 'host_if': ''},
'tennet_attach_07': {'name': 'tennet-attach-07', 'namespace': 'default', 'NetworkType': 'ipvlan', 'host_if': ''},
}
tennet_pod1 = dict(obj_type="pod", obj_name="tennet-pod-01", obj_count="4", namespace="default", ip_list=["10.240.1.1", "10.240.1.8", "10.240.1.9", "10.240.1.254"])
tennet_pod2 = dict(obj_type="pod", obj_name="tennet-pod-02", obj_count="4", namespace="default", ip_list=["10.240.1.2", "10.240.1.3", "10.240.1.4", "10.240.1.5", "10.240.1.6", "10.240.1.7"])
tennet_pod3 = dict(obj_type="pod", obj_name="tennet-pod-03", obj_count="4", namespace="default", ip_list=["10.240.1.1", "10.240.1.8", "10.240.1.9", "10.240.1.254"])
tennet_pod4 = dict(obj_type="pod", obj_name="tennet-pod-04", obj_count="5", namespace="kube-system", ip_list=["10.240.2.2", "10.240.2.3", "10.240.2.4", "10.240.2.5", "10.240.2.6"])
tennet_pod5 = dict(obj_type="pod", obj_name="tennet-pod-05", obj_count="1", namespace="kube-system", ip_list=[])
tennet_pod6 = dict(obj_type="pod", obj_name="tennet-pod-06", obj_count="4", namespace="default", ip_list=[])
tennet_pod7 = dict(obj_type="pod", obj_name="tennet-pod-07", obj_count="5", namespace="default", ip_list=[])
tennet_pod8 = dict(obj_type="pod", obj_name="tennet-pod-08", obj_count="1", namespace="default", ip_list=[])
tennet_pod9 = dict(obj_type="pod", obj_name="tennet-pod-09", obj_count="2", namespace="default", ip_list=[])
tennet_pod10 = dict(obj_type="pod", obj_name="tennet-pod-10", obj_count="1", namespace="default", ip_list=[])
tennet_pod11 = dict(obj_type="pod", obj_name="tennet-pod-11", obj_count="1", namespace="default", ip_list=[])
tennet_pod12 = dict(obj_type="pod", obj_name="tennet-pod-12", obj_count="1", namespace="default", ip_list=["10.20.5.101", "10.240.1.1"])
tennet_pod13 = dict(obj_type="pod", obj_name="tennet-pod-13", obj_count="1", namespace="default", ip_list=[])
tennet_pod14 = dict(obj_type="pod", obj_name="tennet-pod-14", obj_count="1", namespace="default", ip_list=["10.20.6.10", "10.240.1.5", "10.20.5.100"])
| [
11748,
28686,
198,
198,
16963,
457,
796,
705,
25,
16963,
457,
32105,
198,
198,
600,
62,
361,
62,
3672,
62,
259,
62,
9654,
25558,
796,
705,
10745,
430,
12,
600,
6,
198,
198,
2301,
796,
28686,
13,
1136,
24330,
10786,
31553,
11537,
198... | 2.158777 | 9,781 |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import uuid
from st2common.models.system.action import ShellCommandAction
from st2common.runners.base import get_metadata as get_runner_metadata
from local_runner.base import BaseLocalShellRunner
from local_runner.base import RUNNER_COMMAND
__all__ = [
'LocalShellCommandRunner',
'get_runner',
'get_metadata'
]
| [
2,
15069,
12131,
383,
23881,
32173,
46665,
13,
198,
2,
15069,
13130,
18111,
27862,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
... | 3.629091 | 275 |
import datetime
current_time = datetime.time()
time_in_sec = int(current_time[0])*3600 + int(current_time[1])*60 + int(current_time[2])
steps = int(input())
time_for_each_step = int(input())
total_time = time_in_sec + steps*time_for_each_step
a = str(datetime.timedelta(seconds=total_time))
az = a[len(a)-7:len(a)]
print(f"Time Arrival: {az}") | [
11748,
4818,
8079,
198,
14421,
62,
2435,
796,
4818,
8079,
13,
2435,
3419,
198,
2435,
62,
259,
62,
2363,
796,
493,
7,
14421,
62,
2435,
58,
15,
12962,
9,
2623,
405,
1343,
493,
7,
14421,
62,
2435,
58,
16,
12962,
9,
1899,
1343,
493,
... | 2.492754 | 138 |
#!/usr/bin/python3
import time
import datetime
import csv
import os
import board
import busio
import adafruit_bme280
OUTPUT_FILE='thp_log.csv'
SAMPLE_INTERVAL=5
i2c = busio.I2C(board.SCL, board.SDA)
sensor = adafruit_bme280.Adafruit_BME280_I2C(i2c)
sensor.mode = adafruit_bme280.MODE_FORCE
sensor.overscan_pressure = adafruit_bme280.OVERSCAN_X16
sensor.overscan_humidity = adafruit_bme280.OVERSCAN_X16
sensor.overscan_temperature = adafruit_bme280.OVERSCAN_X16
with open(OUTPUT_FILE, 'a') as csv_file:
initial_size = os.fstat(csv_file.fileno()).st_size
csvw = csv.DictWriter(csv_file, fieldnames=('datetime', 'temperature', 'pressure', 'humidity'))
if initial_size == 0:
csvw.writeheader()
while True:
degrees = sensor.temperature
pascals = sensor.pressure
humidity = sensor.humidity
csvw.writerow({
'datetime': datetime.datetime.utcnow().isoformat(),
'temperature': degrees,
'pressure': pascals,
'humidity': humidity,
})
time.sleep(SAMPLE_INTERVAL)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
3096,
198,
11748,
1323,
952,
198,
11748,
512,
1878,
4872,
62,
65,
1326,
21033,
198,
198,
26... | 2.193089 | 492 |
from __future__ import print_function
from __future__ import unicode_literals
import time
import re
from tiny_test_fw import DUT, App, TinyFW
from ttfw_bl import BL602App, BL602DUT
@TinyFW.test_method(app=BL602App.BL602App, dut=BL602DUT.BL602TyMbDUT, test_suite_name='benchmark_security_aes_tc')
if __name__ == '__main__':
benchmark_security_aes_tc()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
640,
198,
11748,
302,
198,
198,
6738,
7009,
62,
9288,
62,
44482,
1330,
360,
3843,
11,
2034,
11,
20443,
24160,
... | 2.627737 | 137 |
# -*- coding: utf-8 -*-
from anchorman.elements import create_element
from anchorman.elements import remove_elements
from anchorman.intervals import all_intervals
from anchorman.result import applicables
from anchorman.result import augment_result
from anchorman.settings import get_config
from anchorman.utils import log
from anchorman.utils import set_and_log_level
# from anchorman.utils import timeit, do_profile
# import objgraph
# # print objgraph.show_most_common_types()
# roots = objgraph.get_leaking_objects()
# objgraph.show_most_common_types(objects=roots)
# objgraph.show_refs(roots[:3], refcounts=True, filename='roots.png')
# @timeit
# @do_profile()
def annotate(text, elements, own_validator=None,
config=get_config(include_project_config=False)):
"""Find and annotate elements in text.
Create an invaltree with elements and units of text, validate
the rules to apply elements and augment the text with this result.
"""
set_and_log_level(config['settings']['log_level'])
# log('starting debugging')
units, old_links, soup_string = all_intervals(text, elements, config)
to_be_applied = applicables(units, old_links, config, own_validator)
markup = config['markup']
decorate_markup = markup.get('decorate')
return_applied_links = config['settings'].get('return_applied_links')
if return_applied_links:
rest = [{e[2]: e[3]}
for _, ele in units
for e in ele
if e not in to_be_applied]
rest_anchors = []
if decorate_markup:
rest_anchors = [create_element(e, markup)
for _, ele in units
for e in ele
if e not in to_be_applied]
anchors = [create_element(c, markup, anchor=True) for c in to_be_applied]
# log(soup_string)
# log(soup_string[949:953])
# log('{} of {} to_be_applied'.format(len(to_be_applied), len(elements)))
# apply the items, but start at the end ...its not like horse riding!
text = augment_result(soup_string, anchors + rest_anchors)
# log(text)
if return_applied_links:
applied = [e for a, _, _, _, e in anchors]
return text, applied, rest
# log('end of debugging\n')
return text
def clean(text, config=get_config(include_project_config=False)):
"""Remove elements from text based on mode and markup.
Use config data to identify markup elements in the text and remove them.
:param config:
:param text:
"""
return remove_elements(text, config)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
12619,
26183,
13,
68,
3639,
1330,
2251,
62,
30854,
198,
6738,
12619,
26183,
13,
68,
3639,
1330,
4781,
62,
68,
3639,
198,
6738,
12619,
26183,
13,
3849,
12786,
1330,... | 2.615619 | 986 |
import asyncio
from subprocess import CalledProcessError
from conjureup import controllers, juju
from conjureup.app_config import app
from conjureup.consts import JAAS_DOMAIN
from conjureup.ui.views.interstitial import InterstitialView
from conjureup.ui.views.jaas import JaaSLoginView
_controller_class = JaaSLoginController
| [
11748,
30351,
952,
198,
6738,
850,
14681,
1330,
34099,
18709,
12331,
198,
198,
6738,
11644,
495,
929,
1330,
20624,
11,
7544,
14396,
198,
6738,
11644,
495,
929,
13,
1324,
62,
11250,
1330,
598,
198,
6738,
11644,
495,
929,
13,
1102,
6448,
... | 3.4375 | 96 |
import pickle
import os.path as path
from udaru_anomaly_detection \
import check_length, check_distribution, check_gramma
from udaru_anomaly_detection.tests.generator import generate_resource
dirname = path.dirname(path.realpath(__file__))
modeldir = path.join(dirname, 'models')
train_dataset = list(generate_resource(100, 'test'))
test_dataset = list(generate_resource(5, 'test')) + [
'../../../passwd',
':(){ :|: & };:',
'a',
'a' * 70,
'res::ricky:/sl/jennifersaunders',
'res:/sl/:ricky:/jennifersaunders'
]
with open(path.join(modeldir, 'length-model.pkl'), 'rb') as fp:
length_model = pickle.load(fp)
with open(path.join(modeldir, 'distribution-model.pkl'), 'rb') as fp:
distribution_model = pickle.load(fp)
with open(path.join(modeldir, 'gramma-model.pkl'), 'rb') as fp:
gramma_model = pickle.load(fp)
print('train dataset:')
for resource in train_dataset:
print(f' {resource}')
print('')
print('test dataset:')
for resource in test_dataset:
print(f' {resource}')
print(f' - length: {check_length.validate(length_model, resource)}')
print(f' - distribution: {check_distribution.validate(distribution_model, resource)}')
print(f' - gramma: {check_gramma.validate(gramma_model, resource)}')
print('')
| [
198,
11748,
2298,
293,
198,
11748,
28686,
13,
6978,
355,
3108,
198,
198,
6738,
334,
67,
11493,
62,
272,
24335,
62,
15255,
3213,
3467,
198,
220,
220,
220,
1330,
2198,
62,
13664,
11,
2198,
62,
17080,
3890,
11,
2198,
62,
4546,
2611,
19... | 2.495146 | 515 |
### This file is a part of the Syncpy library.
### Copyright 2015, ISIR / Universite Pierre et Marie Curie (UPMC)
### Main contributor(s): Giovanna Varni, Marie Avril,
### syncpy@isir.upmc.fr
###
### This software is a computer program whose for investigating
### synchrony in a fast and exhaustive way.
###
### This software is governed by the CeCILL-B license under French law
### and abiding by the rules of distribution of free software. You
### can use, modify and/ or redistribute the software under the terms
### of the CeCILL-B license as circulated by CEA, CNRS and INRIA at the
### following URL "http://www.cecill.info".
### As a counterpart to the access to the source code and rights to
### copy, modify and redistribute granted by the license, users are
### provided only with a limited warranty and the software's author,
### the holder of the economic rights, and the successive licensors
### have only limited liability.
###
### In this respect, the user's attention is drawn to the risks
### associated with loading, using, modifying and/or developing or
### reproducing the software by the user in light of its specific
### status of free software, that may mean that it is complicated to
### manipulate, and that also therefore means that it is reserved for
### developers and experienced professionals having in-depth computer
### knowledge. Users are therefore encouraged to load and test the
### software's suitability as regards their requirements in conditions
### enabling the security of their systems and/or data to be ensured
### and, more generally, to use and operate it in the same conditions
### as regards security.
###
### The fact that you are presently reading this means that you have
### had knowledge of the CeCILL-B license and that you accept its terms.
"""
.. moduleauthor:: Giovanna Varni
"""
import numpy as np # For math operation
import pandas as pd # For DataFrame
import matplotlib.pyplot as plt # For plotting
import matplotlib.dates as mdates # For plotting dates and timeFormat
import scipy
from scipy.signal import hilbert
from Method import Method, MethodArgList
class PhaseSynchro_Strobo(Method):
"""
It computes n:m synchronization index lambda_nm by using a stroboscopic approach between two univariate signals x and y
(in pandas DataFrame format).
**Reference :**
M. Rosenblum, A. Pikovsky, J. Kurths, C. Schafer and P. A. Tass. Phase synchronizatio:from theory to practice. In Handbook of Biological Physics,
Elsiever Science, Series Editor A.J. Hoff, Vol. , Neuro-Informatics, Editors: F. Moss and S. Gielen, Chapter 9.
:param n:
it is the integer for the order of synchronization
:type n: int
:param m:
it is the integer for the order of synchronization
:type m: int
:param nbins:
it is the number of bins to be used to build phase distribution
:type nbins: int
"""
argsList = MethodArgList()
argsList.append('n', 1, int, 'it is the integer for the order of synchronization')
argsList.append('m', 1, int, 'it is the integer for the order of synchronization ')
argsList.append('nbins', 10, int, 'it is the number of bins to be used to build phase distribution')
''' Constructor '''
    def compute(self, signals):
        """
        It computes the synchronization index lambda_nm.

        :param signals:
            array containing the 2 signals as pd.DataFrame
        :type signals: list

        :returns: dict
            -- lambda_mn index
        """
        # -- Input validation -------------------------------------------------
        # NOTE(review): this module uses legacy Python 2 ``except Type, name``
        # syntax; the ``return`` statements after ``raise`` below are unreachable.
        try:
            if not (isinstance(signals, list)): raise TypeError("Requires signals be an array")
            if len(signals) != 2: raise TypeError("Requires signals be an array of two elements")
        except TypeError, err_msg:
            raise TypeError(err_msg)

        x = signals[0]
        y = signals[1]

        ' Raise error if parameters are not in the correct type '
        try :
            if not(isinstance(x, pd.DataFrame)) : raise TypeError("Requires x to be a pd.DataFrame")
            if not(isinstance(y, pd.DataFrame)) : raise TypeError("Requires y to be a pd.DataFrame")
        except TypeError, err_msg:
            raise TypeError(err_msg)
            return

        'Error if x and y are empty or they have a different length'
        try :
            if (x.shape[0]==0) or (y.shape[0]== 0) : raise ValueError("Empty signal")
            if x.shape[0]!=y.shape[0] : raise ValueError("The two signals have different length")
        except ValueError, err_msg:
            raise ValueError(err_msg)
            return

        # Number of samples in each signal.
        M = x.shape[0]

        # -- Instantaneous phases --------------------------------------------
        # Analytic signal via the Hilbert transform, then unwrapped phase.
        # NOTE(review): scipy.angle is a deprecated alias of numpy.angle.
        x_analytic=hilbert(np.hstack(x.values))
        y_analytic=hilbert(np.hstack(y.values))
        phx=np.unwrap(scipy.angle(x_analytic))
        phy=np.unwrap(scipy.angle(y_analytic))

        # Discard ~10% of samples at each end (presumably to suppress Hilbert
        # edge effects -- TODO confirm).
        disc_perc = int(np.floor(phx.shape[0] // 10))
        phx_s=phx[disc_perc-1:M-disc_perc]
        phy_s=phy[disc_perc-1:M-disc_perc]

        # Bin edges spanning [0, 2*pi*m] for the wrapped phase of x.
        bins=np.linspace(0,2*np.pi*self._m,self._nbins)
        bins_no=bins.size

        try:
            if bins_no <= 0 : raise ValueError("Requires bins_no to be a positive scalar")
        except ValueError, err_msg:
            raise ValueError(err_msg)
            return

        Lambda=np.array([])
        # Stroboscopic approach: for each phase bin of x, collect y's phase at
        # the samples where x's wrapped phase falls in that bin, and average
        # the corresponding unit phasors.
        for b in range(0,bins_no-1):
            lb=bins[b]
            ub=bins[b+1]
            mod_phx_s=np.mod(phx_s,2*np.pi*self._m)
            indices=np.where(np.all([mod_phx_s<=ub,mod_phx_s>=lb], axis=0))
            # NOTE(review): M is re-used here as the per-bin sample count,
            # shadowing the signal length computed above.
            M=len(indices[0])
            eta=np.mod(phy_s[indices[0]], 2*np.pi*self._n)
            Lambda_=1.0*np.sum(np.exp(1j*(eta/self._n)))/M
            # An empty bin gives 0/0 -> nan; treat it as no synchronization.
            if np.isnan(Lambda_):
                Lambda_=0
            Lambda=np.append(Lambda,Lambda_)

        # Mean phasor magnitude over all bins is the n:m synchronization index.
        lambda_nm=1.0*(np.sum(np.abs(Lambda)))/bins_no

        result = dict()
        result['lambda_nm'] = lambda_nm
        return result
@staticmethod
@staticmethod | [
21017,
770,
2393,
318,
257,
636,
286,
262,
35908,
9078,
5888,
13,
198,
21017,
15069,
1853,
11,
3180,
4663,
1220,
26986,
578,
21204,
2123,
20492,
4424,
494,
357,
52,
5868,
34,
8,
198,
21017,
8774,
18920,
7,
82,
2599,
36089,
7697,
569,
... | 2.499396 | 2,485 |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib.auth import views as auth_views
from django.contrib import admin
from core import views as core_views
urlpatterns = [
    # Django admin.
    url(r'^admin/', admin.site.urls),

    # Core pages.
    url(r'^$', core_views.home, name='home'),
    url(r'^viewchild/$', core_views.viewchild, name='viewchild'),

    # Authorize / de-authorize members (two-step flows: select, then submit).
    url(r'^auth_select/$', core_views.auth_member_select, name='auth_member_select'),
    url(r'^auth_select/submit$', core_views.auth_member_submit, name='auth_member_submit'),
    url(r'^deauth_select/$', core_views.deauth_member_select, name='deauth_member_select'),
    url(r'^deauth_select/submit$', core_views.deauth_member_submit, name='deauth_member_submit'),

    # Assign / re-assign "med" records (presumably medical data -- confirm).
    url(r'^assign_select/$', core_views.assignmed_select, name='assignmed_select'),
    url(r'^assign_select/submit$', core_views.assignmed_submit, name='assignmed_submit'),
    url(r'^reassign_select/$', core_views.reassign_select, name='reassign_select'),
    url(r'^reassign_select/submit$', core_views.reassign_submit, name='reassign_submit'),

    # Immunization records.
    url(r'^add$', core_views.addImmunizations, name='addImmun'),
    url(r'^add/submit$', core_views.addImmunizations_submit, name='addImmun_submit'),

    # Outcome / status pages.
    url(r'^success/$', core_views.success, name='success'),
    url(r'^failure/$', core_views.failure, name='failure'),
    url(r'^deny/$', core_views.deny, name='deny'),
    url(r'^deny/underage$', core_views.underage, name='underage'),

    # Child record management.
    url(r'^newchild/$', core_views.newchild, name='newchild'),
    url(r'^update/$', core_views.update, name='update'),

    # Authentication (login uses a custom template; logout redirects to login).
    url(r'^login/$', auth_views.login, {'template_name': 'login.html'}, name='login'),
    url(r'^logout/$', auth_views.logout, {'next_page': 'login'}, name='logout'),
    url(r'^signup/$', core_views.signup, name='signup'),
]
37811,
28744,
578,
10289,
28373,
198,
464,
4600,
6371,
33279,
82,
63,
1351,
11926,
32336,
284,
5009,
13,
1114,
517,
1321,
3387,
766,
25,
198,
220,
220,
220,
3740,
1378,
31628,
13,
28241,
648,
404,
305,
752,
13,
785,
14,
268,
14,
16,... | 2.627027 | 925 |
"""Sensor support for Wireless Sensor Tags platform."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_MONITORED_CONDITIONS
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DOMAIN as WIRELESSTAG_DOMAIN, SIGNAL_TAG_UPDATE, WirelessTagBaseSensor
_LOGGER = logging.getLogger(__name__)
SENSOR_TEMPERATURE = "temperature"
SENSOR_HUMIDITY = "humidity"
SENSOR_MOISTURE = "moisture"
SENSOR_LIGHT = "light"
SENSOR_TYPES = [SENSOR_TEMPERATURE, SENSOR_HUMIDITY, SENSOR_MOISTURE, SENSOR_LIGHT]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
)
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the sensor platform."""
    platform = hass.data.get(WIRELESSTAG_DOMAIN)
    monitored = config.get(CONF_MONITORED_CONDITIONS)

    # One entity per (tag, monitored condition) pair the tag supports.
    entities = [
        WirelessTagSensor(platform, tag, sensor_type, hass.config)
        for tag in platform.tags.values()
        for sensor_type in monitored
        if sensor_type in tag.allowed_sensor_types
    ]

    add_entities(entities, True)
class WirelessTagSensor(WirelessTagBaseSensor, SensorEntity):
    """Sensor entity exposing one reading of a Wireless Sensor Tag."""

    def __init__(self, api, tag, sensor_type, config):
        """Initialize a WirelessTag sensor."""
        super().__init__(api, tag)
        self._sensor_type = sensor_type
        self._name = self._tag.name

        # Build a descriptive entity_id such as
        # sensor.wirelesstag_bedroom_temperature, instead of letting Home
        # Assistant derive sensor.bedroom / sensor.bedroom_2 when one tag
        # exposes several readings.
        self._entity_id = (
            f"sensor.{WIRELESSTAG_DOMAIN}_{self.underscored_name}_{self._sensor_type}"
        )

    async def async_added_to_hass(self):
        """Register callbacks."""
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                SIGNAL_TAG_UPDATE.format(self.tag_id, self.tag_manager_mac),
                self._update_tag_info_callback,
            )
        )

    @property
    def entity_id(self):
        """Return the explicitly chosen entity id (overrides the default)."""
        return self._entity_id

    @property
    def underscored_name(self):
        """Return the tag name lower-cased with spaces turned into underscores."""
        return self.name.lower().replace(" ", "_")

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def device_class(self):
        """Return the class of the sensor."""
        return self._sensor_type

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._sensor.unit

    @property
    def principal_value(self):
        """Return the sensor's current raw value."""
        return self._sensor.value

    @property
    def _sensor(self):
        """Return the tag's sensor object for this sensor type."""
        return self._tag.sensor[self._sensor_type]

    @callback
    def _update_tag_info_callback(self, event):
        """Handle push notification sent by tag manager."""
        _LOGGER.debug("Entity to update state: %s event data: %s", self, event.data)
        fresh = self._sensor.value_from_update_event(event.data)
        self._state = self.decorate_value(fresh)
        self.async_write_ha_state()
| [
37811,
47864,
1104,
329,
24365,
35367,
44789,
3859,
526,
15931,
198,
11748,
18931,
198,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
198,
6738,
1363,
562,
10167,
13,
5589,
3906,
13,
82,
22854,
1330,
9297,
1404,
21389,
62,
50,
3398,
27... | 2.383147 | 1,519 |
import numpy as np
from numpy.random import randn
from util import Activations
| [
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
25120,
1330,
43720,
77,
198,
6738,
7736,
1330,
13144,
602,
198
] | 3.761905 | 21 |
import os
from print_function import consumergroup_usage
# Exemple
# main.py consumergroup list
# main.py consumergroup describe groupe1
# Environment-driven Kafka settings, resolved once at import time.
# NOTE(review): str() turns an unset variable (None) into the literal string
# "None" instead of failing fast -- confirm this is intended.
KAFKA_HOME = str(os.getenv("KAFKA_HOME"))
KAFKA_BROKERS = str(os.getenv("KAFKA_BROKERS"))
KAFKA_ZOOKEEPER = str(os.getenv("KAFKA_ZOOKEEPER"))
# Use KAFKA_CONNECT when set and non-empty, otherwise fall back to the broker
# list (same truthiness rule as the original conditional expression).
KAFKA_CONNECT = str(os.getenv("KAFKA_CONNECT") or os.getenv("KAFKA_BROKERS"))
# Full consumer-groups CLI invocation pointing at the configured ZooKeeper.
KAFKA_COMMAND = KAFKA_HOME + "bin/kafka-consumer-groups.sh --zookeeper " + KAFKA_ZOOKEEPER
| [
11748,
28686,
201,
198,
6738,
3601,
62,
8818,
1330,
2784,
6422,
3233,
62,
26060,
201,
198,
201,
198,
2,
1475,
368,
1154,
201,
198,
2,
1388,
13,
9078,
2784,
6422,
3233,
1351,
201,
198,
2,
1388,
13,
9078,
2784,
6422,
3233,
6901,
1132,... | 2.32243 | 214 |
"""A Light designed to have access to other component's private data when is toggled."""
DOMAIN = "light_simple_access"
| [
37811,
32,
4401,
3562,
284,
423,
1895,
284,
584,
7515,
338,
2839,
1366,
618,
318,
284,
1130,
992,
526,
15931,
198,
198,
39170,
29833,
796,
366,
2971,
62,
36439,
62,
15526,
1,
198
] | 3.666667 | 33 |
import asyncio
import re
import argparse
from cmd import Cmd
from distutils.util import strtobool
import bili_statistics
import printer
import notifier
from tasks.utils import UtilsTask
from tasks.bili_console import (
PrintGiftbagsTask,
PrintMedalsTask,
PrintMainBiliDailyJobTask,
PrintLiveBiliDailyJobTask,
PrintMainBiliUserInfoTask,
PrintLiveBiliUserInfoTask,
PrintJudgeTask,
PrintCapsuleTask,
OpenCapsuleTask,
SendDanmuTask,
PrintUserStatusTask
)
from tasks.custom import SendLatiaoTask, BuyLatiaoTask, BuyMedalTask
# https://github.com/python/cpython/blob/3.7/Lib/argparse.py
| [
11748,
30351,
952,
201,
198,
11748,
302,
201,
198,
11748,
1822,
29572,
201,
198,
6738,
23991,
1330,
327,
9132,
201,
198,
6738,
1233,
26791,
13,
22602,
1330,
965,
83,
672,
970,
201,
198,
201,
198,
11748,
275,
2403,
62,
14269,
3969,
201... | 2.569767 | 258 |
import os
import sys

import numpy

numpy.random.seed(42)

# Read the utterance list: one whitespace-separated record per line.
with open(os.path.join(sys.argv[1], "dev-other.lst"), "r") as f:
    data = [line.strip() for line in f]

# Produce 5 shuffled variants.  NOTE(review): `data` is re-permuted on every
# iteration, so each output file permutes the *previous* order, not the
# original list -- confirm this cumulative shuffling is intended.
for n, seed_val in enumerate([0, 2, 3, 4, 5]):
    numpy.random.seed(42 + seed_val)
    data = numpy.random.permutation(data)
    with open("tts_shuffled_{}.txt".format(n), "w") as fout:
        for line in data:
            # Fields after the first three (presumably id/metadata columns --
            # TODO confirm) are randomly permuted.
            line_new = line.split(" ")
            new_tr = numpy.random.permutation(line_new[3:])
            # Write the original line, then a line holding only the shuffled
            # trailing fields.
            fout.write(line + "\n")
            fout.write("{}\n".format(" ".join(new_tr)))
| [
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
299,
32152,
628,
198,
77,
32152,
13,
25120,
13,
28826,
7,
3682,
8,
628,
198,
4480,
1280,
7,
418,
13,
6978,
13,
22179,
7,
17597,
13,
853,
85,
58,
16,
4357,
366,
7959,
12,
847,
13,
... | 2.102941 | 272 |
import time
import json
import machine
import os
from network import WLAN
from machine import Pin
from MQTTLib import AWSIoTMQTTClient
###################################################################################################
# CONFIG NETWORK
###################################################################################################
ip = 'EDIT_ME'              # IP to set for wipy.
subnet = '255.255.255.0'    # Subnet, most likely leave as is.
router = 'EDIT_ME'          # Router of network, usually x.x.x.1
dns = '8.8.8.8'             # IP of dns to use, 8.8.8.8 is a google dns.
wlanName = 'EDIT_ME'        # Name of WLAN to connect to.
wlanPass = 'EDIT_ME'        # Pass of WLAN to connect to.
wlanType = WLAN.WPA2        # Type of network security.
wlanAntType = WLAN.EXT_ANT  # Specify ext or int antenna of the wipy. WLAN.INT_ANT

###################################################################################################
# CONFIG AWS IOT
###################################################################################################
awsPort = 8883                                     # Port of AWS IoT endpoint.
awsHost = 'EDIT_ME'                                # Your x.iot.eu-west-1.amazonaws.com
awsRootCA = '/flash/cert/ca.pem'                   # Root CA file.
awsClientCert = '/flash/cert/certificate.pem.crt'  # Certificate.
awsPrivateKey = '/flash/cert/private.pem.key'      # Private key.
clientID = 'EDIT_ME'                               # ID used when sending messages to AWS IoT.
topic = 'EDIT_ME'                                  # Name of the topic to send messages to.
offlineQueueSize = 0                               # 0 disabled.
connectDisconnectTimeout = 10                      # After 10 seconds from connecting, disconnect.

###################################################################################################
# CONFIG WATER MONITORING
###################################################################################################
# Setup pins used for water detection.  PULL_UP is configured; the monitor
# loop below treats a False reading as water present at that level.
pinCricital = Pin('P12', mode=Pin.IN, pull=Pin.PULL_UP)
pinWarning = Pin('P11', mode=Pin.IN, pull=Pin.PULL_UP)

# Every .5 sec cNormal, cWarning or cCritical goes up 1, when reaching 10 water level confirmed.
monitorTick = 0.5

# How long in ms device should sleep between meassure runs.
deepSleepTime = 300000 # 300000 = 5 min

############################
### No edit needed below ###
############################

# Count how many consecutive loops the water has been at each level.
cNormal = 0
cWarning = 0
cCritical = 0

# Track time when water reaches different levels.
timeNormal = time.time()        # only for init
timeWarning = time.time()       # only for init
timeCritical = time.time()      # only for init
timeDiff = time.time()          # only for init
timeDiffCritical = time.time()  # only for init
timeWeeklyMail = time.time()    # only for init
###################################################################################################
# CONVERT SECONDS INTO HOURS, MINUTES AND SECONDS
###################################################################################################
###################################################################################################
# PUBLISH MESSAGE TO MQTT TOPIC
###################################################################################################
###################################################################################################
# WLAN CONNECT / DISCONNECT
###################################################################################################
###################################################################################################
# SAVE AND LOAD
###################################################################################################
# Save int variables to text files as strings.
# Read string values from text files and update variables as int.
###################################################################################################
# MONITOR WATER LEVELS LOOP.
###################################################################################################
# Check and load variables.
# Restore persisted counters/timestamps before entering the monitor loop.
# NOTE(review): Load(), PublishMQTT() and TimeStr() are defined elsewhere in
# this file; their definitions are outside this excerpt.
Load()

while True:
    # ---- Weekly check-in mail -------------------------------------------
    # NOTE(review): every `break` below exits the while-loop entirely, ending
    # the script without a deepsleep -- confirm this is the intended flow.
    if time.time() - timeWeeklyMail > 604800: # 604800 sec = 7 days
        timeWeeklyMail = time.time()
        PublishMQTT(5)
        break

    # ---- Read the water-level pins (False == water detected, per branches
    # below) ---------------------------------------------------------------
    critical = pinCricital()
    warning = pinWarning()

    # ---- Water level CRITICAL -------------------------------------------
    if critical == False:
        cCritical += 1
        # Reset other counters to make sure they are always 0.
        cWarning = 0
        cNormal = 0

        # Set time when reached critical and calculate how long it has been
        # since normal.
        if cCritical == 10:
            timeCritical = time.time()
            timeDiff = timeCritical - timeWarning
            timeDiffCritical = timeCritical - timeNormal
            PublishMQTT(4, TimeStr(timeDiff), TimeStr(timeDiffCritical))
            break

        if cCritical >= 10:
            machine.deepsleep(deepSleepTime)

    # ---- Water level ELEVATED -------------------------------------------
    elif warning == False:
        cWarning += 1

        # Set time when reached elevated.
        if cWarning == 10:
            timeWarning = time.time()

        # Send dropped from critical to elevated message.
        if cCritical >= 10 and cWarning >= 10:
            timeDiff = timeWarning - timeCritical
            cCritical = 0
            PublishMQTT(2, TimeStr(timeDiff))
            break

        # Send reached elevated from normal message.
        if cNormal >= 10 and cWarning >= 10:
            timeDiff = timeWarning - timeNormal
            cNormal = 0
            PublishMQTT(3, TimeStr(timeDiff))
            break

        if cWarning >= 10:
            machine.deepsleep(deepSleepTime)

    # ---- Water level NORMAL ---------------------------------------------
    else:
        cNormal += 1

        # Set time level reached normal and calculate how long it took to go
        # from elevated to normal.
        if cNormal == 10:
            timeNormal = time.time()
            timeDiff = timeNormal - timeWarning

            # Reset counters.
            cCritical = 0
            cWarning = 0

            # Fresh boot: level has been normal since the start of the app...
            if machine.reset_cause() != machine.DEEPSLEEP_RESET:
                PublishMQTT(0, TimeStr(timeDiff))
            # ...otherwise the level dropped back to normal from elevated.
            else:
                PublishMQTT(1, TimeStr(timeDiff))
            break

        if cNormal >= 10:
            machine.deepsleep(deepSleepTime)

    print("Monitor Update")
    time.sleep(monitorTick)
| [
11748,
640,
198,
11748,
33918,
198,
11748,
4572,
198,
11748,
28686,
198,
6738,
3127,
1330,
370,
25697,
198,
6738,
4572,
1330,
13727,
198,
6738,
337,
48,
51,
14990,
571,
1330,
14356,
11584,
78,
15972,
48,
15751,
11792,
628,
198,
29113,
2... | 3.428636 | 2,221 |
# MySQL connection settings for the fangSpider scraping database.
# NOTE(review): credentials are hard-coded in source -- consider loading them
# from the environment or an untracked config file.
host = '127.0.0.1'
user = 'root'
passwd = '12345678'
database ='fangSpider'
TABLENAME = 'loupanindexzzz'
4774,
796,
705,
16799,
13,
15,
13,
15,
13,
16,
6,
198,
7220,
796,
705,
15763,
6,
198,
6603,
16993,
796,
705,
10163,
2231,
30924,
6,
198,
48806,
796,
6,
69,
648,
41294,
6,
198,
5603,
9148,
1677,
10067,
796,
705,
75,
280,
6839,
96... | 2.166667 | 48 |
import sys
import pandas as pd
import json
import os
import numpy as np

# Convert <argv[1]>.json into a Stata file <argv[2]>.dta (format 117 /
# Stata 13), then delete the source JSON.

# Parse the JSON payload.  Using a with-block (instead of open/read/close)
# guarantees the handle is closed even if parsing raises, and json.load
# streams the file without an intermediate string.
with open(sys.argv[1] + ".json", "r") as f:
    data = json.load(f)

# Build the DataFrame and normalise missing values to NaN so the Stata
# export handles them uniformly.
df = pd.DataFrame(data)
df = df.fillna(value=np.nan)

df.to_stata(sys.argv[2] + '.dta', version=117)

# Remove the source JSON only after the conversion succeeded.
os.remove(sys.argv[1] + ".json")
| [
11748,
25064,
220,
198,
11748,
19798,
292,
355,
279,
67,
220,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
628,
198,
2,
29443,
262,
33918,
2393,
198,
69,
796,
1280,
7,
17597,
13,
853,
85,
58,
16,
48688,
... | 2.132979 | 188 |
import io
import os
import pytest
from release import cibuild
@pytest.mark.parametrize("version, tag, ok", [
("3.0.0.dev", "", True), # regular snapshot
("3.0.0.dev", "v3.0.0", False), # forgot to remove ".dev" on bump
("3.0.0", "", False), # forgot to re-add ".dev"
("3.0.0", "v4.0.0", False), # version mismatch
("3.0.0", "v3.0.0", True), # regular release
("3.0.0.rc1", "v3.0.0.rc1", False), # non-canonical.
("3.0.0.dev", "anyname", True), # tagged test/dev release
("3.0.0", "3.0.0", False), # tagged, but without v prefix
])
| [
11748,
33245,
198,
11748,
28686,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
2650,
1330,
269,
571,
3547,
628,
628,
628,
628,
628,
628,
198,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7203,
9641,
11,
7621,
11,
12876,
... | 2.223485 | 264 |
import calendar
import json
import re
from collections import defaultdict
from heapq import nlargest
from operator import itemgetter
from django import forms
from django.db.models import Count
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.html import strip_tags
from django.views.generic import ListView, DetailView
from django.views.generic.dates import ArchiveIndexView, YearArchiveView, MonthArchiveView
from haystack.forms import SearchForm
from haystack.query import RelatedSearchQuerySet
from haystack.views import SearchView
from popolo.models import Organization
from speeches.models import Section, Speaker, Speech
from speeches.search import SpeakerForm
from legislature.models import Action, Bill
# Words excluded from word-frequency analysis: standard English stopwords,
# parliamentary boilerplate, Nova Scotia-specific terms, HTML tag names and
# text-extraction artifacts.
STOPWORDS = frozenset([
    # nltk.corpus.stopwords.words('english')
    'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
    'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her',
    'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs',
    'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those',
    'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had',
    'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if',
    'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with',
    'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',
    'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over',
    'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where',
    'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other',
    'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too',
    'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now',
    # @see https://github.com/rhymeswithcycle/openparliament/blob/master/parliament/text_analysis/frequencymodel.py
    "it's", "we're", "we'll", "they're", "can't", "won't", "isn't", "don't", "he's",
    "she's", "i'm", "aren't", "government", "house", "committee", "would", "speaker",
    "motion", "mr", "mrs", "ms", "member", "minister", "canada", "members", "time",
    "prime", "one", "parliament", "us", "bill", "act", "like", "canadians", "people",
    "said", "want", "could", "issue", "today", "hon", "order", "party", "canadian",
    "think", "also", "new", "get", "many", "say", "look", "country", "legislation",
    "law", "department", "two", "day", "days", "madam", "must", "that's", "okay",
    "thank", "really", "much", "there's", "yes", "no",
    # HTML tags
    'sup',
    # Nova Scotia
    "nova", "scotia", "scotians", "province", "honourable", "premier",
    # artifacts
    "\ufffd", "n't",
])
# Strip punctuation, keeping word characters, digits, apostrophes and the
# dash/em-dash characters that appear inside words.
r_punctuation = re.compile(r"[^\s\w0-9'’—-]", re.UNICODE)
# Collapse runs of whitespace and em-dashes into a single separator.
r_whitespace = re.compile(r'[\s—]+')
# URL-ready callables built from the class-based views.  The view classes
# (DebateArchiveIndexView, SpeakerListView, BillListView, etc.) are defined
# elsewhere in this module (not visible in this excerpt).
debates = DebateArchiveIndexView.as_view()
debates_by_year = DebateYearArchiveView.as_view()
debates_by_month = DebateMonthArchiveView.as_view()
people = SpeakerListView.as_view()
person = SpeakerDetailView.as_view()
person_notices = SpeakerDetailView.as_view(notices=True)
debate = DebateDetailView.as_view()
notices = DebateDetailView.as_view(notices=True)
debate_single_page = DebateDetailView.as_view(paginate_by=None)
notices_single_page = DebateDetailView.as_view(paginate_by=None, notices=True)
bills = BillListView.as_view()
bill = BillDetailView.as_view()
# @see http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form
# @see https://github.com/toastdriven/django-haystack/blob/master/haystack/forms.py
class SpeechForm(SearchForm):
    """
    A form with a hidden integer field that searches the speaker ID field
    """

    # Hidden, optional speaker-ID filter so free-text searches still validate.
    p = forms.IntegerField(required=False, widget=forms.HiddenInput())
    speaker = None  # the speaker object
# @see http://django-haystack.readthedocs.org/en/latest/searchqueryset_api.html
# @see http://django-haystack.readthedocs.org/en/latest/searchqueryset_api.html#SearchQuerySet.auto_query
# @see http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#views
# @see https://github.com/toastdriven/django-haystack/blob/master/haystack/views.py
| [
11748,
11845,
198,
11748,
33918,
198,
11748,
302,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
24575,
80,
1330,
299,
28209,
198,
6738,
10088,
1330,
2378,
1136,
353,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,... | 2.66939 | 1,591 |
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import JobQueue
from telegram.ext import Job
import time
import requests
import json
import os.path
from VARIABLES import *
# Build the bot from the token in VARIABLES and expose its dispatcher and
# job queue.  NOTE(review): the command callbacks referenced below (start,
# start_loop, end_loop, get_total_USD, make_data) are defined elsewhere in
# this module, outside this excerpt.
updater = Updater(token=TOKEN_VAR, use_context=True)
dispatcher = updater.dispatcher
job_queue = updater.job_queue

# Binance spot-price endpoint and an in-memory price cache.
url = "https://api.binance.com/api/v3/ticker/price"
currency = {}

#### Handlers SetUp ####
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)

start_loop_handler = CommandHandler('start_loop', start_loop)
dispatcher.add_handler(start_loop_handler)

end_loop_handler = CommandHandler('end_loop', end_loop)
dispatcher.add_handler(end_loop_handler)

get_total_handler = CommandHandler('get_total', get_total_USD)
dispatcher.add_handler(get_total_handler)

make_data_handler = CommandHandler('make_data', make_data)
dispatcher.add_handler(make_data_handler)

#### Run ####
# Start long-polling Telegram for updates and block until interrupted.
updater.start_polling()
updater.idle()
| [
198,
6738,
573,
30536,
13,
2302,
1330,
3205,
67,
729,
198,
6738,
573,
30536,
13,
2302,
1330,
9455,
25060,
198,
6738,
573,
30536,
13,
2302,
1330,
15768,
34991,
198,
6738,
573,
30536,
13,
2302,
1330,
15768,
198,
11748,
640,
198,
11748,
... | 2.881159 | 345 |
import pytest
from ...core.relay import extract_global_id
from ...form.models import Question
from .. import models
@pytest.mark.parametrize(
"case__status,result_count",
[(models.Case.STATUS_RUNNING, 1), (models.Case.STATUS_COMPLETED, 0)],
)
@pytest.mark.parametrize("task__lead_time", [100, None])
@pytest.mark.parametrize("task__address_groups", ['["group-name"]|groups', None])
@pytest.mark.parametrize("mutation", ["startCase", "saveCase"])
@pytest.mark.parametrize(
"work_item__status",
[models.WorkItem.STATUS_COMPLETED, models.WorkItem.STATUS_READY],
)
@pytest.mark.parametrize(
"case__status,success",
[(models.Case.STATUS_RUNNING, True), (models.Case.STATUS_COMPLETED, False)],
)
@pytest.mark.parametrize(
"task__is_multiple_instance,task__address_groups,count",
[(False, ["group1", "group2"], 1), (True, ["group1", "group2"], 2)],
)
| [
11748,
12972,
9288,
198,
198,
6738,
2644,
7295,
13,
2411,
323,
1330,
7925,
62,
20541,
62,
312,
198,
6738,
2644,
687,
13,
27530,
1330,
18233,
198,
6738,
11485,
1330,
4981,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
... | 2.56196 | 347 |
# Generated by Django 4.0.3 on 2022-04-08 10:27
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
604,
13,
15,
13,
18,
319,
33160,
12,
3023,
12,
2919,
838,
25,
1983,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
# Ask which car the customer wants, then echo the choice back (upper-cased
# for emphasis) before showing the available models.
car_input = input('Welcome, what kind of car would you like to rent?\n' )
print(f"{car_input.upper()}?? sure, we have different models {car_input}'s let me show you!" )
| [
7718,
62,
15414,
796,
5128,
10786,
14618,
11,
644,
1611,
286,
1097,
561,
345,
588,
284,
5602,
30,
59,
77,
6,
1267,
198,
4798,
7,
69,
1,
90,
7718,
62,
15414,
13,
45828,
3419,
92,
3548,
1654,
11,
356,
423,
1180,
4981,
1391,
7718,
... | 3.017857 | 56 |
import os
import sys
import re
import json
import random
import pickle
import numpy as np
if __name__ == "__main__":
tg = TraceGenerator(
use_existing_trace=False,
url_nums=240,
trace_length=2400,
)
trace = tg.generate_new_trace()
with open('data/trace.pickle', 'wb') as fp:
pickle.dump(trace, fp)
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
302,
198,
11748,
33918,
198,
11748,
4738,
198,
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220... | 2.251613 | 155 |
import unittest
from BarCrawlrServer.model.plan import plan
import json
myPlan = "{" +\
"\"name\":\"Alex's Plan\"," +\
"\"places\":[" +\
"{" +\
"\"name\":\"Joe's Bar\"," +\
"\"address\":\"10 King's Street, Burlington, 05401 VT\"," +\
"\"lon\":0.0," +\
"\"lat\":0.0" +\
"}," +\
"{" +\
"\"name\":\"Bob's Bar\"," +\
"\"address\":\"11 King's Street, Burlington, 05401 VT\"," +\
"\"lon\":0.1," +\
"\"lat\":0.1" +\
"}" +\
"]" +\
"}"
if __name__ == '__main__':
    # Running the module directly executes its unittest test cases.
    unittest.main()
11748,
555,
715,
395,
628,
198,
6738,
2409,
34,
13132,
81,
10697,
13,
19849,
13,
11578,
1330,
1410,
198,
198,
11748,
33918,
198,
198,
1820,
20854,
796,
366,
4895,
1343,
59,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 1.61978 | 455 |
import json

# Nested structure describing a grocery's stock of fruit and vegetables.
data = {
    "Fruteria": [
        {"Fruta": [
            {"Nombre": "Manzana", "Cantidad": 10},
            {"Nombre": "Pera", "Cantidad": 20},
            {"Nombre": "Naranja", "Cantidad": 30},
        ]},
        {"Verdura": [
            {"Nombre": "Lechuga", "Cantidad": 80},
            {"Nombre": "Tomate", "Cantidad": 15},
            {"Nombre": "Pepino", "Cantidad": 50},
        ]},
    ]
}

# Round-trip the structure through its JSON text representation.
data_string = json.dumps(data)   # encoded
decoded = json.loads(data_string)  # decoded

# Report the lettuce stock read back from the decoded copy.
lettuce_count = decoded["Fruteria"][1]["Verdura"][0]["Cantidad"]
print("Tenemos " + str(lettuce_count) + " Lechugas.")
| [
11748,
33918,
198,
198,
7890,
796,
19779,
6732,
315,
5142,
1298,
685,
220,
19779,
6732,
29822,
1298,
220,
220,
685,
220,
220,
220,
19779,
45,
2381,
260,
2404,
5124,
89,
2271,
2430,
34,
415,
32482,
1298,
940,
5512,
220,
220,
220,
19779... | 2.174888 | 223 |
import pandas as pd
import numpy as np
import math
import copy
if __name__ == '__main__':
    # Smoke-test penalize_this_cont_col (defined elsewhere in this module)
    # with a minimal one-feature model, once per penalty kind.  A fresh model
    # dict is built each time since the call may mutate it -- TODO confirm.
    model = {"BIG_C":1.0,"conts":{"x":[[0,0],[1,1]]}, "cats":[]}
    print(penalize_this_cont_col(model,"x", {"contStraight":0.01}))
    model = {"BIG_C":1.0,"conts":{"x":[[0,0],[1,1]]}, "cats":[]}
    print(penalize_this_cont_col(model,"x", {"contGroup":0.01}))
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
4866,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
2746,
796,
19779,
3483,
38,
62,
34,
1298,
16,
13,
15,
... | 2.293333 | 150 |
from threading import Thread
from src.commands.ForwardSavedStrandAndInformUserCommand import ForwardSavedStrandAndInformUserCommand
from src.commands.SendErrorMessageCommand import SendErrorMessageCommand
from src.commands.SendPleaseInstallMessageCommand import SendPleaseInstallMessageCommand
from src.models.exceptions.exceptions import InvalidSlashCommandException
from src.models.domain.Agent import Agent
from src.models.domain.User import User
from src.services.Service import Service
from src.services.helper.BuildTextFromChannelHistoryService import BuildTextFromChannelHistoryService
from src.services.helper.ConvertTextToGFMService import ConvertTextToGFMService
from src.utilities.database import db_session
| [
6738,
4704,
278,
1330,
14122,
198,
198,
6738,
12351,
13,
9503,
1746,
13,
39746,
50,
9586,
13290,
392,
1870,
818,
687,
12982,
21575,
1330,
19530,
50,
9586,
13290,
392,
1870,
818,
687,
12982,
21575,
198,
6738,
12351,
13,
9503,
1746,
13,
... | 4.096591 | 176 |
import time
import logging
import rethinkdb as r
from bigchaindb import backend
from bigchaindb.backend.exceptions import BackendError
from bigchaindb.backend.changefeed import ChangeFeed
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
logger = logging.getLogger(__name__)
register_changefeed = module_dispatch_registrar(backend.changefeed)
class RethinkDBChangeFeed(ChangeFeed):
    """This class wraps a RethinkDB changefeed as a multipipes Node.

    No overrides are visible in this excerpt; behaviour presumably comes
    from the ChangeFeed base class -- confirm against the full module.
    """
def run_changefeed(connection, table):
    """Yield changes from a RethinkDB changefeed, retrying on failure.

    Encapsulates the operational logic of tailing the changefeed: when the
    backend/driver connection fails, the error is logged and the feed is
    re-established after a one-second pause.

    :param connection: connection object whose ``run`` executes the query.
    :param table: name of the table whose changefeed is tailed.
    """
    while True:
        try:
            for change in connection.run(r.table(table).changes()):
                yield change
            # A changefeed cursor is normally endless; reaching this point
            # means it terminated cleanly, so stop retrying.
            break
        except (BackendError, r.ReqlDriverError):
            # Fix: drop the unused ``as exc`` binding -- logger.exception
            # already records the active exception from sys.exc_info().
            logger.exception('Error connecting to the database, retrying')
            time.sleep(1)
@register_changefeed(RethinkDBConnection)
def get_changefeed(connection, table, operation, *, prefeed=None):
    """Return a RethinkDB changefeed.

    Registered for :class:`RethinkDBConnection` via the module's dispatch
    registrar, so the generic backend API routes RethinkDB connections here.

    Args:
        connection: connection handed through to the changefeed.
        table: name of the table to watch for changes.
        operation: change operation(s) the feed should emit.
        prefeed: optional items to emit before the live feed starts.

    Returns:
        An instance of
        :class:`~bigchaindb.backend.rethinkdb.RethinkDBChangeFeed`.
    """
    return RethinkDBChangeFeed(table, operation, prefeed=prefeed,
                               connection=connection)
| [
11748,
640,
198,
11748,
18931,
198,
11748,
36437,
9945,
355,
374,
198,
198,
6738,
1263,
7983,
9945,
1330,
30203,
198,
6738,
1263,
7983,
9945,
13,
1891,
437,
13,
1069,
11755,
1330,
5157,
437,
12331,
198,
6738,
1263,
7983,
9945,
13,
1891,... | 2.68254 | 504 |
#!/usr/bin/python
#-*- coding: utf-8 -*-
'''
该部分程序用于判断目标合约是否包含转钱出去的语句
'''
'''
可用工具:slither真是个宝藏工具
slither可能可用的功能:
合约各个函数的调用图
文件中各个合约的继承关系
最子类合约的构造函数执行结果
function-summary里有每个函数读写、内外部调用的总结
human-summary里有对每个合约功能的概述->可以用来判断->不能用来判断,对于Receive ETH而言,只判断payable关键字而不判断合约是否真的可以接收以太币
require显示出每个合约的每个函数中用到的require和assert
最子类合约状态变量的内存情况
对状态变量的写入及对应的auth操作
'''
import subprocess
import os
from inherGraph import inherGraph #该库用于返回主合约的合约名
from colorPrint import * #该头文件中定义了色彩显示的信息
from pydot import io #该头文件用来读取.dot文件
import re
import json
#缓存路径
#进行抽取时,合约仍然存于cache文件夹中
CACHE_PATH = "./cache/"
#transfer标志
TRANSFER_FLAG = "transfer"
#send标志
SEND_FLAG = "send"
#收款地址标志
ADDRESS_PAYABLE_FLAG = "address payable"
#value标志
VALUE_FLAG = "value"
#call标志
CALL_FLAG = "call"
#元组标志
TUPLE_FLAG = "tuple()"
#自毁函数类型
SUICIDE_FUNC_TYPE = "function (address payable)"
#自毁函数名
SUICIDE_FUNC_NAME = "selfdestruct"
#使用注释符号进行无效化
COMMENT_FLAG = 0
#使用矛盾语句进行无效化
INVALID_FLAG = 1
#规定转出金额为0进行无效化
ZERO_FLAG = 2
'''
新无效化方法
transfer/send/call.value语句通过替换转出金额为0来构造bug
selfdestruct语句通过注释
探究下
transfer属于FuntionCall,children[0]就是msg.sender.transfer
'''
class judgePath:
#通过给定的ast,返回转账数额的位置
#根据给定的开始和结束位置,返回包含这一部分的语句
'''
最终决定不用type作为判断依据,因为不同版本的Solidity这几个函数的type是不同的(会导致我们的可用性范围收窄)
'''
#在给定的ast中返回包含键值对"_name": "_value"的字典列表
#self.findASTNode的value,根据regex的版本
#传入:657:17:0
#传出:657, 674
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
46237,
98,
32849,
101,
26344,
228,
163,
101,
233,
41753,
237,
18796,
101,
12859,
236,
26344,
97,
23877,
25... | 1.01107 | 1,355 |
from setuptools import setup, find_packages
from teamleader import __prog__, __version__
setup(
name=__prog__,
version=__version__,
description='python-teamleader',
long_description='Library for accessing the Teamleader API (http://apidocs.teamleader.be/)',
author='Ruben Van den Bossche, Matteo De Wint',
author_email='ruben@novemberfive.co, matteo@novemberfive.co',
url='https://github.com/novemberfiveco/python-teamleader',
download_url='https://github.com/novemberfiveco/python-teamleader/tarball/' + __version__,
packages=['teamleader'],
include_package_data=True,
install_requires=['requests', 'pycountry']
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
6738,
1074,
27940,
1330,
11593,
1676,
70,
834,
11,
11593,
9641,
834,
628,
198,
40406,
7,
198,
220,
220,
220,
1438,
28,
834,
1676,
70,
834,
11,
198,
220,
220,
220,
... | 2.883117 | 231 |
"""
Just a dummy package to test again importing modules.
"""
| [
37811,
198,
5703,
257,
31548,
5301,
284,
1332,
757,
33332,
13103,
13,
198,
37811,
198
] | 4.133333 | 15 |
#
# This illustrates how the code_pipeline can be used to run the sample test generator
#
import unittest
from click.testing import CliRunner
if __name__ == '__main__':
unittest.main() | [
2,
198,
2,
770,
21290,
703,
262,
2438,
62,
79,
541,
4470,
460,
307,
973,
284,
1057,
262,
6291,
1332,
17301,
198,
2,
198,
11748,
555,
715,
395,
198,
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
628,
198,
198,
361,
11593,
3672,
834... | 3.183333 | 60 |
#!/usr/bin/env python
import re
import fileinput
import json
import urllib
Lookup = {}
key = raw_input("Please type your gene name, all caps, and press enter: ")
#print var_matches
for line in fileinput.input(['/home/jiawen/data/Homo_sapiens.GRCh37.75.gtf']):
gene_id_matches = re.findall('gene_id \"(.*?)\";',line)
gene_name_matches = re.findall('gene_name \"(.*?)\";',line)
if gene_name_matches:
if gene_id_matches:
Lookup[gene_name_matches[0]] = gene_id_matches[0]
print "The variants within the gene" + key + Lookup[key] + "are: "
# print "key is " + gene_name_matches[0] + "val is " + gene_id_matches[0]
#print "gene", key, Lookup[key]
#alternative method but does not work#
#for line in fileinput.input(['/home/jiawen/data/Homo_sapiens.GRCh37.75.gtf']):
# var_matches = 'gene_id \"(\S*)\"; gene_name \"(.*?)\"\;'
# gene_id = re.findall(var_matches,line)
# print gene_id
#if gene_id == "key":
# print "gene" + str(gene_id)
#print 'The variants within the gene' + key + '(' + gene_id + ') are:'
# with urllib.request.urlopen("http://rest.ensembl.org/overlap/id/"gene_id".json?feature=variation")
# data = json.loads(url.read().decode())
# for consequence_type in data:
# print 'Variant {} is a {}.'.format(data['id'], data['consequence_type'])
#PART 2#
url = "http://rest.ensembl.org/overlap/id/" + Lookup[key] + ".json?feature=variation"
response = urllib.urlopen(url)
data = json.loads(response.read())
#make this part a loop#
for i in range (0, len(data)):
dic = data[i]
id_names = dic['id']
consequence_type = dic['consequence_type']
clinical_result = dic['clinical_significance']
if clinical_result:
print "Variant" + id_names + "is a " + consequence_type.replace("_"," ") + ", and is clinically " + clinical_result[0].upper()
else:
print "Variant" + id_names + "is a " + consequence_type.replace("_"," ") + ".\n"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
302,
198,
11748,
2393,
15414,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
198,
8567,
929,
796,
23884,
198,
2539,
796,
8246,
62,
15414,
7203,
5492,
2099,
534,
9779,
1438,
11,
... | 2.402214 | 813 |
from django.shortcuts import get_object_or_404
import jingo
from projects.models import Project
from topics.models import Topic
def all(request):
"""Show a list of topics."""
# note that we get the list of topics from the bundled context processor
projects = Project.objects.all()
return jingo.render(request, 'topics/all.html', {
'projects': projects,
})
def show(request, slug):
"""Show a specific topic."""
topic = get_object_or_404(Topic, slug=slug)
projects = Project.objects.filter(topics=topic)
request.session['topic'] = topic.name
return jingo.render(request, 'topics/show.html', {
'topic': topic,
'projects': projects
})
def about(request, slug):
"""Show more detailed information about a specific topic."""
topic = get_object_or_404(Topic, slug=slug)
template = 'topics/about.html'
if request.is_ajax():
template = 'topics/ajax/about.html'
return jingo.render(request, template, {
'topic': topic
})
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
273,
62,
26429,
198,
198,
11748,
474,
32735,
198,
198,
6738,
4493,
13,
27530,
1330,
4935,
198,
6738,
10233,
13,
27530,
1330,
47373,
628,
198,
4299,
477,
7,
25927,
2599,
1... | 2.772973 | 370 |
"""
The MIT License (MIT)
Copyright (c) 2016 Swapnil Agarwal
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from copy import deepcopy as copy
QUICK_REPLIES_LIMIT = 11
TITLE_CHARACTER_LIMIT = 20
PAYLOAD_CHARACTER_LIMIT = 1000
| [
37811,
198,
464,
17168,
13789,
357,
36393,
8,
198,
198,
15269,
357,
66,
8,
1584,
48408,
45991,
2449,
283,
16783,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
1659,
428,
3788,
... | 3.622754 | 334 |
# -*- coding: utf-8 -*-
# NOTES:
# - this file contains the CLI implementation for `hddcoin hodl commit`
from __future__ import annotations
import asyncio
import decimal
import pathlib
import sys
import time
import typing as th
import aiohttp
import blspy # type: ignore
import yaml
import hddcoin.hodl
import hddcoin.hodl.exc as exc
import hddcoin.util
from hddcoin.hodl.hodlrpc import HodlRpcClient
from hddcoin.hodl.util import vlog
from hddcoin.hodl.val import validateContract
from .colours import *
from .colours import _
# Set the threshold for minimum useful/recommended fee value
# - this is a rough recommendation based on the minimum fee per cost (see https://bit.ly/31zLWqG)
# - fees smaller than this amount risk getting treated as having zero fee (this is a dust storm
# mitigation thing), and thus not having any prioritization effect
# - the actual minimum depends entirely on how many wallet coins need to be collected and spent to
# make the deposit. If more than a few coins are needed the minmum fee should be higher.
# - since HODL deposits are typically big, and are more likely to require more wallet coins, we
# estimate a little higher than the "normal" fee/cost limits
# - this is only even checked if the client decides to add a fee, which is itself only needed to
# boost the priority of the deposit in the mempool if/when the blockchain is extremely busy
# - at time of writing this, fees are completely unnecessary to deposit (or just transfer) HDD
SMALLEST_USEFUL_FEE_hdd = 0.0001
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
5626,
1546,
25,
198,
2,
220,
532,
428,
2393,
4909,
262,
43749,
7822,
329,
4600,
71,
1860,
3630,
289,
375,
75,
4589,
63,
198,
6738,
11593,
37443,
834,
1330,
37647,
... | 3.484305 | 446 |
from ..params_container import Container
from requests import get
from bs4 import BeautifulSoup
from html import unescape
from ..target import Target
TEST_QUERIES = {'test_single_query': 'walasa',
'test_multi_query': ['walasa', 'yɔrɔ']
}
__doc__ = \
"""
API for Bamana corpus (http://maslinsky.spb.ru/bonito/index.html).
Args:
query: str or List([str]): query or queries (currently only exact search by word or phrase is available)
numResults: int: number of results wanted (100 by default)
kwic: boolean: kwic format (True) or a sentence (False) (True by default)
tag: boolean: whether to collect grammatical tags for target word or not (False by default, available only for corbama-net-non-tonal subcorpus)
subcorpus: str: subcorpus. Available options: 'corbama-net-non-tonal', 'corbama-net-tonal', 'corbama-brut' ('corbama-net-non-tonal' by default)
Main function: extract
Returns:
A generator of Target objects.
"""
| [
6738,
11485,
37266,
62,
34924,
1330,
43101,
198,
6738,
7007,
1330,
651,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
27711,
1330,
555,
41915,
198,
6738,
11485,
16793,
1330,
12744,
198,
198,
51,
6465,
62,
10917,
1137,
11015... | 2.774373 | 359 |
from time import time_ns
from typing import List
from app import App
class Rotator:
'''
Rotator rotates apps vertically on the screen
'''
class Scroller:
'''
Scrolls content right to left at the given speed
'''
| [
6738,
640,
1330,
640,
62,
5907,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
598,
1330,
2034,
198,
198,
4871,
18481,
1352,
25,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
18481,
1352,
5724,
689,
6725,
31677,
319,
262,
3159,
... | 3 | 80 |
from tornasole.exceptions import *
from tornasole.core.utils import get_logger
from tornasole.trials import create_trial
from tornasole.rules.generic import *
import inspect
logger = get_logger()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Rule invoker takes the below arguments and'
'any argument taken by the rules. The arguments not'
'mentioned below are automatically passed when'
'creating the rule objects.')
parser.add_argument('--trial-dir', type=str, required=True)
parser.add_argument('--rule-name', type=str, required=True)
parser.add_argument('--other-trials', type=str,
help='comma separated paths for '
'other trials taken by the rule')
parser.add_argument('--start-step', type=int)
parser.add_argument('--end-step', type=int)
parser.add_argument('--raise-rule-eval-cond-exception',
type=bool, default=False)
parsed, unknown = parser.parse_known_args()
for arg in unknown:
if arg.startswith('--'):
parser.add_argument(arg, type=str)
args = parser.parse_args()
args_dict = vars(args)
r = create_rule(args, args_dict)
invoke_rule(r, start_step=args.start_step, end_step=args.end_step,
raise_eval_cond=args.raise_rule_eval_cond_exception)
| [
6738,
12445,
292,
2305,
13,
1069,
11755,
1330,
1635,
198,
6738,
12445,
292,
2305,
13,
7295,
13,
26791,
1330,
651,
62,
6404,
1362,
198,
6738,
12445,
292,
2305,
13,
28461,
874,
1330,
2251,
62,
45994,
198,
6738,
12445,
292,
2305,
13,
387... | 2.330159 | 630 |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.urls import path
from django.views.generic.base import TemplateView
try:
import debug_toolbar
except ImportError:
pass
import symposion.views
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
from wagtail.images.views.serve import ServeView as WagtailImageView
from conf_site.core.views import csrf_failure
from conf_site.schedule.views import (
ExportPresentationSpeakerView,
PresentationDetailView,
PresentationRedirectView,
)
from conf_site.speakers.views import (
ExportAcceptedSpeakerEmailView,
SpeakerDetailView,
SpeakerListView,
SpeakerRedirectView,
)
from conf_site.sponsorship.views import ExportSponsorsView
WIKI_SLUG = r"(([\w-]{2,})(/[\w-]{2,})*)"
if settings.DEBUG:
urlpatterns = [url(r"^__debug__/", include(debug_toolbar.urls)), ]
else:
urlpatterns = []
urlpatterns += [
url(r"^admin/", admin.site.urls),
url(r"^accounts/", include("allauth.urls")),
url(r"^api/", include("conf_site.api.urls")),
url(r"^cms/", include(wagtailadmin_urls)),
url(r"^dashboard/", symposion.views.dashboard, name="dashboard"),
url(r"^documents/", include(wagtaildocs_urls)),
url(
r"^images/([^/]*)/(\d*)/([^/]*)/[^/]*$",
WagtailImageView.as_view(action="redirect"),
name="wagtailimages_serve",
),
url(r"^speaker/export/$",
staff_member_required(ExportAcceptedSpeakerEmailView.as_view()),
name="speaker_email_export"),
url(r"^speaker/list/$", SpeakerListView.as_view(), name="speaker_list"),
path(
"speaker/profile/<int:pk>/",
SpeakerRedirectView.as_view(),
name="speaker_profile_redirect",
),
path(
"speaker/profile/<int:pk>/<slug:slug>/",
SpeakerDetailView.as_view(),
name="speaker_profile",
),
url(r"^speaker/", include("symposion.speakers.urls")),
url(r"^proposals/", include("conf_site.proposals.urls")),
url(
r"^sponsors/export/$",
staff_member_required(ExportSponsorsView.as_view()),
name="sponsor_export",
),
url(r"^sponsors/", include("symposion.sponsorship.urls")),
url(r"^reviews/", include("conf_site.reviews.urls")),
path(
"schedule/presentation/<int:pk>/",
PresentationRedirectView.as_view(),
name="schedule_presentation_redirect",
),
path(
"schedule/presentation/<int:pk>/<slug:slug>/",
PresentationDetailView.as_view(),
name="schedule_presentation_detail",
),
url(r"^schedule/presentation/export/$",
staff_member_required(ExportPresentationSpeakerView.as_view()),
name="presentation_speaker_export"),
url(r"^schedule/", include("symposion.schedule.urls")),
url(r"^403-csrf/", csrf_failure, name="403-csrf"),
url(r"^413/", TemplateView.as_view(template_name="413.html")),
url(r"", include(wagtail_urls)),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
... | 2.372734 | 1,379 |
import pytest
import numpy as np
| [
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
628,
198
] | 3.181818 | 11 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetApiOperationResult',
'AwaitableGetApiOperationResult',
'get_api_operation',
]
@pulumi.output_type
class GetApiOperationResult:
"""
Api Operation details.
"""
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description of the operation. May include HTML formatting tags.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
Operation Name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def method(self) -> str:
"""
A Valid HTTP Operation Method. Typical Http Methods like GET, PUT, POST but not limited by only them.
"""
return pulumi.get(self, "method")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def policies(self) -> Optional[str]:
"""
Operation Policies
"""
return pulumi.get(self, "policies")
@property
@pulumi.getter
def request(self) -> Optional['outputs.RequestContractResponse']:
"""
An entity containing request details.
"""
return pulumi.get(self, "request")
@property
@pulumi.getter
def responses(self) -> Optional[Sequence['outputs.ResponseContractResponse']]:
"""
Array of Operation responses.
"""
return pulumi.get(self, "responses")
@property
@pulumi.getter(name="templateParameters")
def template_parameters(self) -> Optional[Sequence['outputs.ParameterContractResponse']]:
"""
Collection of URL template parameters.
"""
return pulumi.get(self, "template_parameters")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="urlTemplate")
def url_template(self) -> str:
"""
Relative URL template identifying the target resource for this operation. May include parameters. Example: /customers/{cid}/orders/{oid}/?date={date}
"""
return pulumi.get(self, "url_template")
# pylint: disable=using-constant-test
def get_api_operation(api_id: Optional[str] = None,
operation_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiOperationResult:
"""
Api Operation details.
:param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param str operation_id: Operation identifier within an API. Must be unique in the current API Management service instance.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['apiId'] = api_id
__args__['operationId'] = operation_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20191201:getApiOperation', __args__, opts=opts, typ=GetApiOperationResult).value
return AwaitableGetApiOperationResult(
description=__ret__.description,
display_name=__ret__.display_name,
id=__ret__.id,
method=__ret__.method,
name=__ret__.name,
policies=__ret__.policies,
request=__ret__.request,
responses=__ret__.responses,
template_parameters=__ret__.template_parameters,
type=__ret__.type,
url_template=__ret__.url_template)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
26144,
35986,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760,
644,
345,
389,
1804,
0,
17202,
... | 2.491172 | 1,869 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for model_search.registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from model_search import registry
@registry.register(DecoratorBase)
@registry.register(DecoratorBase, lookup_name="alias")
@registry.register(base_method)
@registry.register(TestBase, lookup_name="OUR_TEST", init_args={"a": True})
if __name__ == "__main__":
absltest.main()
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.416404 | 317 |
from bs4 import BeautifulSoup as bs
import sys,glob
# test_i = open(sys.argv[1])
for i in glob.glob('*_META_ORG'):
newdata = get_contracts(open(i))
#print newdata
with open(i+"CN.xml", "w") as output:
output.write(' '.join(str(doc) for doc in newdata))
# [1] in 2008 and 2009 are <proc> and <natnotice> often the same, except of cases where natnotice is 2 = additional info.
# In 2010, <proc> holds information on procedure (open, negotiated, etc.), which of course makes more sense.
| [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
355,
275,
82,
198,
11748,
25064,
11,
4743,
672,
198,
198,
2,
1332,
62,
72,
796,
1280,
7,
17597,
13,
853,
85,
58,
16,
12962,
198,
198,
1640,
1312,
287,
15095,
13,
4743,
672,
10786,
9,
62... | 2.819209 | 177 |
#/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Guy Serbin"
__version__ = "1.0.4"
__all__ = ["imageprocess", "ENVIfile", "modistools"]
__main__ = "imageprocess"
| [
2,
14,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
366,
31080,
2930,
8800,
1,
198,
834,
9641,
834,
796,
366,
16,
13,
15,
13,
19,
1,
198,
834,
439,
... | 2.297297 | 74 |
from importlib import reload
from . import config, plugin
__version__ = "1.0.0"
__author__ = "Paul Townsend <pault@pault.org>"
__url__ = "https://github.com/paultownsend/supybot-soccer"
# In case we're being reloaded.
reload(config)
reload(plugin)
Class = plugin.Class
configure = config.configure
| [
6738,
1330,
8019,
1330,
18126,
198,
198,
6738,
764,
1330,
4566,
11,
13877,
198,
198,
834,
9641,
834,
796,
366,
16,
13,
15,
13,
15,
1,
198,
834,
9800,
834,
796,
366,
12041,
46811,
1279,
79,
1721,
31,
79,
1721,
13,
2398,
24618,
198,... | 2.849057 | 106 |
from abc import ABC
from typing import Iterable
import pandas as pd
from ntiles import plotter
from ntiles.tears.base_tear import BaseTear
from ntiles import utils
class ICTear(BaseTear, ABC):
"""
Computes IC from the given factor and returns
Currently will only measure IC for days a company is in the universe
Example: AAPl is in the univere on 1/10 but not in universe on 11/10 if we have greater than 10 day holding period
that asset wint count in the IC calculation
"""
def __init__(self, factor_data: pd.DataFrame, daily_returns: pd.DataFrame, holding_period: int):
"""
:param factor_data: factor data to look at must be from Ntiles
:param daily_returns: daily returns we are calculating the IC on must be from Ntiles
:param holding_period: Holding period we are calculating IC for
"""
super().__init__()
self.factor_data = factor_data
self.daily_returns = daily_returns
self.holding_period = holding_period
self.daily_ic = None
self.ic_stats = None
#
# Calculation
#
def compute_daily_ic(self) -> None:
"""
calculates and sets the daily IC for the holding period
:return: None
"""
self.factor_data.index.names = ['date', 'id']
# slicing off factor values we dont have forward return data for
factor_unstacked = self.factor_data['factor'].unstack()#.iloc[:-self.holding_period]
forward_returns = self.compute_forward_returns().reindex_like(factor_unstacked)
ic_array = utils.correlation_2d(factor_unstacked.to_numpy(), forward_returns.to_numpy())
self.daily_ic = pd.Series(ic_array, index=forward_returns.index)
def compute_forward_returns(self) -> pd.DataFrame:
"""
Calculates self.holding_period forward returns from daily returns
:return: index: date; columns: asset; values: self.holding_period forward returns
"""
# must mad extra day due to cumprod making first date nan
daily_ret = self.daily_returns # utils.pad_extra_day(self.daily_returns, 0)
return daily_ret.add(1).cumprod().pct_change(self.holding_period).shift(-self.holding_period)
def calculate_ic_table(self) -> None:
"""
calculates summary stats for the IC data
:return: None, sets self.ic_stats
"""
mean_ic = self.daily_ic.mean()
std_ic = self.daily_ic.std()
stats = {
'IC Mean': mean_ic,
'IC Median': self.daily_ic.median(),
'IC Std': std_ic,
'Risk Adjusted IC': mean_ic / std_ic,
'IC Skew': self.daily_ic.skew()
}
self.ic_stats = pd.Series(stats).round(3).to_frame(f'{self.holding_period}D').transpose()
#
# Plotting
#
def plot(self) -> None:
"""
plots the IC data in self.daily_ic
:return: None
"""
print('Information Coefficient')
plotter.render_table(self.ic_stats)
plotter.plot_timeseries_ic(self.daily_ic, self.holding_period)
# plotter.plot_ic_qq(self.daily_ic)
# plotter.plot_ic_hist(self.daily_ic)
#
# To clipboard functions
#
def ic_to_clipboard(self) -> None:
"""
writes ic to the clipboard
:return: None
"""
self.daily_ic.to_clipboard()
class ICHorizonTear(BaseTear, ABC):
"""
Computes the IC horizon tear
Will give insight into optimal holding periods for the factor
"""
def __init__(self, factor_data: pd.DataFrame, daily_returns: pd.DataFrame, intervals: Iterable[int],
show_individual):
"""
:param factor_data: The factor values being tested, must be from Ntiles
:param daily_returns: matrix of returns from Ntiles
:param intervals: an iterable that contains the holding periods we would like to make the IC frontier for
"""
super().__init__()
self._factor_data = factor_data
self._daily_returns = daily_returns
self._intervals = sorted(list(intervals))
self._show_individual = show_individual
self.tears = {}
self._ic_horizon = None
def compute(self) -> None:
"""
runs a IC tear for all the periods we want to test over
"""
for interval in self._intervals:
self.tears[interval] = ICTear(self._factor_data, self._daily_returns, interval)
self.tears[interval].compute()
self._ic_horizon = pd.concat([tear.ic_stats for tear in self.tears.values()])
def plot(self) -> None:
"""
plots the IC frontier and the Time series IC
"""
plotter.plot_ic_horizon(self._ic_horizon.drop(['IC Skew'], axis=1))
plotter.render_table(self._ic_horizon)
if self._show_individual:
for ic_tear in self.tears.values():
ic_tear.plot()
| [
6738,
450,
66,
1330,
9738,
198,
6738,
19720,
1330,
40806,
540,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
299,
83,
2915,
1330,
7110,
353,
198,
6738,
299,
83,
2915,
13,
83,
4127,
13,
8692,
62,
83,
451,
1330,
7308,
5... | 2.38395 | 2,081 |
from app.celery import celery
from app.integrations import tg
@celery.task
| [
6738,
598,
13,
7015,
88,
1330,
18725,
1924,
198,
6738,
598,
13,
18908,
9143,
1330,
256,
70,
628,
198,
31,
7015,
88,
13,
35943,
198
] | 3.08 | 25 |
"""
Module for /api/account method
"""
import uuid
from datetime import datetime, timedelta
from typing import Dict, Tuple
from flask_restful import Resource, reqparse
from werkzeug.exceptions import abort
from api.endpoints.accounts.auth import authorization_validator
from api.endpoints.accounts.model import Account
from api.endpoints.utils import add_auth_argument, add_common_arguments, log_context
| [
37811,
198,
26796,
329,
1220,
15042,
14,
23317,
2446,
198,
37811,
198,
11748,
334,
27112,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
19720,
1330,
360,
713,
11,
309,
29291,
198,
198,
6738,
42903,
62,
2118,
913,... | 3.60177 | 113 |
# -*- coding: utf-8 -*-
from clare.common.messaging import consumer
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
537,
533,
13,
11321,
13,
37348,
3039,
1330,
7172,
628,
198
] | 2.62963 | 27 |
#
# Copyright (c), 2018-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module contains base classes and helper functions for defining Pratt parsers.
"""
import sys
import re
from abc import ABCMeta
from unicodedata import name as unicode_name
from decimal import Decimal, DecimalException
from typing import Any, cast, overload, no_type_check_decorator, Callable, \
ClassVar, FrozenSet, Dict, Generic, List, Optional, Union, Tuple, Type, \
Pattern, Match, MutableMapping, MutableSequence, Iterator, Set, TypeVar
from .datatypes import AtomicValueType
#
# Simple top-down parser based on Vaughan Pratt's algorithm (Top Down Operator Precedence).
#
# References:
#
# https://tdop.github.io/ (Vaughan R. Pratt's "Top Down Operator Precedence" - 1973)
# http://crockford.com/javascript/tdop/tdop.html (Douglas Crockford - 2007)
# http://effbot.org/zone/simple-top-down-parsing.htm (Fredrik Lundh - 2008)
#
# This implementation is based on a base class for tokens and a base class for parsers.
# A real parser is built with a derivation of the base parser class followed by the
# registrations of token classes for the symbols of the language.
#
# A parser can be extended by derivation, copying the reusable token classes and
# defining the additional ones. See the files xpath1_parser.py and xpath2_parser.py
# for a full implementation example of a real parser.
#
# Parser special symbols set, that includes the special symbols of TDOP plus two
# additional special symbols for managing invalid literals and unknown symbols
# and source start.
SPECIAL_SYMBOLS = frozenset((
'(start)', '(end)', '(string)', '(float)', '(decimal)',
'(integer)', '(name)', '(invalid)', '(unknown)',
))
class ParseError(SyntaxError):
"""An error when parsing source with TDOP parser."""
def _symbol_to_classname(symbol: str) -> str:
"""
Converts a symbol string to an identifier (only alphanumeric and '_').
"""
if symbol.isalnum():
return symbol.title()
elif symbol in SPECIAL_SYMBOLS:
return symbol[1:-1].title()
elif all(c in '-_' for c in symbol):
value = ' '.join(unicode_name(c) for c in symbol)
return value.title().replace(' ', '').replace('-', '').replace('_', '')
value = symbol.replace('-', '_')
if value.isidentifier():
return value.title().replace('_', '')
value = ''.join(get_id_name(c) for c in symbol)
return value.replace(' ', '').replace('-', '').replace('_', '')
class MultiLabel:
"""
Helper class for defining multi-value label for tokens. Useful when a symbol has more roles.
A label of this type has equivalence with each of its values.
Example:
label = MultiLabel('function', 'operator')
label == 'symbol' # False
label == 'function' # True
label == 'operator' # True
"""
TK = TypeVar('TK', bound='Token[Any]')
class Token(MutableSequence[TK]):
"""
Token base class for defining a parser based on Pratt's method.
Each token instance is a list-like object. The number of token's items is
the arity of the represented operator, where token's items are the operands.
Nullary operators are used for symbols, names and literals. Tokens with items
represent the other operators (unary, binary and so on).
Each token class has a *symbol*, a lbp (left binding power) value and a rbp
(right binding power) value, that are used in the sense described by the
Pratt's method. This implementation of Pratt tokens includes two extra
attributes, *pattern* and *label*, that can be used to simplify the parsing
of symbols in a concrete parser.
:param parser: The parser instance that creates the token instance.
:param value: The token value. If not provided defaults to token symbol.
:cvar symbol: the symbol of the token class.
:cvar lbp: Pratt's left binding power, defaults to 0.
:cvar rbp: Pratt's right binding power, defaults to 0.
:cvar pattern: the regex pattern used for the token class. Defaults to the \
escaped symbol. Can be customized to match more detailed conditions (eg. a \
function with its left round bracket), in order to simplify the related code.
:cvar label: defines the typology of the token class. Its value is used in \
representations of the token instance and can be used to restrict code choices \
without more complicated analysis. The label value can be set as needed by the \
parser implementation (eg. 'function', 'axis', 'constructor function' are used by \
the XPath parsers). In the base parser class defaults to 'symbol' with 'literal' \
and 'operator' as possible alternatives. If set by a tuple of values the token \
class label is transformed to a multi-value label, that means the token class can \
covers multiple roles (eg. as XPath function or axis). In those cases the definitive \
role is defined at parse time (nud and/or led methods) after the token instance creation.
"""
lbp: int = 0 # left binding power
rbp: int = 0 # right binding power
symbol: str = '' # the token identifier
label: str = 'symbol' # optional label
pattern: Optional[str] = None # a custom regex pattern for building the tokenizer
__slots__ = '_items', 'parser', 'value', '_source', 'span'
_items: List[TK]
parser: 'Parser[Token[TK]]'
value: Optional[AtomicValueType]
_source: str
span: Tuple[int, int]
@overload
@overload
@property
@property
def tree(self) -> str:
"""Returns a tree representation string."""
if self.symbol == '(name)':
return '(%s)' % self.value
elif self.symbol in SPECIAL_SYMBOLS:
return '(%r)' % self.value
elif self.symbol == '(':
if not self:
return '()'
elif len(self) == 1:
return self[0].tree
return '(%s)' % ' '.join(item.tree for item in self)
elif not self:
return '(%s)' % self.symbol
else:
return '(%s %s)' % (self.symbol, ' '.join(item.tree for item in self))
@property
def source(self) -> str:
"""Returns the source representation string."""
symbol = self.symbol
if symbol == '(name)':
return cast(str, self.value)
elif symbol == '(decimal)':
return str(self.value)
elif symbol in SPECIAL_SYMBOLS:
return repr(self.value)
else:
length = len(self)
if not length:
return symbol
elif length == 1:
if 'postfix' in self.label:
return '%s %s' % (self[0].source, symbol)
return '%s %s' % (symbol, self[0].source)
elif length == 2:
return '%s %s %s' % (self[0].source, symbol, self[1].source)
else:
return '%s %s' % (symbol, ' '.join(item.source for item in self))
@property
def position(self) -> Tuple[int, int]:
"""A tuple with the position of the token in terms of line and column."""
token_index = self.span[0]
line = self._source[:token_index].count('\n') + 1
if line == 1:
return 1, token_index + 1
return line, token_index - self._source[:token_index].rindex('\n')
    def nud(self) -> TK:
        """Pratt's null denotation method"""
        # Default: a token with no prefix (nud) role is a syntax error where a
        # value is expected; token classes override this via register().
        raise self.wrong_syntax()
    def led(self, left: TK) -> TK:
        """Pratt's left denotation method"""
        # Default: a token with no infix/postfix (led) role is a syntax error.
        raise self.wrong_syntax()
    def evaluate(self) -> Any:
        """Evaluation method"""
        # Default evaluation: the token evaluates to its own value.
        return self.value
    def iter(self, *symbols: str) -> Iterator['Token[TK]']:
        """Returns a generator for iterating the token's tree."""
        # Iterative traversal with an explicit stack of (parent, child-iterator)
        # pairs. When *symbols* is given, only tokens with one of those symbols
        # are yielded; the traversal itself is unchanged.
        status: List[Tuple[Optional['Token[TK]'], Iterator['Token[TK]']]] = []
        parent: Optional['Token[TK]'] = self
        children: Iterator['Token[TK]'] = iter(self)
        tk: 'Token[TK]'
        while True:
            try:
                tk = next(children)
            except StopIteration:
                # Current children exhausted: resume the enclosing iterator.
                try:
                    parent, children = status.pop()
                except IndexError:
                    # Stack empty: yield the pending parent (if any) and stop.
                    if parent is not None:
                        if not symbols or parent.symbol in symbols:
                            yield parent
                    return
                else:
                    if parent is not None:
                        if not symbols or parent.symbol in symbols:
                            yield parent
                        parent = None
            else:
                # A parent with a single child is yielded before descending.
                if parent is not None and len(parent._items) == 1:
                    if not symbols or parent.symbol in symbols:
                        yield parent
                    parent = None
                if not tk._items:
                    # Leaf token: yield it, then the pending parent (if any).
                    if not symbols or tk.symbol in symbols:
                        yield tk
                    if parent is not None:
                        if not symbols or parent.symbol in symbols:
                            yield parent
                        parent = None
                    continue
                # Descend into tk's children, remembering the current frame.
                status.append((parent, children))
                parent, children = tk, iter(tk)
TK_co = TypeVar('TK_co', bound=Token[Any], covariant=True)
class Parser(Generic[TK_co], metaclass=ParserMeta):
"""
Parser class for implementing a Top Down Operator Precedence parser.
:cvar SYMBOLS: the symbols of the definable tokens for the parser. In the base class it's an \
immutable set that contains the symbols for special tokens (literals, names and end-token).\
Has to be extended in a concrete parser adding all the symbols of the language.
:cvar symbol_table: a dictionary that stores the token classes defined for the language.
:type symbol_table: dict
:cvar token_base_class: the base class for creating language's token classes.
:type token_base_class: Token
:cvar tokenizer: the language tokenizer compiled regexp.
"""
SYMBOLS: ClassVar[FrozenSet[str]] = SPECIAL_SYMBOLS
token_base_class = Token
tokenizer: Optional[Pattern[str]] = None
symbol_table: MutableMapping[str, Type[TK_co]] = {}
_start_token: TK_co
source: str
tokens: Iterator[str]
token: TK_co
next_token: TK_co
next_match: Optional[Match[str]]
literals_pattern: Pattern[str]
name_pattern: Pattern[str]
__slots__ = 'source', 'tokens', 'next_match', '_start_token', 'token', 'next_token'
    def parse(self, source: str) -> TK_co:
        """
        Parses a source code of the formal language. This is the main method that has to be
        called for a parser's instance.

        :param source: The source string.
        :return: The root of the token's tree that parse the source.
        """
        assert self.tokenizer, "Parser tokenizer is not built!"
        try:
            try:
                self.tokens = iter(cast(Iterator[str], self.tokenizer.finditer(source)))
            except TypeError as err:
                # finditer() raises TypeError for non-string input: report it
                # through an '(invalid)' token so errors are uniform.
                token = self.symbol_table['(invalid)'](self, source)
                raise token.wrong_syntax('invalid source type, {}'.format(err))
            self.source = source
            self.advance()
            root_token = self.expression()
            # The whole source must have been consumed.
            self.next_token.expected('(end)')
            return root_token
        finally:
            # Always reset the parser state so the instance can be reused.
            self.tokens = iter(())
            self.next_match = None
            self.token = self.next_token = self._start_token
    def advance(self, *symbols: str) -> TK_co:
        """
        The Pratt's function for advancing to next token.

        :param symbols: Optional arguments tuple. If not empty one of the provided \
        symbols is expected. If the next token's symbol differs the parser raises a \
        parse error.
        :return: The next token instance.
        """
        value: Any
        if self.next_token.symbol == '(end)' or \
                symbols and self.next_token.symbol not in symbols:
            raise self.next_token.wrong_syntax()
        self.token = self.next_token
        while True:
            try:
                self.next_match = cast(Match[str], next(self.tokens))
            except StopIteration:
                # No more matches: emit the end-of-source token.
                self.next_token = self.symbol_table['(end)'](self)
                break
            else:
                # Tokenizer match groups: 1=literal, 2=symbol, 3=name, 4=unknown.
                literal, symbol, name, unknown = self.next_match.groups()
                if symbol is not None:
                    try:
                        self.next_token = self.symbol_table[symbol](self)
                    except KeyError:
                        if self.name_pattern.match(symbol) is None:
                            self.next_token = self.symbol_table['(unknown)'](self, symbol)
                            raise self.next_token.wrong_syntax()
                        # A symbol shaped like a name falls back to a name token.
                        self.next_token = self.symbol_table['(name)'](self, symbol)
                    break
                elif literal is not None:
                    # Quoted string, float (exponent form), decimal or integer.
                    if literal[0] in '\'"':
                        value = self.unescape(literal)
                        self.next_token = self.symbol_table['(string)'](self, value)
                    elif 'e' in literal or 'E' in literal:
                        try:
                            value = float(literal)
                        except ValueError as err:
                            self.next_token = self.symbol_table['(invalid)'](self, literal)
                            raise self.next_token.wrong_syntax(message=str(err))
                        else:
                            self.next_token = self.symbol_table['(float)'](self, value)
                    elif '.' in literal:
                        try:
                            value = Decimal(literal)
                        except DecimalException as err:
                            self.next_token = self.symbol_table['(invalid)'](self, literal)
                            raise self.next_token.wrong_syntax(message=str(err))
                        else:
                            self.next_token = self.symbol_table['(decimal)'](self, value)
                    else:
                        self.next_token = self.symbol_table['(integer)'](self, int(literal))
                    break
                elif name is not None:
                    self.next_token = self.symbol_table['(name)'](self, name)
                    break
                elif unknown is not None:
                    self.next_token = self.symbol_table['(unknown)'](self, unknown)
                    break
                elif str(self.next_match.group()).strip():
                    # A non-space match that fits no group means the tokenizer
                    # regex and the symbol table are out of sync.
                    msg = "unexpected matching %r: incompatible tokenizer"
                    raise RuntimeError(msg % self.next_match.group())
        return self.next_token
    def advance_until(self, *stop_symbols: str) -> str:
        """
        Advances until one of the symbols is found or the end of source is reached,
        returning the raw source string placed before. Useful for raw parsing of
        comments and references enclosed between specific symbols.

        :param stop_symbols: The symbols that have to be found for stopping advance.
        :return: The source string chunk enclosed between the initial position \
        and the first stop symbol.
        """
        if not stop_symbols:
            raise self.next_token.wrong_type("at least a stop symbol required!")
        elif self.next_token.symbol == '(end)':
            raise self.next_token.wrong_syntax()
        self.token = self.next_token
        source_chunk: List[str] = []
        while True:
            try:
                self.next_match = cast(Match[str], next(self.tokens))
            except StopIteration:
                # End of source reached without a stop symbol.
                self.next_token = self.symbol_table['(end)'](self)
                break
            else:
                # Group 2 of the tokenizer match holds a symbol, if any.
                symbol = self.next_match.group(2)
                if symbol is not None:
                    symbol = symbol.strip()
                    if symbol not in stop_symbols:
                        source_chunk.append(symbol)
                    else:
                        try:
                            self.next_token = self.symbol_table[symbol](self)
                            break
                        except KeyError:
                            self.next_token = self.symbol_table['(unknown)'](self)
                            raise self.next_token.wrong_syntax()
                else:
                    # Literals, names, unknown chars and spaces kept verbatim.
                    source_chunk.append(self.next_match.group())
        return ''.join(source_chunk)
def expression(self, rbp: int = 0) -> TK_co:
"""
Pratt's function for parsing an expression. It calls token.nud() and then advances
until the right binding power is less the left binding power of the next
token, invoking the led() method on the following token.
:param rbp: right binding power for the expression.
:return: left token.
"""
token = self.next_token
self.advance()
left = token.nud()
while rbp < self.next_token.lbp:
token = self.next_token
self.advance()
left = token.led(left)
return cast(TK_co, left)
@property
def position(self) -> Tuple[int, int]:
"""Property that returns the current line and column indexes."""
return self.token.position
def is_source_start(self) -> bool:
"""
Returns `True` if the parser is positioned at the start
of the source, ignoring the spaces.
"""
return not bool(self.source[0:self.token.span[0]].strip())
def is_line_start(self) -> bool:
"""
Returns `True` if the parser is positioned at the start
of a source line, ignoring the spaces.
"""
token_index = self.token.span[0]
try:
line_start = self.source[:token_index].rindex('\n') + 1
except ValueError:
return not bool(self.source[:token_index].strip())
else:
return not bool(self.source[line_start:token_index].strip())
    def is_spaced(self, before: bool = True, after: bool = True) -> bool:
        """
        Returns `True` if the source has an extra space (whitespace, tab or newline)
        immediately before or after the current position of the parser.

        :param before: if `True` considers also the extra spaces before \
        the current token symbol.
        :param after: if `True` considers also the extra spaces after \
        the current token symbol.
        """
        start, end = self.token.span
        try:
            if before and start > 0 and self.source[start - 1] in ' \t\n':
                return True
            # IndexError from self.source[end] (token at the very end of the
            # source) means there is no following character, hence no space.
            return after and self.source[end] in ' \t\n'
        except IndexError:
            return False
@staticmethod
@classmethod
def register(cls, symbol: Union[str, Type[TK_co]], **kwargs: Any) -> Type[TK_co]:
"""
Register/update a token class in the symbol table.
:param symbol: The identifier symbol for a new class or an existent token class.
:param kwargs: Optional attributes/methods for the token class.
:return: A token class.
"""
if isinstance(symbol, str):
if ' ' in symbol:
raise ValueError("%r: a symbol can't contain whitespaces" % symbol)
try:
token_class = cls.symbol_table[symbol]
except KeyError:
# Register a new symbol and create a new custom class. The new class
# name is registered at parser class's module level.
if symbol not in cls.SYMBOLS:
if symbol != '(start)': # for backward compatibility
raise NameError('%r is not a symbol of the parser %r.' % (symbol, cls))
kwargs['symbol'] = symbol
label = kwargs.get('label', 'symbol')
if isinstance(label, tuple):
label = kwargs['label'] = MultiLabel(*label)
token_class_name = "_{}{}".format(
_symbol_to_classname(symbol), str(label).title().replace(' ', '')
)
token_class_bases = kwargs.get('bases', (cls.token_base_class,))
kwargs.update({
'__module__': cls.__module__,
'__qualname__': token_class_name,
'__return__': None
})
token_class = cast(
Type[TK_co], ABCMeta(token_class_name, token_class_bases, kwargs)
)
cls.symbol_table[symbol] = token_class
MutableSequence.register(token_class)
setattr(sys.modules[cls.__module__], token_class_name, token_class)
elif not isinstance(symbol, type) or not issubclass(symbol, Token):
raise TypeError("A string or a %r subclass requested, not %r." % (Token, symbol))
else:
token_class = symbol
if cls.symbol_table.get(symbol.symbol) is not token_class:
raise ValueError("Token class %r is not registered." % token_class)
for key, value in kwargs.items():
if key == 'lbp' and value > token_class.lbp:
token_class.lbp = value
elif key == 'rbp' and value > token_class.rbp:
token_class.rbp = value
elif callable(value):
setattr(token_class, key, value)
return token_class
@classmethod
def unregister(cls, symbol: str) -> None:
"""Unregister a token class from the symbol table."""
del cls.symbol_table[symbol.strip()]
@classmethod
def duplicate(cls, symbol: str, new_symbol: str, **kwargs: Any) -> Type[TK_co]:
"""Duplicate a token class with a new symbol."""
token_class = cls.symbol_table[symbol]
new_token_class = cls.register(new_symbol, **kwargs)
for key, value in token_class.__dict__.items():
if key in kwargs or key in ('symbol', 'pattern') or key.startswith('_'):
continue
setattr(new_token_class, key, value)
return new_token_class
@classmethod
def literal(cls, symbol: str, bp: int = 0) -> Type[TK_co]:
"""Register a token for a symbol that represents a *literal*."""
return cls.register(symbol, label='literal', lbp=bp, evaluate=evaluate, nud=nud)
@classmethod
def nullary(cls, symbol: str, bp: int = 0) -> Type[TK_co]:
"""Register a token for a symbol that represents a *nullary* operator."""
return cls.register(symbol, label='operator', lbp=bp, nud=nud)
@classmethod
def prefix(cls, symbol: str, bp: int = 0) -> Type[TK_co]:
"""Register a token for a symbol that represents a *prefix* unary operator."""
return cls.register(symbol, label='prefix operator', lbp=bp, rbp=bp, nud=nud)
@classmethod
def postfix(cls, symbol: str, bp: int = 0) -> Type[TK_co]:
"""Register a token for a symbol that represents a *postfix* unary operator."""
return cls.register(symbol, label='postfix operator', lbp=bp, rbp=bp, led=led)
@classmethod
def infix(cls, symbol: str, bp: int = 0) -> Type[TK_co]:
"""Register a token for a symbol that represents an *infix* binary operator."""
return cls.register(symbol, label='operator', lbp=bp, rbp=bp, led=led)
@classmethod
def infixr(cls, symbol: str, bp: int = 0) -> Type[TK_co]:
"""Register a token for a symbol that represents an *infixr* binary operator."""
return cls.register(symbol, label='operator', lbp=bp, rbp=bp - 1, led=led)
@classmethod
def method(cls, symbol: Union[str, Type[TK_co]], bp: int = 0) \
-> Callable[[Callable[..., Any]], Callable[..., Any]]:
"""
Register a token for a symbol that represents a custom operator or redefine
a method for an existing token.
"""
token_class = cls.register(symbol, label='operator', lbp=bp, rbp=bp)
@no_type_check_decorator
return bind
    @classmethod
    def build(cls) -> None:
        """
        Builds the parser class. Checks if all declared symbols are defined
        and builds the regex tokenizer using the symbol related patterns.
        """
        # For backward compatibility with external defined parsers
        if '(start)' not in cls.symbol_table:
            cls.register('(start)')
        if not cls.SYMBOLS.issubset(cls.symbol_table.keys()):
            # Every declared symbol must have a registered token class.
            unregistered = [s for s in cls.SYMBOLS if s not in cls.symbol_table]
            raise ValueError("The parser %r has unregistered symbols: %r" % (cls, unregistered))
        cls.tokenizer = cls.create_tokenizer(cls.symbol_table)
    build_tokenizer = build  # For backward compatibility
@classmethod
def create_tokenizer(cls, symbol_table: MutableMapping[str, Type[TK_co]]) -> Pattern[str]:
"""
Returns a regex based tokenizer built from a symbol table of token classes.
The returned tokenizer skips extra spaces between symbols.
A regular expression is created from the symbol table of the parser using a template.
The symbols are inserted in the template putting the longer symbols first. Symbols and
their patterns can't contain spaces.
:param symbol_table: a dictionary containing the token classes of the formal language.
"""
character_patterns = []
string_patterns = []
name_patterns = []
custom_patterns = set()
for symbol, token_class in symbol_table.items():
if symbol in SPECIAL_SYMBOLS:
continue
elif token_class.pattern is not None:
custom_patterns.add(token_class.pattern)
elif cls.name_pattern.match(symbol) is not None:
name_patterns.append(re.escape(symbol))
elif len(symbol) == 1:
character_patterns.append(re.escape(symbol))
else:
string_patterns.append(re.escape(symbol))
symbols_patterns: List[str] = []
if string_patterns:
symbols_patterns.append('|'.join(sorted(string_patterns, key=lambda x: -len(x))))
if character_patterns:
symbols_patterns.append('[{}]'.format(''.join(character_patterns)))
if name_patterns:
symbols_patterns.append(r'\b(?:{})\b(?![\-\.])'.format(
'|'.join(sorted(name_patterns, key=lambda x: -len(x)))
))
if custom_patterns:
symbols_patterns.append('|'.join(custom_patterns))
tokenizer_pattern = r"({})|({})|({})|(\S)|\s+".format(
cls.literals_pattern.pattern,
'|'.join(symbols_patterns),
cls.name_pattern.pattern
)
return re.compile(tokenizer_pattern)
| [
2,
198,
2,
15069,
357,
66,
828,
2864,
12,
42334,
11,
311,
1797,
4090,
357,
24274,
3961,
329,
13435,
10422,
737,
198,
2,
1439,
2489,
10395,
13,
198,
2,
770,
2393,
318,
9387,
739,
262,
2846,
286,
262,
17168,
13789,
13,
198,
2,
4091,... | 2.273099 | 11,981 |
from preparation.transformers.base import BaseTransformer
from preparation.transformers.categorical import (CategoricalTransformer, OneHotEncodingTransformer)

# Public names re-exported by this package module.
__all__ = [
    'BaseTransformer',
    'CategoricalTransformer',
    'OneHotEncodingTransformer',
]
| [
6738,
11824,
13,
35636,
364,
13,
8692,
1330,
7308,
8291,
16354,
198,
6738,
11824,
13,
35636,
364,
13,
66,
2397,
12409,
1330,
357,
34,
2397,
12409,
8291,
16354,
11,
1881,
21352,
27195,
7656,
8291,
16354,
8,
198,
198,
834,
439,
834,
796... | 3.333333 | 78 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, math
# https://www.nayuki.io/page/fast-fourier-transform-in-x86-assembly
# Emits C float tables of cos(pi*i/N) and sin(pi*i/N), 8 values per row.

TABLE_SIZE = 512


def write_table(name, func, size=TABLE_SIZE):
    """Write one C float-array table of func(pi * i / size) to stdout.

    :param name: C identifier of the array.
    :param func: unary function of a float (e.g. math.cos or math.sin).
    :param size: number of entries; parameterized so other table sizes can
        be generated with the same code (defaults to the original 512).
    """
    sys.stdout.write("const float %s[%d] = {\n" % (name, size))
    for i in range(size):
        if not (i % 8):  # start of a row of 8 values
            sys.stdout.write("    ")
        sys.stdout.write("%9.6ff" % func((math.pi * i) / size))
        if (i + 1) % 8:
            sys.stdout.write(", ")
        elif i != size - 1:
            sys.stdout.write(",\n")
        else:
            sys.stdout.write("\n};\n")


# The two original loops were identical except for the function and the
# array name; factored into write_table().
write_table("cos_table", math.cos)
write_table("sin_table", math.sin)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
11,
10688,
198,
2,
3740,
1378,
2503,
13,
77,
323,
11308,
13,
952,
14,
7700,
14,
7217,
12,
69,
28... | 1.946988 | 415 |
import os
import pytest
import json
from xbox.nano.channel import Channel
from xbox.nano.enum import ChannelClass
@pytest.fixture(scope='session')
@pytest.fixture(scope='session')
@pytest.fixture(scope='session')
| [
11748,
28686,
198,
11748,
12972,
9288,
198,
11748,
33918,
198,
198,
6738,
2124,
3524,
13,
77,
5733,
13,
17620,
1330,
11102,
198,
6738,
2124,
3524,
13,
77,
5733,
13,
44709,
1330,
11102,
9487,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
... | 3.013699 | 73 |
import sys
import os
sys.path.append('.')
import os
import logging
# Configure root logging once for this script: timestamped messages at INFO,
# and silence matplotlib's verbose sub-WARNING output.
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
logging.getLogger('matplotlib').setLevel(logging.WARNING)
import pandas as pd
import numpy as np
import time
from sslplay.data.digits import DataDigits
from sslplay.model.random_forest import ModelRF
from sslplay.utils.ssplit import ssplit
from sslplay.performance.f1 import f1
from sslplay.performance.auc import auc
from sslplay.performance.accuracy import accuracy
# Load and parse the digits dataset, then split off a labelled pool (20/80).
obj_data = DataDigits()
obj_data.load()
obj_data.parse()
Xt, yt, Xl, yl = ssplit(
    obj_data.X, obj_data.y,
    20, 80,
)
tmp_len = len(yl)

# Train on progressively smaller random subsets of the labelled pool and
# log test-set accuracy for each subset percentage.
for pct in [80, 60, 40, 20, 5, 2, 1]:
    keep_prob = pct / 80.0
    mask = np.random.choice(
        a=[False, True],
        size=tmp_len,
        p=[1 - keep_prob, keep_prob],
        replace=True,
    )
    model = ModelRF()
    model.fit(Xl[mask, :], yl[mask], None)
    predictions = model.predict(Xt)
    logging.info("%s --> %s", pct, accuracy(yt, predictions))
"""
2020-01-23 23:35:49,135 - 80 --> 0.8866071428571428
2020-01-23 23:35:49,386 - 60 --> 0.89375
2020-01-23 23:35:49,631 - 40 --> 0.9196428571428571
2020-01-23 23:35:49,878 - 20 --> 0.8964285714285715
2020-01-23 23:35:50,126 - 5 --> 0.8848214285714285
2020-01-23 23:35:50,377 - 2 --> 0.7446428571428572
2020-01-23 23:35:50,626 - 1 --> 0.5928571428571429
"""
11748,
25064,
198,
11748,
28686,
198,
17597,
13,
6978,
13,
33295,
10786,
2637,
8,
198,
198,
11748,
28686,
198,
198,
11748,
18931,
198,
6404,
2667,
13,
35487,
16934,
7,
18982,
11639,
4,
7,
292,
310,
524,
8,
82,
532,
4064,
7,
20500,
8... | 2.234679 | 669 |
from unittest import TestCase
import numpy as np
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from xrview.glyphs import (
Band,
BoxWhisker,
Circle,
Diamond,
ErrorCircle,
ErrorLine,
HBar,
Line,
Ray,
Rect,
Square,
Triangle,
VBar,
VLine,
Whisker,
get_glyph,
get_glyph_list,
)
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1489,
365,
71,
13,
27530,
1330,
29201,
6601,
7416,
198,
6738,
1489,
365,
71,
13,
29487,
889,
1330,
3785,
198,
198,
6738,
2124,
81,
1177,
13,
... | 2.186047 | 172 |
# square root of two -- how many iterations under 1001 contain a numerator with more digits than the denominator?
# (Project Euler problem 57 timing driver.)
# NOTE(review): Python 2 print statements; `euler_57` is not defined in this
# file as shown -- presumably defined or imported elsewhere. Confirm.
import timeit
# Time the solver and report both the answer and the elapsed wall-clock time.
start = timeit.default_timer()
print "Answer: %s" % euler_57()
stop = timeit.default_timer()
print "Time: %f" % (stop - start)
| [
2,
6616,
6808,
286,
734,
1377,
703,
867,
34820,
739,
1802,
16,
3994,
257,
5470,
1352,
351,
517,
19561,
621,
262,
31457,
1352,
30,
220,
198,
198,
11748,
640,
270,
628,
198,
9688,
796,
640,
270,
13,
12286,
62,
45016,
3419,
198,
220,
... | 3.011236 | 89 |
import time
from enum import Enum
from functools import reduce
import contextlib
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
from second.pytorch.models.voxelnet import register_voxelnet, VoxelNet
from second.pytorch.models import rpn
@register_voxelnet
# TODO: HOOK UP MAX_SWEEPS CONFIG
@register_voxelnet
| [
11748,
640,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
11748,
4732,
8019,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
20471,
1330,
1... | 3.141732 | 127 |
import typing
import hvac
| [
11748,
19720,
198,
198,
11748,
289,
85,
330,
628
] | 3.111111 | 9 |
"""Event."""
from __future__ import annotations
import logging
from typing import List
from ..consts import (
XML_TAG_CREATIONINFO,
XML_TAG_DESCRIPTION,
XML_TAG_MAGNITUDE,
XML_TAG_ORIGIN,
)
from .creation_info import CreationInfo
from .description import Description
from .element import Element
from .magnitude import Magnitude
from .origin import Origin
_LOGGER = logging.getLogger(__name__)
class Event(Element):
    """Event."""

    def _wrapped_child(self, xml_tag, factory):
        """Return the single child under *xml_tag* wrapped by *factory*, or None."""
        value = self.attribute([xml_tag])
        if value:
            return factory(value)
        return None

    def _wrapped_children(self, xml_tag, factory):
        """Return all children under *xml_tag* wrapped by *factory*.

        Always returns a list (possibly empty); a single child element is
        wrapped into a one-item list.
        """
        values = self.attribute([xml_tag])
        entries = []
        if values:
            if isinstance(values, list):
                for value in values:
                    entries.append(factory(value))
            else:
                entries.append(factory(values))
        return entries

    @property
    def description(self) -> Description | None:
        """Event description."""
        return self._wrapped_child(XML_TAG_DESCRIPTION, Description)

    @property
    def origin(self) -> Origin | None:
        """First defined origin."""
        if self.origins:
            return self.origins[0]
        return None

    @property
    def origins(self) -> List[Origin] | None:
        """Origins defined for this event."""
        # NOTE(review): annotated `| None` but, as in the original, always
        # returns a list (possibly empty) -- kept for compatibility.
        return self._wrapped_children(XML_TAG_ORIGIN, Origin)

    @property
    def magnitude(self) -> Magnitude | None:
        """First defined magnitude."""
        if self.magnitudes:
            return self.magnitudes[0]
        return None

    @property
    def magnitudes(self) -> List[Magnitude] | None:
        """Magnitudes defined for this event."""
        return self._wrapped_children(XML_TAG_MAGNITUDE, Magnitude)

    @property
    def creation_info(self) -> CreationInfo | None:
        """Creation info about this event."""
        return self._wrapped_child(XML_TAG_CREATIONINFO, CreationInfo)
| [
37811,
9237,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
18931,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
11485,
1102,
6448,
1330,
357,
198,
220,
220,
220,
23735,
62,
42197,
62,
43387,
6234,
10778,
11,
19... | 2.372845 | 928 |
from flask import request
from flask_restplus import Resource, cors
from app.main.model.todo_dto import TodoDTO
from app.main.service.todo_service import (
get_all_todos,
save_new_todo,
get_todo,
delete_todo,
)
from app.main.util.auth_decorator import token_required
api = TodoDTO.api
_todo = TodoDTO.todo
@cors.crossdomain(origin="*")
@api.route("/")
class TodoList(Resource):
    """Shows a list of all todos, and lets you POST to add new tasks"""

    @api.doc("list_todos")
    @api.marshal_list_with(_todo)
    def get(self):
        """List all tasks"""
        return get_all_todos()

    @api.doc("create_todo")
    @api.expect(_todo, validate=True)
    @token_required
    @api.marshal_with(_todo, code=201)
    @api.response(201, "Todo successfully created.")
    def post(self):
        """Create a new task"""
        # The validated request payload goes straight to the service layer.
        return save_new_todo(data=request.json), 201
@cors.crossdomain(origin="*")
@api.route("/<todo_id>")
@api.response(404, "Todo not found")
@api.param("todo_id", "The task identifier")
class Todo(Resource):
    """Show a single todo item and lets you delete them"""
    @api.doc("get_todo")
    @api.marshal_with(_todo)
    def get(self, todo_id):
        """Fetch a given resource"""
        todo = get_todo(todo_id)
        if not todo:
            # abort() raises, so control never falls through after a 404.
            api.abort(404, "Todo {} doesn't exist".format(todo_id))
        else:
            return todo
    @api.doc("delete_todo")
    @api.response(204, "Todo deleted")
    def delete(self, todo_id):
        """Delete a task given its identifier"""
        todo = get_todo(todo_id)
        if not todo:
            api.abort(404, "Todo {} doesn't exist".format(todo_id))
        else:
            delete_todo(todo_id)
            response_object = {
                "status": "success",
                "message": "Todo {} successfully deleted".format(todo_id),
            }
            # NOTE(review): returns a body alongside status 204 (No Content);
            # most clients discard it -- confirm the intended status/payload.
            return response_object, 204
    @api.doc("update_todo")
    @api.expect(_todo, validate=True)
    @api.marshal_with(_todo)
    def put(self, todo_id):
        """Update a task given its identifier"""
        # NOTE(review): `DAO` is not defined or imported anywhere in this
        # module, so a PUT request raises NameError. This should call an
        # update function from app.main.service.todo_service -- confirm and fix.
        return DAO.update(todo_id, api.payload)
| [
6738,
42903,
1330,
2581,
198,
6738,
42903,
62,
2118,
9541,
1330,
20857,
11,
269,
669,
198,
198,
6738,
598,
13,
12417,
13,
19849,
13,
83,
24313,
62,
67,
1462,
1330,
309,
24313,
35,
10468,
198,
6738,
598,
13,
12417,
13,
15271,
13,
83,... | 2.182556 | 986 |
#!/usr/bin/env python
#author:Richard Peng
#project:Kindelabra
#website:http://www.richardpeng.com/projects/kindelabra/
#repository:https://github.com/richardpeng/Kindelabra
#license:Creative Commons GNU GPL v2
# (http://creativecommons.org/licenses/GPL/2.0/)
import struct
import zipfile
import re
import codecs
import os
import hashlib
import json
KINDLEROOT = '/mnt/us'
'''Kindlet metadata parsing
'''
'''Topaz metadata parsing. Almost verbatim code by Greg Riker from Calibre
'''
# Returns a SHA-1 hash
# Returns a full path on the kindle filesystem
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
9800,
25,
22245,
42341,
198,
2,
16302,
25,
35854,
417,
397,
430,
198,
2,
732,
12485,
25,
4023,
1378,
2503,
13,
7527,
446,
79,
1516,
13,
785,
14,
42068,
14,
11031,
417,
397,
430,... | 2.782609 | 207 |
import os
from app import create_app, db
app = create_app()
@app.cli.command()
def test():
    """Run the unit tests."""
    import unittest
    suite = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(suite)
@app.cli.command()
def ipy():
    """Open IPython Debug Shell"""
    app_ctx = app.app_context()
    app_ctx.push()
    # Pull the model names into scope for interactive use.
    from app.model import (Department, Leave, Role,
                           Overtime, TemporaryOvertime,
                           SignSheet, User, WorkArrangement)
    import IPython
    IPython.embed()
    app_ctx.pop()
@app.cli.command()
def init_db():
    """Init database and create basic test data."""
    # Remove the on-disk SQLite file first so the schema is rebuilt cleanly.
    db_file = 'db.sqlite3'
    if os.path.exists(db_file):
        os.remove(db_file)
    db.drop_all()
    db.create_all()
    create_test_data()
def create_test_data():
    """Create basic test data."""
    from itertools import chain
    from datetime import date, time, datetime
    from app.model import (Department, Leave, Role,
                           Overtime, TemporaryOvertime,
                           SignSheet, User, WorkArrangement)
    # NOTE(review): this list is built but never written to the database and
    # never referenced again -- confirm whether it is intentionally unused.
    signsheet = [SignSheet(ID=1, staffID=1,
                           commitStamp=datetime(2000, 1, 1, 1, 1))]
    departments = [Department(ID=1, name="销售"),
                   Department(ID=2, name="财务"),
                   Department(ID=3, name="技术")]
    users = [User(ID=1, password="123456", name="老王",
                  role=Role.MANAGER, gender=False),
             User(ID=2, password="123456", name="马大叔",
                  role=Role.CHARGE, gender=True, birthday=datetime(1978, 2, 15), department=departments[0], email="mahaoqu@gmail.com", image_url="img/ma.jpg"),
             User(ID=3, password="123456", name="木木",
                  role=Role.CHARGE, birthday=datetime(1981, 11, 30), gender=False, department=departments[1], email="390400239@qq.com"),
             User(ID=4, password="123456", name="小马",
                  role=Role.STAFF, gender=False, department=departments[0], email="mahaoqu@qq.com"),
             User(ID=5, password="123456", name="小刚",
                  role=Role.STAFF, gender=False, department=departments[0]),
             User(ID=6, password="123456", name="徐徐",
                  role=Role.STAFF, gender=True, department=departments[1]),
             User(ID=7, password="123456", name="赵赵",
                  role=Role.STAFF, gender=True, department=departments[1], image_url="img/zhao.jpg")]
    # Persist departments and users first so the records below can refer to them.
    for d in chain(departments, users):
        d.update_db()
    # One overtime record and one leave record for users[3].
    Overtime(staff=users[3], status=1, reviewer=users[1], reason="晚上加班睡的香", beginDateTime=datetime(
        2019, 6, 12, 18, 0), endDateTime=datetime(2019, 6, 13, 1, 0)).update_db()
    Leave(staff=users[3], status=0, type=0, reason="回家种地", beginDateTime=datetime(
        2019, 6, 14), endDateTime=datetime(2019, 6, 22)).update_db()
    # A work arrangement for 2019-06-12 with begin/end sign sheets attached.
    w = WorkArrangement(staff=users[3], date=date(2019, 6, 12), beginTime=time(
        8, 0), endTime=time(18, 0))
    s1 = SignSheet(user=users[4])
    s1.commitStamp = datetime(2019, 6, 12, 8, 5)
    s1.update_db()
    s2 = SignSheet(user=users[4])
    s2.commitStamp = datetime(2019, 6, 12, 17, 55)
    s2.update_db()
    w.beginSign = s1
    w.endSign = s2
    w.update_db()
| [
11748,
28686,
198,
198,
6738,
598,
1330,
2251,
62,
1324,
11,
20613,
198,
198,
1324,
796,
2251,
62,
1324,
3419,
628,
198,
31,
1324,
13,
44506,
13,
21812,
3419,
198,
4299,
1332,
33529,
198,
220,
220,
220,
37227,
10987,
262,
4326,
5254,
... | 2.056266 | 1,564 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="nnplot",
# packages = ['nnplot'],
version="1.0.1",
author="Yuval Ai",
author_email="yuval_a@rad.com",
description="Plot Neural Networks and the weights of the links between their nodes",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Yuval-Ai/nnplot",
keywords = ['nn', 'ai', 'visualizer', 'learning', 'artificial', 'intelligence','weights'],
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Visualization",
],
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
... | 2.8 | 335 |
# -*- coding: utf-8 -*-
import requests
import json
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
7007,
198,
11748,
33918,
628
] | 2.65 | 20 |
"""
Senz Wifi Session
"""
import json
import requests
import os
import urllib3
from . import urls
def _validate_response(response):
""" Verify that response is OK """
if response.status_code == 200:
return
raise ResponseError(response.status_code, response.text)
class Error(Exception):
''' Senz Wifi session error '''
pass
class RequestError(Error):
''' Wrapped requests.exceptions.RequestException '''
pass
class LoginError(Error):
''' Login failed '''
pass
class ResponseError(Error):
''' Unexcpected response '''
class Session(object):
""" Senz Wifi session
Args:
username (str): Username used to login to Senz Wifi
password (str): Password used to login to Senz Wifi
"""
def login(self):
""" Login to Senz Wifi """
if os.path.exists(self._tokenFileName):
with open(self._tokenFileName, 'r') as cookieFile:
self._sessionId = cookieFile.read().strip()
if self._raw: print("--- token found")
try:
self._get_groups()
except ResponseError:
if self._raw: print("--- token probably expired")
self._sessionId = None
self._devices = None
os.remove(self._tokenFileName)
if self._sessionId is None:
self._create_session()
with open(self._tokenFileName, 'w') as tokenFile:
tokenFile.write(self._sessionId)
self._get_groups()
def _get_groups(self):
""" Get information about groups """
response = None
try:
response = requests.get(urls.get_groups(self._sessionId),headers=self._headers(), verify=self._verifySsl)
if 2 != response.status_code // 100:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
if(self._raw is True):
print("--- _get_groups()")
print("--- raw beginning ---")
print(response.text)
print("--- raw ending ---\n")
self._groups = json.loads(response.text)
| [
37811,
198,
10445,
89,
370,
22238,
23575,
198,
37811,
198,
198,
11748,
33918,
198,
11748,
7007,
198,
11748,
28686,
198,
11748,
2956,
297,
571,
18,
198,
6738,
764,
1330,
2956,
7278,
198,
198,
4299,
4808,
12102,
378,
62,
26209,
7,
26209,
... | 2.336756 | 974 |
import argparse
import glob
import os
import sys
import time
from multiprocessing import cpu_count
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter
import torch
import torch.nn as nn
from gensim.models import Word2Vec
from torch.utils.data import DataLoader
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, help='Model file to load')
parser.add_argument('--reset_lr', action='store_true', help='Reset learning rate')
args = parser.parse_args()
model_file = args.model
reset_lr = args.reset_lr
## Import model from relevant lib
if model_file:
model_dir = os.path.dirname(os.path.abspath(model_file))
model_lib_dir = os.path.join(model_dir, 'lib')
sys.path.append(model_lib_dir)
# from config import Config
# from dataset import AlignCollate, ImageBatchSampler, TextArtDataLoader
from model import GANModel
else:
from lib.model import GANModel
from lib.config import Config
from lib.dataset import AlignCollate, ImageBatchSampler, TextArtDataLoader
CONFIG = Config()
## Data loaders
print("Data loaders initializing..")
train_dataset = TextArtDataLoader(CONFIG, kind='train')
val_dataset = TextArtDataLoader(CONFIG, kind='val')
train_align_collate = AlignCollate(CONFIG, mode='train')
val_align_collate = AlignCollate(CONFIG, mode='test')
# train_batch_sampler = ImageBatchSampler(CONFIG, kind='train')
# val_batch_sampler = ImageBatchSampler(CONFIG, kind='val')
train_loader = DataLoader(train_dataset,
batch_size=CONFIG.BATCH_SIZE,
shuffle=True,
num_workers=CONFIG.N_WORKERS,
pin_memory=True,
collate_fn=train_align_collate,
# sampler=train_batch_sampler,
drop_last=True,
)
val_loader = DataLoader(val_dataset,
batch_size=CONFIG.BATCH_SIZE,
shuffle=False,
num_workers=CONFIG.N_WORKERS,
pin_memory=True,
collate_fn=val_align_collate,
# sampler=val_batch_sampler,
drop_last=True,
)
print("\tTrain size:", len(train_dataset))
print("\tValidation size:", len(val_dataset))
n_train_batch = len(train_dataset) // CONFIG.BATCH_SIZE
n_val_batch = len(val_dataset) // CONFIG.BATCH_SIZE
time.sleep(0.5)
## Init model with G and D
print("\nModel initializing..")
model = GANModel(CONFIG, model_file=model_file, mode='train', reset_lr=reset_lr)
time.sleep(1.0)
print("\nTraining starting..")
for epoch in range(model.epoch, model.epoch + CONFIG.N_EPOCHS):
print("Epoch {}/{}:".format(epoch, model.epoch + CONFIG.N_EPOCHS - 1))
for phase in ['train', 'val']:
phase_start = time.time()
print("\t{} phase:".format(phase.title()))
total_loss_g = 0.0
total_loss_d = 0.0
total_loss_g_refiner = 0.0
total_loss_d_decider = 0.0
total_loss_g_refiner2 = 0.0
total_loss_d_decider2 = 0.0
total_loss_gp_fr = 0.0
total_loss_gp_rf = 0.0
total_loss_gp_decider_fr = 0.0
total_loss_gp_decider2_fr = 0.0
total_acc_rr = 0.0
total_acc_rf = 0.0
total_acc_fr = 0.0
total_acc_decider_rr = 0.0
total_acc_decider_fr = 0.0
total_acc_decider2_rr = 0.0
total_acc_decider2_fr = 0.0
if phase == 'train':
## Set network to train
train_D = True if (epoch - 1) % CONFIG.TRAIN_D_TREND == 0 else False
train_G = True if (epoch - 1) % CONFIG.TRAIN_G_TREND == 0 else False
print("\tUpdate D: {}, Update G: {}".format(str(train_D), str(train_G)))
data_loader = train_loader
n_batch = n_train_batch
model.G.train()
model.D.train()
model.G_refiner.train()
model.D_decider.train()
model.G_refiner2.train()
model.D_decider2.train()
else:
data_loader = val_loader
n_batch = n_val_batch
model.G.eval()
model.D.eval()
model.G_refiner.eval()
model.D_decider.eval()
model.G_refiner2.eval()
model.D_decider2.eval()
train_D = False
train_G = False
for i, data in enumerate(data_loader):
iteration = (epoch - 1) * n_batch + i
## Get data
real_first_images, real_second_images, real_images, real_wvs, fake_wvs = data
batch_size = real_images.size()[0]
## Fit batch
fake_images, refined1, refined2 = model.fit(data, phase=phase, train_D=train_D, train_G=train_G)
## Update total loss
loss_g, loss_d, loss_g_refiner, loss_d_decider, loss_g_refiner2, loss_d_decider2,\
loss_gp_fr, loss_gp_rf, loss_gp_decider_fr, loss_gp_decider2_fr = model.get_losses()
total_loss_g += loss_g
total_loss_d += loss_d
total_loss_g_refiner += loss_g_refiner
total_loss_d_decider += loss_d_decider
total_loss_g_refiner2 += loss_g_refiner2
total_loss_d_decider2 += loss_d_decider2
if loss_gp_fr:
total_loss_gp_fr += loss_gp_fr
if loss_gp_rf:
total_loss_gp_rf += loss_gp_rf
if loss_gp_decider_fr:
total_loss_gp_decider_fr += loss_gp_decider_fr
if loss_gp_decider2_fr:
total_loss_gp_decider2_fr += loss_gp_decider2_fr
## Get D accuracy
acc_rr, acc_rf, acc_fr, acc_decider_rr, acc_decider_fr, acc_decider2_rr, acc_decider2_fr = model.get_D_accuracy()
total_acc_rr += acc_rr
total_acc_rf += acc_rf
total_acc_fr += acc_fr
total_acc_decider_rr += acc_decider_rr
total_acc_decider_fr += acc_decider_fr
total_acc_decider2_rr += acc_decider2_rr
total_acc_decider2_fr += acc_decider2_fr
## Save logs
if iteration % CONFIG.N_LOG_BATCH == 0:
log_tuple = phase, epoch, iteration, loss_g, loss_d, loss_g_refiner, loss_d_decider, loss_g_refiner2, loss_d_decider2,\
acc_rr, acc_rf, acc_fr, acc_decider_rr, acc_decider_fr, acc_decider2_rr, acc_decider2_fr
model.save_logs(log_tuple)
# Print logs
if i % CONFIG.N_PRINT_BATCH == 0:
print("\t\tBatch {: 4}/{: 4}:".format(i, n_batch), end=' ')
if CONFIG.GAN_LOSS1 == 'wgangp':
print("G loss: {:.4f} | D loss: {:.4f}".format(loss_g, loss_d), end=' ')
print("| G refiner loss: {:.4f} | D decider loss {:.4f}".format(loss_g_refiner, loss_d_decider), end=' ')
print("| G refiner2 loss: {:.4f} | D decider2 loss {:.4f}".format(loss_g_refiner2, loss_d_decider2), end=' ')
print("| GP loss fake-real: {:.4f}".format(loss_gp_fr), end=' ')
print("| GP loss real-fake: {:.4f}".format(loss_gp_rf), end=' ')
print("| GP loss fake refined1-fake: {:.4f}".format(loss_gp_decider_fr), end=' ')
print("| GP loss fake refined2-fake: {:.4f}".format(loss_gp_decider2_fr))
else:
print("G loss: {:.4f} | D loss: {:.4f}".format(loss_g, loss_d), end=' ')
print("| G refiner loss: {:.4f} | D decider loss {:.4f}".format(loss_g_refiner, loss_d_decider), end=' ')
print("| G refiner2 loss: {:.4f} | D decider2 loss {:.4f}".format(loss_g_refiner2, loss_d_decider2))
print("\t\t\tAccuracy D real-real: {:.4f} | real-fake: {:.4f} | fake-real {:.4f}".format(acc_rr, acc_rf, acc_fr))
print("\t\t\tAccuracy D decider real-real: {:.4f} | fake refined1-real {:.4f}".format(acc_decider_rr, acc_decider_fr))
print("\t\t\tAccuracy D decider2 real-real: {:.4f} | fake refined2-real {:.4f}".format(acc_decider2_rr, acc_decider2_fr))
## Save visual outputs
try:
if iteration % CONFIG.N_SAVE_VISUALS_BATCH == 0 and phase == 'val':
output_filename = "{}_{:04}_{:08}.png".format(model.model_name, epoch, iteration)
grid_img_pil = model.generate_grid(real_wvs, fake_images, refined1, refined2, real_images, train_dataset.word2vec_model)
model.save_img_output(grid_img_pil, output_filename)
# model.save_grad_output(output_filename)
except Exception as e:
print('Grid image generation failed.', e, 'Passing.')
total_loss_g /= (i + 1)
total_loss_d /= (i + 1)
total_loss_g_refiner /= (i + 1)
total_loss_d_decider /= (i + 1)
total_loss_g_refiner2 /= (i + 1)
total_loss_d_decider2 /= (i + 1)
total_loss_gp_fr /= (i + 1)
total_loss_gp_rf /= (i + 1)
total_loss_gp_decider_fr /= (i + 1)
total_loss_gp_decider2_fr /= (i + 1)
total_acc_rr /= (i + 1)
total_acc_rf /= (i + 1)
total_acc_fr /= (i + 1)
total_acc_decider_rr /= (i + 1)
total_acc_decider_fr /= (i + 1)
total_acc_decider2_rr /= (i + 1)
total_acc_decider2_fr /= (i + 1)
if CONFIG.GAN_LOSS1 == 'wgangp':
print("\t\t{p} G loss: {:.4f} | {p} D loss: {:.4f}".format(total_loss_g, total_loss_d, p=phase.title()), end=' ')
print("| {p} G refiner loss: {:.4f} | {p} D decider loss: {:.4f}".format(total_loss_g_refiner, total_loss_d_decider, p=phase.title()), end=' ')
print("| {p} G refiner2 loss: {:.4f} | {p} D decider2 loss: {:.4f}".format(total_loss_g_refiner2, total_loss_d_decider2, p=phase.title()), end=' ')
print("| GP loss fake-real: {:.4f}".format(total_loss_gp_fr), end=' ')
print("| GP loss real-fake: {:.4f}".format(total_loss_gp_rf), end=' ')
print("| GP loss real refined1-fake: {:.4f}".format(total_loss_gp_decider_fr), end=' ')
print("| GP loss real refined2-fake: {:.4f}".format(total_loss_gp_decider2_fr))
else:
print("\t\t{p} G loss: {:.4f} | {p} D loss: {:.4f}".format(total_loss_g, total_loss_d, p=phase.title()), end=' ')
print("\t\t{p} G refiner loss: {:.4f} | {p} D decider loss: {:.4f}".format(total_loss_g_refiner, total_loss_d_decider, p=phase.title()))
print("\t\t{p} G refiner2 loss: {:.4f} | {p} D decider2 loss: {:.4f}".format(total_loss_g_refiner2, total_loss_d_decider2, p=phase.title()))
print("\t\tAccuracy D real-real: {:.4f} | real-fake: {:.4f} | fake-real {:.4f}".format(total_acc_rr, total_acc_rf, total_acc_fr))
print("\t\tAccuracy D decider real-real: {:.4f} | fake refined1-real {:.4f}".format(total_acc_decider_rr, total_acc_decider_fr))
print("\t\tAccuracy D decider2 real-real: {:.4f} | fake refined2-real {:.4f}".format(total_acc_decider2_rr, total_acc_decider2_fr))
print("\t{} time: {:.2f} seconds".format(phase.title(), time.time() - phase_start))
## Update lr
model.update_lr(total_loss_g, total_loss_d, total_loss_g_refiner, total_loss_d_decider, total_loss_g_refiner2, total_loss_d_decider2)
## Save model
if epoch % CONFIG.N_SAVE_MODEL_EPOCHS == 0:
model.save_model_dict(epoch, iteration, total_loss_g, total_loss_d,\
total_loss_g_refiner, total_loss_d_decider, total_loss_g_refiner2, total_loss_d_decider2)
| [
11748,
1822,
29572,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
18540,
305,
919,
278,
1330,
42804,
62,
9127,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32... | 1.85928 | 6,751 |
from rekall import Interval, IntervalSet, IntervalSetMapping
from rekall.bounds import Bounds3D
from rekall.predicates import overlaps
from rekall.stdlib.merge_ops import payload_first
import unittest
from pstats import Stats
import cProfile
| [
6738,
302,
74,
439,
1330,
4225,
2100,
11,
4225,
2100,
7248,
11,
4225,
2100,
7248,
44,
5912,
198,
6738,
302,
74,
439,
13,
65,
3733,
1330,
347,
3733,
18,
35,
198,
6738,
302,
74,
439,
13,
28764,
16856,
1330,
12893,
1686,
198,
6738,
3... | 3.253333 | 75 |
# -*- coding: utf-8 -*-
"""
Collect the results for a given hint command.
"""
__author__ = "Jakrin Juangbhanich"
__email__ = "juangbhanich.k@gmail.com"
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
31337,
262,
2482,
329,
257,
1813,
9254,
3141,
13,
198,
37811,
198,
198,
834,
9800,
834,
796,
366,
41,
461,
12769,
12585,
648,
65,
7637,
488,
1,
198,
... | 2.460317 | 63 |
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtOpenGL import *
from PyQt5.QtWidgets import *
# TODO: Maybe export all from PyQt5.QtCore.Qt?
# Because Qt.Key_W is better than Qt.Qt.Key_W
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
11505,
8763,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
... | 2.141414 | 99 |
from jira_api import get_all_tasks
from jira_api import flatten_dict
import pandas as pd
import numpy as np
import datetime
import json
import yaml
import urllib3
import os
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# load JIRA username and password
jira_creds = yaml.load(open('./user_dev.yml'))
username = jira_creds['user']['name']
password = jira_creds['user']['password']
# point to JIRA development environment
# url = 'https://10.221.100.4'
# point to JIRA production environment
url = 'https://jira.emdeon.net'
def main():
"""
Get the status of all Epics in the Artificial Intelligence Project.
Persist all project status to s3 for reporting downstream.
"""
all_tasks = get_all_tasks(url,
username,
password,
"AI",
start_at=0,
max_results=100)
# pipe issues to txt
now = datetime.datetime.now()
today = now.strftime("%Y-%m-%d")
# persist results to ./ml_engagements
if not os.path.exists('./status_report'):
os.makedirs('./status_report')
with open(
'./status_report/raw/%s.txt' % str(today), 'w') as outfile:
json.dump(all_tasks, outfile)
df = pd.DataFrame.from_dict(all_tasks['issues'])
# flatten all issues
explode_issues = pd.DataFrame.from_dict(
list(map(lambda x: flatten_dict(x), df['fields'])))
field_mapping = json.load(open('./scripts/chc_jira_prod_fields.json'))
# find and replace customfields with field names
for i in np.arange(0, len(field_mapping)):
explode_issues.columns = explode_issues.columns.str.replace(
dict(field_mapping[i])['id'],
dict(field_mapping[i])['name'])
# format column names to lower case w/o spaces
explode_issues.columns = explode_issues.columns.str.lower().str.replace(' ', '_')
# persist flattened issues to .csv
explode_issues.reset_index().to_csv(
'../status_report/flattened/tasks/%s.txt' % str(today),
index=False)
# kludgey update to extract and flatten sub-tasks
temp = list(map(lambda x: explode_json(x), explode_issues['sub-tasks']))
temp_2 = pd.DataFrame(list(map(lambda x: combine_json(x), temp))).dropna()
indices = []
dfs = []
for i in temp_2.index:
indices.append(i)
dfs.append(temp_2.loc[i, 0])
temp_3 = pd.concat(dfs, axis=0, keys=indices)
column_names = 'subtask_' + temp_3.columns
temp_3.columns = column_names
temp_3 = temp_3.reset_index().drop('level_1', axis=1)
temp_3.columns = temp_3.columns.str.replace('level_0', 'index')
# persist flattened issues to .csv
temp_3.to_csv(
'../status_report/flattened/sub_tasks/%s.txt' % str(today),
index=False)
if __name__ == "__main__":
main()
| [
6738,
474,
8704,
62,
15042,
1330,
651,
62,
439,
62,
83,
6791,
198,
6738,
474,
8704,
62,
15042,
1330,
27172,
268,
62,
11600,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4818,
8079,
198,
11748,
... | 2.408463 | 1,158 |
#!/usr/bin/env python
"""
Creates a secure, random password for the Django user 'admin'
"""
import hashlib
import imp
import os
import sqlite3
import django
from django.contrib.auth.models import User
django.setup()
# Load the OpenShift helper library
lib_path = os.environ['OPENSHIFT_REPO_DIR'] + 'web/'
modinfo = imp.find_module('openshiftlibs', [lib_path])
openshiftlibs = imp.load_module(
'openshiftlibs', modinfo[0], modinfo[1], modinfo[2])
# A default password so that OpenShift can secure it
default_password = {
'KEY': 'ZjSqGumxnGbLrFQd2eNrTgSGQYmbskThaqaba3etSJxwrA5Xnx'}
# Replace default keys with dynamic values
use_keys = openshiftlibs.openshift_secure(default_password)
# Set Django settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web.settings")
u = User.objects.get(username='admin')
u.set_password(use_keys['KEY'])
u.save()
# Print the new password info
print "Django application credentials:\n\tuser: admin\n\t" + use_keys['KEY']
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
16719,
274,
257,
5713,
11,
4738,
9206,
329,
262,
37770,
2836,
705,
28482,
6,
198,
37811,
198,
11748,
12234,
8019,
198,
11748,
848,
198,
11748,
28686,
198,
11748,
44161,
578,
... | 2.755682 | 352 |
import os
import pytest
import server as serv
filename = os.path.join("tests", "b64.txt")
with open(filename, "r") as fobj:
b64_str = fobj.read()
type_keys = {"a": int, "b": float, "c": str, "d": list}
@pytest.mark.parametrize("my_input, expected", [
(1, 1),
("1", 1),
("one", False),
(0j + 1, 1),
(1j + 1, False),
(1.4, False),
("1.4", False),
("12a", False),
("123", 123)
])
@pytest.mark.parametrize("my_input, expected", [
(1.0, 1.0),
("1", 1.0),
("one", False),
(0j + 1, 1.0),
(1j + 1, False),
(1.4, 1.4),
("12a", False),
("123.3", 123.3)
])
@pytest.mark.parametrize("my_input, exp_in, exp_out", [
({"a": 1, "b": "Smith.J", "c": 50.2}, {"a": int, "b": str, "c": float},
(True, 200)),
({"ab": 1, "b": "Smith.J", "c": 50.2}, {"a": int, "b": str, "c": float},
(True, 200)),
({"a": "1", "b": "Smith.J", "c": 50.2}, {"a": int, "b": str, "c": float},
("the key 'a' is a <class 'str'>, should be <class 'int'>", 400)),
({"a": 1.4, "b": "Smith.J", "c": 50.2}, {"a": int, "b": str, "c": float},
("the key 'a' is a <class 'float'>, should be <class 'int'>", 400)),
([], {"a": int, "b": str, "c": float},
("The input was not a dictionary.", 400))
])
@pytest.mark.parametrize("my_in, types, expected", [
({"a": "1", "b": "1.1", "c": "word", "d": ["1"]}, type_keys,
{"a": 1, "b": 1.1, "c": "word", "d": ["1"]}),
({"a": "1.1"}, type_keys, "key a is not convertable to an integer"),
({"b": "one"}, type_keys, "key b is not convertable to a float")
])
| [
11748,
28686,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
4382,
355,
1113,
198,
198,
34345,
796,
28686,
13,
6978,
13,
22179,
7203,
41989,
1600,
366,
65,
2414,
13,
14116,
4943,
198,
4480,
1280,
7,
34345,
11,
366,
81,
4943,
355,
277,... | 2.070496 | 766 |
#!/bin/python
# Python3 script which continuously reads Gmail messages and uploads them to GitHub
#
# This script assumes there exists a credentials directory (which is .gitignored) containing:
# - oauth secret api key
# - stored oauth access token for the gmail account
# - GitHub personal access token
#
# This also requires:
# - google-api-python-client (can install via pip)
#
# TODO:
# - add signature parsing so we retain only the body of the message
# brief testing with mailgun's open source talon piece ... showed that it didn't work <.<
import httplib2
import os
import json
import time
import email
import base64
import requests
import datetime
import traceback
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/gmail.modify https://www.googleapis.com/auth/gmail.labels'
CLIENT_SECRET_FILE = 'oauth.json'
STORED_CREDENTIAL_FILE = 'stored.json'
CREDENTIAL_DIR = 'credentials'
APPLICATION_NAME = 'Blog Comments'
GMAIL_LABEL = 'blog_comments/needs_upload'
GITHUB_PERSONAL_ACCESS_TOKEN_FILE = 'github_token.txt'
GITHUB_USERNAME = 'wko27'
GITHUB_REPO = 'blog'
POLL_DELAY_SECONDS = 20
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
credential_dir = os.path.relpath(CREDENTIAL_DIR)
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, STORED_CREDENTIAL_FILE)
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
client_secret_path = os.path.join(credential_dir, CLIENT_SECRET_FILE)
flow = client.flow_from_clientsecrets(client_secret_path, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def get_gmail_service():
""" Returns Gmail service object """
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
return discovery.build('gmail', 'v1', http=http)
def extract_mime_content(mime):
"""Extracts text from message MIME object
Returns:
String, contents
"""
if mime.is_multipart():
for part in mime.walk():
content_type = part.get_content_type()
content_disposition = str(part.get('Content-Disposition'))
# skip any text/plain (txt) attachments
if content_type == 'text/plain' and 'attachment' not in content_disposition:
body = part.get_payload(decode=True) # decode
break
# not multipart – i.e. plain text, no attachments, keeping fingers crossed
else:
body = mime.get_payload(decode=True)
return str(body, 'utf-8')
def retrieve_parts(message):
"""Extracts subject, from email address, author, and body from a message
Returns:
dict with post_id, author, email, and comment set
"""
msg_bytes = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))
msg_str = str(msg_bytes, 'utf-8')
mime_msg = email.message_from_string(msg_str)
body = extract_mime_content(mime_msg)
split = body.split('\n', 1)
first = split[0]
author = first[0:first.rfind(':')]
comment = split[1]
subject = mime_msg['Subject']
post_id = subject[len('[blog-comment]:'):]
from_email = email.utils.parseaddr(mime_msg['From'])[1]
return {
"post_id": post_id,
"author": author,
"email": from_email,
"comment": comment,
}
def main():
"""Extracts emails from particular label and uploads the content body to a blog on Git"""
with open(CREDENTIAL_DIR + '/' + GITHUB_PERSONAL_ACCESS_TOKEN_FILE, 'r') as github_token_file:
github_token = github_token_file.read().strip()
while True:
# Retrieve this in a loop in case the token expires
service = get_gmail_service()
print("Checking for new messages")
results = service.users().messages().list(userId='me', q='label: ' + GMAIL_LABEL).execute()
messages = results.get('messages', [])
if len(messages) > 0:
gmail_label_id = retrieve_label_id(service)
for message in messages:
try:
print("Retrieving full message for " + message['id'])
message = service.users().messages().get(userId='me', id=message['id'], format='raw').execute()
print("Extracting parts of message " + message['id'])
comment_parts = retrieve_parts(message)
print("Uploading comment from " + message['id'])
upload_to_github(github_token, comment_parts)
print('Removing label ' + GMAIL_LABEL + ' from message ' + message['id'])
service.users().messages().modify(userId='me', id=message['id'], body={"removeLabelIds": [gmail_label_id]}).execute()
except Exception as e:
print("Unable to process message " + message['id'])
traceback.print_exc()
# Sleep for 20 seconds
print("Finished processing messages, waiting {} seconds before next check".format(POLL_DELAY_SECONDS))
time.sleep(POLL_DELAY_SECONDS)
raise AssertionError("Should never get here ...")
if __name__ == '__main__':
main()
| [
2,
48443,
8800,
14,
29412,
198,
2,
11361,
18,
4226,
543,
17282,
9743,
33662,
6218,
290,
9516,
82,
606,
284,
21722,
198,
2,
198,
2,
770,
4226,
18533,
612,
7160,
257,
18031,
8619,
357,
4758,
318,
764,
18300,
570,
1850,
8,
7268,
25,
... | 2.502498 | 2,402 |
nucleotides = ["A", "C", "G", "T"]
# Average masses of monophosphate deoxy nucleotides
unambiguous_dna_weights = {"A": 331.2218, "C": 307.1971, "G": 347.2212, "T": 322.2085}
# Monoisotopic masses of monophospate deoxy nucleotides
monoisotopic_unambiguous_dna_weights = {
"A": 331.06817,
"C": 307.056936,
"G": 347.063084,
"T": 322.056602,
}
unambiguous_rna_weights = {"A": 347.2212, "C": 323.1965, "G": 363.2206, "U": 324.1813}
monoisotopic_unambiguous_rna_weights = {
"A": 347.063084,
"C": 323.051851,
"G": 363.057999,
"U": 324.035867,
}
protein_weights = {
"A": 89.0932,
"C": 121.1582,
"D": 133.1027,
"E": 147.1293,
"F": 165.1891,
"G": 75.0666,
"H": 155.1546,
"I": 131.1729,
"K": 146.1876,
"L": 131.1729,
"M": 149.2113,
"N": 132.1179,
"O": 255.3134,
"P": 115.1305,
"Q": 146.1445,
"R": 174.201,
"S": 105.0926,
"T": 119.1192,
"U": 168.0532,
"V": 117.1463,
"W": 204.2252,
"Y": 181.1885,
}
monoisotopic_protein_weights = {
"A": 89.047678,
"C": 121.019749,
"D": 133.037508,
"E": 147.053158,
"F": 165.078979,
"G": 75.032028,
"H": 155.069477,
"I": 131.094629,
"K": 146.105528,
"L": 131.094629,
"M": 149.051049,
"N": 132.053492,
"O": 255.158292,
"P": 115.063329,
"Q": 146.069142,
"R": 174.111676,
"S": 105.042593,
"T": 119.058243,
"U": 168.964203,
"V": 117.078979,
"W": 204.089878,
"Y": 181.073893,
}
RNA_Codons = {
# 'M' - START, '_' - STOP
"GCU": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"UGU": "C", "UGC": "C",
"GAU": "D", "GAC": "D",
"GAA": "E", "GAG": "E",
"UUU": "F", "UUC": "F",
"GGU": "G", "GGC": "G", "GGA": "G", "GGG": "G",
"CAU": "H", "CAC": "H",
"AUA": "I", "AUU": "I", "AUC": "I",
"AAA": "K", "AAG": "K",
"UUA": "L", "UUG": "L", "CUU": "L", "CUC": "L", "CUA": "L", "CUG": "L",
"AUG": "M",
"AAU": "N", "AAC": "N",
"CCU": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"CAA": "Q", "CAG": "Q",
"CGU": "R", "CGC": "R", "CGA": "R", "CGG": "R", "AGA": "R", "AGG": "R",
"UCU": "S", "UCC": "S", "UCA": "S", "UCG": "S", "AGU": "S", "AGC": "S",
"ACU": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"GUU": "V", "GUC": "V", "GUA": "V", "GUG": "V",
"UGG": "W",
"UAU": "Y", "UAC": "Y",
"UAA": "_", "UAG": "_", "UGA": "_"
}
DNA_Codons = {
# 'M' - START, '_' - STOP
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"TGT": "C", "TGC": "C",
"GAT": "D", "GAC": "D",
"GAA": "E", "GAG": "E",
"TTT": "F", "TTC": "F",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G",
"CAT": "H", "CAC": "H",
"ATA": "I", "ATT": "I", "ATC": "I",
"AAA": "K", "AAG": "K",
"TTA": "L", "TTG": "L", "CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
"ATG": "M",
"AAT": "N", "AAC": "N",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"CAA": "Q", "CAG": "Q",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R", "AGA": "R", "AGG": "R",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S", "AGT": "S", "AGC": "S",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"TGG": "W",
"TAT": "Y", "TAC": "Y",
"TAA": "_", "TAG": "_", "TGA": "_"
}
| [
198,
77,
14913,
313,
1460,
796,
14631,
32,
1600,
366,
34,
1600,
366,
38,
1600,
366,
51,
8973,
198,
198,
2,
13475,
14568,
286,
937,
2522,
14222,
378,
390,
23536,
17751,
313,
1460,
198,
403,
4131,
29709,
62,
67,
2616,
62,
43775,
796,
... | 1.705518 | 1,939 |
from distutils.core import setup
setup(
name='pybinarymoip',
version='0.0.8',
license='MIT',
description='Python library for Binary Media over IP (MOIP), used for Home Assistant',
author='Greg J. Badros',
author_email='badros@gmail.com',
url='http://github.com/gjbadros/pybinarymoip',
packages=['pybinarymoip'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Topic :: Home Automation',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
9078,
39491,
5908,
541,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
15,
13,
23,
3256,
198,
220,
220,
220,
5964,
11639,
36393,
3256,
198... | 2.720307 | 261 |
# -*- coding:utf-8 -*-
# @Time : 2019-12-30 13:41
# @Author : liuqiuxi
# @Email : liuqiuxi1990@gmail.com
# @File : indexfeeds.py
# @Project : datafeeds
# @Software: PyCharm
# @Remark : This is all data source index feeds
import datetime
from datafeeds.utils import BarFeedConfig
from datafeeds import logger
from datafeeds.winddatabasefeeds.indexfeedswinddatabase import AIndexQuotationWindDataBase
from datafeeds.winddatabasefeeds.indexfeedswinddatabase import AIndexWeightsWindDataBase
from datafeeds.jqdatafeeds.indexfeedsjqdata import AIndexQuotationJqData
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
201,
198,
2,
2488,
7575,
220,
220,
220,
1058,
13130,
12,
1065,
12,
1270,
1511,
25,
3901,
201,
198,
2,
2488,
13838,
220,
1058,
7649,
84,
40603,
2821,
72,
201,
198,
2,
2488,
1... | 2.602564 | 234 |
from perl import BarnOwl
m = BarnOwl.Module.Python
| [
6738,
48746,
1330,
11842,
46,
40989,
198,
76,
796,
11842,
46,
40989,
13,
26796,
13,
37906,
198
] | 3 | 17 |
# encoding: utf-8
from django import forms
from django.conf import settings
from django.views import generic
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import redirect, get_object_or_404
from django.utils.http import base36_to_int, int_to_base36
from django.contrib import auth, messages
from django.contrib.auth import get_user_model
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.auth.tokens import default_token_generator
import workon.utils
import workon.forms
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
33571,
1330,
14276,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
... | 3.246988 | 166 |
# Copyright (C) 2019 Akamai Technologies, Inc.
# Copyright (C) 2011-2016 Nominum, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Nominum Command Channel ThreadedCloser
A ThreadedCloser is a base class used for objects that want to be
automatically closed on command, when an idle timeout occurs, or after
a maximum lifetime.
Additionally, callbacks to run at close time can be specified with
at_close().
To use, simply subclass and override _close().
"""
import threading
import time
DEFAULT_TIMEOUT = 300
| [
2,
15069,
357,
34,
8,
13130,
9084,
1689,
72,
21852,
11,
3457,
13,
198,
2,
15069,
357,
34,
8,
2813,
12,
5304,
399,
6351,
388,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
... | 3.68705 | 278 |
#!/usr/bin/env python
import sys
import multiprocessing as mp
import numpy as np
import scipy.optimize as op
import copy
import blackboxhelper as bbh
import utils as u
from skopt import Optimizer
import skopt.acquisition as acq
import argparse
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel,WhiteKernel, Matern
VERSION = 180709
def get_default_executor():
"""
Provide a default executor (a context manager
returning an object with a map method).
This is the multiprocessing Pool object () for python3.
The multiprocessing Pool in python2 does not have an __enter__
and __exit__ method, this function provides a backport of the python3 Pool
context manager.
Returns
-------
Pool : executor-like object
An object with context manager (__enter__, __exit__) and map method.
"""
if (sys.version_info > (3, 0)):
Pool = mp.Pool
return Pool
else:
from contextlib import contextmanager
from functools import wraps
@wraps(mp.Pool)
@contextmanager
return Pool
def getInitialPoints(box,n):
"""
This function will return a list of points with shape (n,d).
Those points will form a latin hypercube such that the
positions minimize a 1/r potential energy.
Parameters
----------
box : List of lists of floats
It should contain [minimumValue, maximumValue] for each dimension.
It should have shape (d,2)
n : int
Number of initial points which you would like.
Returns
-------
points : list of lists of floats
Array of points uniformly placed in the initial box.
In should have shape (n,d)
"""
points = latin(n,len(box))
return np.asarray(unScalePoints(box,points))
def search(f, box, n, m, batch, resfile,
rho0=0.5, p=1.0, nrand=10000, nrand_frac=0.05,
executor=get_default_executor(), breakCheckFn=default_break_checker,plot=False):
"""
Minimize given expensive black-box function and save results into text file.
Parameters
----------
f : callable
The objective function to be minimized.
box : list of lists
List of ranges for each parameter.
n : int
Number of initial function calls.
m : int
Number of subsequent function calls.
batch : int
Number of function calls evaluated simultaneously (in parallel).
resfile : str
Text file to save results.
rho0 : float, optional
Initial "balls density".
p : float, optional
Rate of "balls density" decay (p=1 - linear, p>1 - faster, 0<p<1 - slower).
nrand : int, optional
Number of random samples that is generated for space rescaling.
nrand_frac : float, optional
Fraction of nrand that is actually used for space rescaling.
executor : callable, optional
Should have a map method and behave as a context manager.
Allows the user to use various parallelisation tools
as dask.distributed or pathos.
"""
# space size
d = len(box)
# adjusting the number of function calls to the batch size
if n % batch != 0:
n = n - n % batch + batch
if m % batch != 0:
m = m - m % batch + batch
# go from normalized values (unit cube) to absolute values (box)
# generating latin hypercube
points = np.zeros((n, d+1))
points[:, 0:-1] = latin(n, d)
# initial sampling
for i in range(n//batch):
with executor() as e:
points[batch*i:batch*(i+1), -1] = list(e.map(f, list(map(cubetobox, points[batch*i:batch*(i+1), 0:-1]))))
# normalizing function values
fmax = max(abs(points[:, -1]))
points[:, -1] = points[:, -1]/fmax
# volume of d-dimensional ball (r = 1)
v1 = getBallVolume(d)
# subsequent iterations (current subsequent iteration = i*batch+j)
for i in range(m//batch):
fit = getFit(points,nrand=nrand,nrand_frac=nrand_frac)
## Plot if you want to
if plot:
bbh.plotFit(fit,points,fmax,resfile + '.' + str(i) + '.png')
# check if the current fit is sufficiently converged.
if i > 0:
if breakCheckFn(fit, prevFit, fmax, prevFmax,d):
break
# store the current fit for use in the next iteration
prevFit = fit
prevFmax = fmax
points, newpoints = getNextPointsRBF(fit,points,batch,rho0=rho0, p=p)
with executor() as e:
points[n+batch*i:n+batch*(i+1), -1] = list(e.map(f, list(map(cubetobox, points[n+batch*i:n+batch*(i+1), 0:-1]))))/fmax
# saving results into text file
points[:, 0:-1] = list(map(cubetobox, points[:, 0:-1]))
points[:, -1] = points[:, -1]*fmax
points = points[points[:, -1].argsort()]
labels = [' par_'+str(i+1)+(7-len(str(i+1)))*' '+',' for i in range(d)]+[' f_value ']
np.savetxt(resfile, points, delimiter=',', fmt=' %+1.4e', header=''.join(labels), comments='')
return points, returnedScaleFit
def latin(n, d):
"""
Build latin hypercube.
Parameters
----------
n : int
Number of points.
d : int
Size of space.
Returns
-------
lh : ndarray
Array of points uniformly placed in d-dimensional unit cube.
"""
# spread function
# starting with diagonal shape
lh = [[i/(n-1.)]*d for i in range(n)]
# minimizing spread function by shuffling
minspread = spread(lh)
for i in range(1000):
point1 = np.random.randint(n)
point2 = np.random.randint(n)
dim = np.random.randint(d)
newlh = np.copy(lh)
newlh[point1, dim], newlh[point2, dim] = newlh[point2, dim], newlh[point1, dim]
newspread = spread(newlh)
if newspread < minspread:
lh = np.copy(newlh)
minspread = newspread
return np.asarray(lh)
def rbf(points, T):
"""
Build RBF-fit for given points (see Holmstrom, 2008 for details) using scaling matrix.
Parameters
----------
points : ndarray
Array of multi-d points with corresponding values [[x1, x2, .., xd, val], ...].
T : ndarray
Scaling matrix.
Returns
-------
fit : callable
Function that returns the value of the RBF-fit at a given point.
"""
n = len(points)
d = len(points[0])-1
sub = np.diagonal(np.subtract.outer(points[:,0:-1],points[:,0:-1]),axis1=1,axis2=3)
A = np.einsum('ji,kli',T,sub)
## A[d,N,N]
S = np.einsum('ijk,ijk->jk',A,A)
## S[N,N]
Phi = np.sqrt(np.multiply(S,np.multiply(S,S)))
P = np.ones((n, d+1))
P[:, 0:-1] = points[:, 0:-1]
F = points[:, -1]
M = np.zeros((n+d+1, n+d+1))
M[0:n, 0:n] = Phi
M[0:n, n:n+d+1] = P
M[n:n+d+1, 0:n] = np.transpose(P)
v = np.zeros(n+d+1)
v[0:n] = F
sol = np.linalg.solve(M, v)
lam, b, a = sol[0:n], sol[n:n+d], sol[n+d]
return fit
if __name__ == '__main__':
commands = {'init': runInit, 'next': runNext, 'analyze' : runAnalysis}
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subparser')
parser_init = subparsers.add_parser('init', help='Get an initial set of points')
parser_init.add_argument(
'N', help='Number of points to generate',type=int)
parser_init.add_argument(
'Output', help='Target filename for the output data',type=str)
parser_init.add_argument(
'Bounds', help='Pairs of lower and upper bounds',nargs='*',type=float)
parser_init.add_argument(
'-f', '--fmt', help='List of labels used in the output file', required=False,nargs='*',type=str)
parser_init.add_argument(
'-s', '--fst', help='List of formatting strings used in the output file', required=False,nargs='*',type=str)
parser_init.add_argument(
'-l', '--log', help='List of parameters (starting at 0) whose sampling will be in log-space', required=False,nargs='*',type=int)
parser_next = subparsers.add_parser('next', help='Get the next set of points')
parser_next.add_argument(
'N', help='Number of points to generate',type=int)
parser_next.add_argument(
'Input', help='Filename for the input data',type=str)
parser_next.add_argument(
'Output', help='Target filename for the output data',type=str)
parser_next.add_argument(
'-m', '--method', help='Method for choosing new points',type=str)
parser_next.add_argument(
'-p', '--plot', help='Base Filename for plots',type=str)
parser_next.add_argument(
'-f', '--fmt', help='List of labels used in the output file', required=False,nargs='*',type=str)
parser_next.add_argument(
'-s', '--fst', help='List of formatting strings used in the output file', required=False,nargs='*',type=str)
parser_next.add_argument(
'-l', '--log', help='List of parameters (starting at 0) whose sampling will be in log-space', required=False,nargs='*',type=int)
parser_next.add_argument(
'-a', '--args', help='Additional arguments for the optimizer', nargs='*')
parser_analyze = subparsers.add_parser('analyze', help='Take a look at the data')
parser_analyze.add_argument(
'Input', help='Filename for the input data')
parser_analyze.add_argument(
'-p', '--plot', help='Save a plot of the fit',type=str)
parser_analyze.add_argument(
'-m', '--method', help='Method for choosing new points',type=str)
parser_analyze.add_argument(
'-e', '--err', action='store_true', help='Plot the error in the fit')
parser_analyze.add_argument(
'-f', '--labels', help='List of labels to use in the plots', required=False,nargs='*',type=str)
parser_analyze.add_argument(
'-l', '--log', help='List of parameters (starting at 0) whose sampling will be in log-space. NOT YET IMPLEMENTED.', required=False,nargs='*',type=int)
parser_analyze.add_argument(
'-b', '--box', help='List of pairs of lower and upper bounds to use for analysis and plotting.', required=False,nargs='*')
parser_analyze.add_argument(
'-r', '--resolution', help='List of pairs of lower and upper bounds to use for analysis and plotting.', required=False, type=int)
args = parser.parse_args()
kwargs = {k:v for k,v in vars(args).iteritems() if v is not None}
commands = {'init': runInit, 'next': runNext, 'analyze' : runAnalysis}
command = commands[kwargs['subparser']]
del kwargs['subparser']
command(**kwargs)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
11748,
18540,
305,
919,
278,
355,
29034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
40085,
1096,
355,
1034,
198,
11748,
4866,
198,
11748,
2042,
... | 2.477837 | 4,309 |
from player import Player
import time
from tkinter import *
from tkinter.ttk import *
from ttkthemes import themed_tk as tk
pop = PopIt()
pop.run_game()
| [
6738,
2137,
1330,
7853,
201,
198,
11748,
640,
201,
198,
6738,
256,
74,
3849,
1330,
1635,
201,
198,
6738,
256,
74,
3849,
13,
926,
74,
1330,
1635,
201,
198,
6738,
256,
30488,
1169,
6880,
1330,
31892,
62,
30488,
355,
256,
74,
201,
198,... | 2.5 | 68 |
from distutils.version import StrictVersion
__all__ = ['VERSION']
VERSION = StrictVersion('0.14')
| [
6738,
1233,
26791,
13,
9641,
1330,
520,
2012,
14815,
198,
198,
834,
439,
834,
796,
37250,
43717,
20520,
198,
198,
43717,
796,
520,
2012,
14815,
10786,
15,
13,
1415,
11537,
198
] | 3.225806 | 31 |
import tempfile
from typing import List, Tuple, Optional
from mason.engines.scheduler.models.dags.invalid_dag_step import InvalidDagStep
from mason.engines.scheduler.models.dags.valid_dag_step import ValidDagStep
from asciidag.graph import Graph
from asciidag.node import Node
from mason.util.list import flatten_array, flatten
# TODO: Handles two parents with one child. Does not yet handle one parent with two children (well). | [
11748,
20218,
7753,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
11,
32233,
198,
198,
6738,
285,
888,
13,
1516,
1127,
13,
1416,
704,
18173,
13,
27530,
13,
67,
3775,
13,
259,
12102,
62,
67,
363,
62,
9662,
1330,
17665,
35,
363,
8600,... | 3.083916 | 143 |
#!/usr/bin/env python
#
# description: Rotting Oranges
# difficulty: Medium
# leetcode_num: 994
# leetcode_url: https://leetcode.com/problems/rotting-oranges/
#
# You are given an m x n grid where each cell can have one of three values:
# 0 representing an empty cell,
# 1 representing a fresh orange, or
# 2 representing a rotten orange.
# Every minute, any fresh orange that is 4-directionally adjacent to a rotten
# orange becomes rotten.
# Return the minimum number of minutes that must elapse until no cell has a
# fresh orange. If this is impossible, return -1.
#
# Example 1:
# Input: grid = [[2, 1, 1], [1, 1, 0], [0, 1, 1]]
# Output: 4
#
# Example 2:
# Input: grid = [[2, 1, 1], [0, 1, 1], [1, 0, 1]]
# Output: -1
# Explanation: The orange in the bottom left corner(row 2, column 0) is never
# rotten, because rotting only happens 4-directionally.
#
# Example 3:
# Input: grid = [[0, 2]]
# Output: 0
# Explanation: Since there are already no fresh oranges at minute 0, the
# answer is just 0.
#
# Constraints:
# m == grid.length
# n == grid[i].length
# 1 <= m, n <= 10
# grid[i][j] is 0, 1, or 2.
from collections import deque
import math
# Uses BFS to simulate the rotting oranges
if __name__ == '__main__':
test_cases = [
([[2, 1, 1], [1, 1, 0], [0, 1, 1]], 4),
([[2, 1, 1], [0, 1, 1], [1, 0, 1]], -1),
([[0, 2]], 0)
]
for inp, res in test_cases:
assert MinTimeToRotFreshOranges(inp) == res, 'Test Failed'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
6764,
25,
18481,
889,
1471,
6231,
198,
2,
8722,
25,
13398,
198,
2,
443,
316,
8189,
62,
22510,
25,
860,
5824,
198,
2,
443,
316,
8189,
62,
6371,
25,
3740,
1378,
293,
316... | 2.705341 | 543 |
#!/usr/bin/python -tt
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_cdb_user
short_description: create / delete a Rackspace Cloud Database
description:
- create / delete a database in the Cloud Databases.
version_added: "1.8"
options:
cdb_id:
description:
- The databases server UUID
default: null
db_username:
description:
- Name of the database user
default: null
db_password:
description:
- Database user password
default: null
databases:
description:
- Name of the databases that the user can access
default: []
host:
description:
- Specifies the host from which a user is allowed to connect to
the database. Possible values are a string containing an IPv4 address
or "%" to allow connecting from any host
default: '%'
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: "Simon JAILLET (@jails)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a user in Cloud Databases
tasks:
- name: User build request
local_action:
module: rax_cdb_user
credentials: ~/.raxpub
region: IAD
cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
db_username: user1
db_password: user1
databases: ['db1']
state: present
register: rax_db_user
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
532,
926,
198,
2,
770,
2393,
318,
636,
286,
28038,
856,
198,
2,
198,
2,
28038,
856,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
340,
739,
262,
2846,
286,
26... | 2.909964 | 833 |