hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7958e1d2d15748bf820b404326bfa0ad8981a466 | 783 | py | Python | app/chatcmd.py | yumimint/PSO2LogReader | b065ca2643776962d22cb10329443fbea5884ca4 | [
"MIT"
] | null | null | null | app/chatcmd.py | yumimint/PSO2LogReader | b065ca2643776962d22cb10329443fbea5884ca4 | [
"MIT"
] | null | null | null | app/chatcmd.py | yumimint/PSO2LogReader | b065ca2643776962d22cb10329443fbea5884ca4 | [
"MIT"
] | null | null | null | import re
# Alternations for every in-game slash command that may prefix a chat line.
# Joined below into a single anchored pattern.
cmd_rex = [
    'toge|moya|[apt]',
    r'(mn|vo|symbol|spage|swp|mainpalette|mpal|subpalette|spal|myset|ms|myfashion|mf)\d+',
    r'(face\d|fc\d|ce(all)?)( +(on|off))?',
    r'uioff( +\d+)?',
    r'ci\d+( +\d+)?( +t[1-5])?( +\d+)?',
    '(skillring|sr|costume|cs|camouflage|cmf) +[^ ]+',
    r'[cfm]?la +[^ ]+( +ss?(\d+(\.\d+)?))?',
    r'ce\d( +s(\d+(\.\d+)?))?',
    r'stamp +[^ ]',
]
cmd_rex = re.compile('^/(' + '|'.join(cmd_rex) + ') ?')

# Inline color tags such as "{red}" that PSO2 embeds in chat text.
color_rex = re.compile(r'{(red|bei|gre|vio|blk|ora|yel|blu|pur|gra|whi|def)}')


def strip(text):
    """Return *text* with color tags and leading chat commands removed.

    Commands are stripped repeatedly so that several commands stacked at
    the front of a message (e.g. "/mpal3 /a hello") all disappear.
    """
    text = color_rex.sub('', text)
    prev = None
    while text and text != prev:
        # Evaluate both sides before assigning so the comparison on the
        # next pass sees the pre-substitution (but left-trimmed) text.
        prev, text = text.lstrip(), cmd_rex.sub('', text).lstrip()
    return text
| 27 | 78 | 0.498084 |
7958e2e481fe76ab5e7f2d3b737b64c32a6f7b11 | 2,682 | py | Python | tplbuild/output.py | msg555/tplbuild | 1517fa97b17df4883f6885a7fb3ccfe017576e53 | [
"BSD-3-Clause"
] | null | null | null | tplbuild/output.py | msg555/tplbuild | 1517fa97b17df4883f6885a7fb3ccfe017576e53 | [
"BSD-3-Clause"
] | null | null | null | tplbuild/output.py | msg555/tplbuild | 1517fa97b17df4883f6885a7fb3ccfe017576e53 | [
"BSD-3-Clause"
] | null | null | null | import random
import sys
from typing import List, Optional
class OutputStream:
    """Line-oriented, prefixed output for a single sub-process.

    Each line written through this stream is prepended with the stream's
    title (optionally ANSI-colored) so interleaved output from several
    sub-processes stays attributable.
    """

    def __init__(self, title: str, color: Optional[bytes] = None) -> None:
        self.title = title
        encoded = title.encode("utf-8")
        if not title:
            # Untitled streams write lines through unchanged.
            self.prefix = b""
        elif color:
            # Color the title only, then reset before the separator.
            self.prefix = b"".join([color, encoded, b"\x1b[0m: "])
        else:
            self.prefix = encoded + b": "

    async def __aenter__(self) -> "OutputStream":
        return self

    async def __aexit__(self, exc_typ, exc_val, exc_tb) -> None:
        # A clean exit (no exception) counts as success.
        await self.end(exc_typ is None)

    async def write(self, line: bytes, *, err: bool = False) -> None:
        """
        Write a single line of data to the output stream. Set err=True
        to write to the error stream instead of output stream.
        """
        target = sys.stderr.buffer if err else sys.stdout.buffer
        payload = self.prefix + line
        if not line.endswith(b"\n"):
            payload += b"\n"
        target.write(payload)
        target.flush()

    async def end(self, success: bool) -> None:
        """
        End the output stream. If success is False buffered error content
        may be redisplayed.
        """
class OutputStreamer:
    """Factory for sub-command output streams.

    Hands out :class:`OutputStream` instances and, when color is enabled,
    assigns each one a title color drawn without replacement from a
    shuffled 12-color ANSI palette (replenished when exhausted).

    Arguments:
        use_color: If true ANSI color escape codes will be used to highlight
            the titles of the output streams.
    """

    def __init__(self, *, use_color: bool = True) -> None:
        self.use_color = use_color
        self.remaining_colors: List[bytes] = []

    def _reset_colors(self) -> None:
        """Refill and shuffle the palette.

        Uses the 16-color ANSI codes but deliberately skips the
        white/black variants, leaving 12 usable colors (6 normal +
        6 bold).
        """
        palette = []
        for code in range(31, 37):
            palette.append(f"\u001b[{code}m".encode("utf-8"))
        for code in range(31, 37):
            palette.append(f"\u001b[{code};1m".encode("utf-8"))
        random.shuffle(palette)
        self.remaining_colors = palette

    def start_stream(self, title: str) -> "OutputStream":
        """Create a new output stream with the given title."""
        if not self.use_color:
            return OutputStream(title, None)
        if not self.remaining_colors:
            self._reset_colors()
        return OutputStream(title, self.remaining_colors.pop())
| 32.707317 | 82 | 0.598061 |
7958e36e43f04d735db6d6e2fd678f506ae9f526 | 6,330 | py | Python | openpeerpower/components/zha/entity.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | openpeerpower/components/zha/entity.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | openpeerpower/components/zha/entity.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | 1 | 2019-04-24T14:10:08.000Z | 2019-04-24T14:10:08.000Z | """Entity for Zigbee Home Automation."""
import asyncio
import logging
import time
from openpeerpower.core import callback
from openpeerpower.helpers import entity
from openpeerpower.helpers.device_registry import CONNECTION_ZIGBEE
from openpeerpower.helpers.dispatcher import async_dispatcher_connect
from openpeerpower.helpers.restore_state import RestoreEntity
from .core.const import (
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
DATA_ZHA,
DATA_ZHA_BRIDGE_ID,
DOMAIN,
SIGNAL_REMOVE,
)
from .core.helpers import LogMixin
_LOGGER = logging.getLogger(__name__)
ENTITY_SUFFIX = "entity_suffix"
RESTART_GRACE_PERIOD = 7200 # 2 hours
class ZhaEntity(RestoreEntity, LogMixin, entity.Entity):
    """A base class for ZHA entities."""

    def __init__(self, unique_id, zha_device, channels, skip_entity_id=False, **kwargs):
        """Init ZHA entity.

        ``kwargs`` must contain a ``component`` entry; ``skip_entity_id``
        is accepted for signature compatibility but not used here.
        """
        self._force_update = False
        self._should_poll = False
        self._unique_id = unique_id
        # First four bytes of the IEEE address, hex-encoded, keep names
        # distinguishable between devices with the same friendly name.
        ieeetail = "".join([f"{o:02x}" for o in zha_device.ieee[:4]])
        ch_names = [ch.cluster.ep_attribute for ch in channels]
        ch_names = ", ".join(sorted(ch_names))
        self._name = f"{zha_device.name} {ieeetail} {ch_names}"
        self._state = None
        self._device_state_attributes = {}
        self._zha_device = zha_device
        # Maps channel name -> channel for every channel this entity uses.
        self.cluster_channels = {}
        self._available = False
        self._component = kwargs["component"]
        # Dispatcher unsubscribe callbacks, undone on removal.
        self._unsubs = []
        self.remove_future = None
        for channel in channels:
            self.cluster_channels[channel.name] = channel

    @property
    def name(self):
        """Return Entity's default name."""
        return self._name

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self._unique_id

    @property
    def zha_device(self):
        """Return the zha device this entity is attached to."""
        return self._zha_device

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        return self._device_state_attributes

    @property
    def force_update(self) -> bool:
        """Force update this entity."""
        return self._force_update

    @property
    def should_poll(self) -> bool:
        """Poll state from device."""
        return self._should_poll

    @property
    def device_info(self):
        """Return a device description for device registry."""
        zha_device_info = self._zha_device.device_info
        ieee = zha_device_info["ieee"]
        return {
            "connections": {(CONNECTION_ZIGBEE, ieee)},
            "identifiers": {(DOMAIN, ieee)},
            ATTR_MANUFACTURER: zha_device_info[ATTR_MANUFACTURER],
            ATTR_MODEL: zha_device_info[ATTR_MODEL],
            ATTR_NAME: zha_device_info[ATTR_NAME],
            # Link the device to the ZHA bridge in the registry.
            "via_device": (DOMAIN, self.opp.data[DATA_ZHA][DATA_ZHA_BRIDGE_ID]),
        }

    @property
    def available(self):
        """Return entity availability."""
        return self._available

    @callback
    def async_set_available(self, available):
        """Set entity availability and schedule a state refresh."""
        self._available = available
        self.async_schedule_update_op_state()

    @callback
    def async_update_state_attribute(self, key, value):
        """Update a single device state attribute."""
        self._device_state_attributes.update({key: value})
        self.async_schedule_update_op_state()

    @callback
    def async_set_state(self, state):
        """Set the entity state.  No-op here; subclasses override."""
        pass

    async def async_added_to_opp(self):
        """Run when about to be added to opp."""
        await super().async_added_to_opp()
        self.remove_future = asyncio.Future()
        await self.async_check_recently_seen()
        # Track device availability changes...
        await self.async_accept_signal(
            None,
            "{}_{}".format(self.zha_device.available_signal, "entity"),
            self.async_set_available,
            signal_override=True,
        )
        # ...and remove this entity when its device is removed.
        await self.async_accept_signal(
            None,
            "{}_{}".format(SIGNAL_REMOVE, str(self.zha_device.ieee)),
            self.async_remove,
            signal_override=True,
        )
        self._zha_device.gateway.register_entity_reference(
            self._zha_device.ieee,
            self.entity_id,
            self._zha_device,
            self.cluster_channels,
            self.device_info,
            self.remove_future,
        )

    async def async_check_recently_seen(self):
        """Check if the device was seen within the last 2 hours.

        If so, mark the device available and (for battery-powered
        devices) restore the previously recorded state.
        """
        last_state = await self.async_get_last_state()
        if (
            last_state
            and self._zha_device.last_seen
            and (time.time() - self._zha_device.last_seen < RESTART_GRACE_PERIOD)
        ):
            self.async_set_available(True)
            if not self.zha_device.is_mains_powered:
                # mains powered devices will get real time state
                self.async_restore_last_state(last_state)
            self._zha_device.set_available(True)

    async def async_will_remove_from_opp(self) -> None:
        """Disconnect entity object when removed."""
        # Iterate over a copy since we mutate the list while looping.
        for unsub in self._unsubs[:]:
            unsub()
            self._unsubs.remove(unsub)
        self.zha_device.gateway.remove_entity_reference(self)
        self.remove_future.set_result(True)

    @callback
    def async_restore_last_state(self, last_state):
        """Restore previous state.  No-op here; subclasses override."""
        pass

    async def async_update(self):
        """Retrieve latest state from every channel that supports it."""
        for channel in self.cluster_channels.values():
            if hasattr(channel, "async_update"):
                await channel.async_update()

    async def async_accept_signal(self, channel, signal, func, signal_override=False):
        """Accept a signal from a channel.

        With ``signal_override`` the raw signal name is used; otherwise
        the signal is namespaced by the channel's unique id.
        """
        unsub = None
        if signal_override:
            unsub = async_dispatcher_connect(self.opp, signal, func)
        else:
            unsub = async_dispatcher_connect(
                self.opp, f"{channel.unique_id}_{signal}", func
            )
        # Remember the unsubscribe hook for teardown.
        self._unsubs.append(unsub)

    def log(self, level, msg, *args):
        """Log a message, prefixed with this entity's id."""
        msg = f"%s: {msg}"
        args = (self.entity_id,) + args
        _LOGGER.log(level, msg, *args)
| 32.628866 | 88 | 0.631438 |
7958e38463d45d7bad039d45a390e42785908c0d | 3,718 | py | Python | src/fuse_utils.py | amosproj/amos2021ss03-synthetic-file-system | b274556808c07eeef34f37d4449824fa9f020a16 | [
"MIT"
] | 5 | 2021-04-14T14:52:41.000Z | 2021-06-11T09:22:21.000Z | src/fuse_utils.py | amosproj/amos2021ss03-synthetic-file-system | b274556808c07eeef34f37d4449824fa9f020a16 | [
"MIT"
] | 60 | 2021-04-14T08:43:17.000Z | 2021-07-14T13:19:30.000Z | src/fuse_utils.py | amosproj/amos2021ss03-synthetic-file-system | b274556808c07eeef34f37d4449824fa9f020a16 | [
"MIT"
] | 3 | 2021-04-20T19:11:01.000Z | 2021-05-19T09:44:53.000Z | # 3rd party imports
from anytree import Node, Resolver
from typing import Dict, List
def build_tree_from_files(files: List[Dict]) -> "Node":
    """Builds a directory tree out of the results from the metadata-hub query.

    The tree is built upon a "Root" node which is returned at the end.
    Each dict contains the entire result but only the file paths are used.

    Args:
        files (List[Dict]): list containing the results from the mdh query

    Returns:
        Node: The root node (anytree.Node) of the resulting tree
    """
    root_node = Node("Root")
    for path_parts in _extract_file_paths_parts(files):
        # Walk down from the root once per path, creating any missing
        # intermediate node on the way.  This replaces the previous
        # level-by-level rebuild-and-resolve loop, which re-resolved the
        # full parent path for every part of every path (quadratic in
        # path depth).  Nodes are still created in first-occurrence
        # order, so sibling ordering is unchanged.
        parent_node = root_node
        for part in path_parts:
            child = next(
                (node for node in parent_node.children if node.name == part),
                None,
            )
            if child is None:
                child = Node(part, parent_node)
            parent_node = child
    return root_node
def _extract_file_paths_parts(files: List[Dict]) -> List[List[str]]:
"""Extracts the file paths from each dict which contains the entire result
Args:
files (List[Dict]): list containing all results from the mdh.
Returns:
List[List[str]]: list containing only the paths. Each path is a list of its parts.
"""
file_paths = []
for file in files:
full_file_path = ""
for metadata in file['metadata']:
if metadata['name'] == "SourceFile":
full_file_path = metadata['value']
file_paths.append(full_file_path)
file_paths_parts = []
for file_path in file_paths:
# if file_path.startswith('/home/dome_/test_tree'):
# file_path = file_path[len('/home/dome_/test_tree'):]
file_paths_parts.append(file_path.split("/")[1:])
return file_paths_parts
def _create_path_from_parts(path_parts: List[str]) -> str:
"""Creates the typical path format from a list of the individual parts
Args:
path_parts (List[str]): list containing the parts of a path
Ex.: ['home', 'usr', 'dir1']
Returns:
str: Concatenation of the parts to a path format
Ex..: home/usr/dir
"""
return '/'.join(path_parts)
def _parent_has_child(parent_node: Node, name: str) -> bool:
"""Tests if the parent node has a child with the name given by the second argument
Args:
parent_node (Node): the parent node for the path without the last element given by name
name (str): name corresponds to the last item of the path
Returns:
bool: True if the parent node has a child with the specified name
"""
for child in parent_node.children:
if child.name == name:
return True
return False
def _length_of_longest_path(file_paths: List[List[str]]) -> int:
"""Determines the length of the longest path out of all the paths
Args:
file_paths (List[List[str]]): a list containing all file paths which are lists of the parts
Ex.: [['home', 'usr', 'dir1'], ['home', 'usr', 'dir2', 'file1'], ['home' , 'usr2']]
Returns:
int: The length of the longest path in the list - Ex.: 4
"""
lengths_of_paths = [len(path) for path in file_paths]
return max(lengths_of_paths, default=0)
| 36.45098 | 99 | 0.647122 |
7958e3abaaeb6ae06b9bb1714ee48ec1921a1fd6 | 13,865 | py | Python | src/OFS/tests/testFileAndImage.py | hitotsunorb1/Zope | 6beac6385e1a302903889561385013874ef94cb1 | [
"ZPL-2.1"
] | null | null | null | src/OFS/tests/testFileAndImage.py | hitotsunorb1/Zope | 6beac6385e1a302903889561385013874ef94cb1 | [
"ZPL-2.1"
] | null | null | null | src/OFS/tests/testFileAndImage.py | hitotsunorb1/Zope | 6beac6385e1a302903889561385013874ef94cb1 | [
"ZPL-2.1"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import time
import unittest
from io import BytesIO
import six
from six import PY3
import OFS.Image
import Testing.testbrowser
import Testing.ZopeTestCase
import transaction
import Zope2
from Acquisition import aq_base
from App.Common import rfc1123_date
from OFS.Application import Application
from OFS.Cache import ZCM_MANAGERS
from OFS.Image import Pdata
from OFS.SimpleItem import SimpleItem
from Testing.makerequest import makerequest
from zExceptions import Redirect
from zope.component import adapter
from zope.lifecycleevent.interfaces import IObjectCreatedEvent
from zope.lifecycleevent.interfaces import IObjectModifiedEvent
from ZPublisher.HTTPRequest import HTTPRequest
from ZPublisher.HTTPResponse import HTTPResponse
here = os.path.dirname(os.path.abspath(__file__))
filedata = os.path.join(here, 'test.gif')
Zope2.startup_wsgi()
def makeConnection():
    """Open a connection to a throwaway in-memory ZODB database."""
    import ZODB
    from ZODB.DemoStorage import DemoStorage

    storage = DemoStorage()
    return ZODB.DB(storage).open()
def aputrequest(file, content_type):
    """Build a PUT request for *file* carrying the given content type."""
    response = HTTPResponse(stdout=sys.stdout)
    environ = {
        'SERVER_NAME': 'foo',
        'SERVER_PORT': '80',
        'REQUEST_METHOD': 'PUT',
        'CONTENT_TYPE': content_type,
    }
    return HTTPRequest(stdin=file, environ=environ, response=response)
class DummyCache(object):
    """Recording stand-in for a ZCache.

    Remembers the most recent set/get/invalidate call so tests can
    assert on the cache traffic an object generated.
    """

    def __init__(self):
        self.clear()

    def ZCache_set(self, ob, data, view_name='', keywords=None,
                   mtime_func=None):
        # Record which object/data pair was last written.
        self.set = (ob, data)

    def ZCache_get(self, ob, data, view_name='', keywords=None,
                   mtime_func=None):
        self.get = ob
        # Only yield a value when a stream iterator was explicitly staged.
        return self.si if self.si else None

    def ZCache_invalidate(self, ob):
        self.invalidated = ob

    def clear(self):
        # Wipe every recorded interaction in one pass.
        for attribute in ('set', 'get', 'invalidated', 'si'):
            setattr(self, attribute, None)

    def setStreamIterator(self, si):
        self.si = si


# Single shared instance handed out to every object under test.
ADummyCache = DummyCache()
class DummyCacheManager(SimpleItem):
    """Cache manager stub that always hands out the shared dummy cache."""

    def ZCacheManager_getCache(self):
        # Every object wired to this manager shares the single
        # module-level ADummyCache, so tests can inspect all cache
        # traffic in one place.
        return ADummyCache
class EventCatcher(object):
    """Collects lifecycle events fired for OFS.Image.File objects.

    Registers itself as a zope.component handler on construction and
    stores created/modified events for later inspection by tests.
    """

    def __init__(self):
        self.created = []
        self.modified = []
        self.setUp()

    def setUp(self):
        from zope.component import provideHandler
        provideHandler(self.handleCreated)
        provideHandler(self.handleModified)

    def tearDown(self):
        from zope.component import getSiteManager
        site_manager = getSiteManager()
        site_manager.unregisterHandler(self.handleCreated)
        site_manager.unregisterHandler(self.handleModified)

    def reset(self):
        # Rebind fresh lists rather than clearing in place.
        self.created, self.modified = [], []

    @adapter(IObjectCreatedEvent)
    def handleCreated(self, event):
        # Only File (and subclass) events are of interest.
        if isinstance(event.object, OFS.Image.File):
            self.created.append(event)

    @adapter(IObjectModifiedEvent)
    def handleModified(self, event):
        if isinstance(event.object, OFS.Image.File):
            self.modified.append(event)
class FileTests(unittest.TestCase):
    """Tests for OFS.Image.File inside a throwaway in-memory ZODB app."""

    # Overridden by ImageTests to exercise the image factory instead.
    content_type = 'application/octet-stream'
    factory = 'manage_addFile'

    def setUp(self):
        with open(filedata, 'rb') as fd:
            self.data = fd.read()
        self.connection = makeConnection()
        self.eventCatcher = EventCatcher()
        try:
            r = self.connection.root()
            a = Application()
            r['Application'] = a
            self.root = a
            responseOut = self.responseOut = BytesIO()
            self.app = makerequest(self.root, stdout=responseOut)
            # Wire the dummy cache manager so cache traffic is observable.
            self.app.dcm = DummyCacheManager()
            factory = getattr(self.app, self.factory)
            factory('file',
                    file=self.data, content_type=self.content_type)
            self.app.file.ZCacheable_setManagerId('dcm')
            self.app.file.ZCacheable_setEnabled(enabled=1)
            setattr(self.app, ZCM_MANAGERS, ('dcm',))
            # Hack, we need a _p_mtime for the file, so we make sure that it
            # has one.
            transaction.commit()
        except Exception:
            transaction.abort()
            self.connection.close()
            raise
        transaction.begin()
        self.file = getattr(self.app, 'file')

        # Since we do the create here, let's test the events here too
        self.assertEqual(1, len(self.eventCatcher.created))
        self.assertTrue(
            aq_base(self.eventCatcher.created[0].object) is aq_base(self.file))

        self.assertEqual(1, len(self.eventCatcher.modified))
        self.assertTrue(
            aq_base(self.eventCatcher.created[0].object) is aq_base(self.file))

        self.eventCatcher.reset()

    def tearDown(self):
        del self.file
        transaction.abort()
        self.connection.close()
        del self.app
        del self.responseOut
        del self.root
        del self.connection
        ADummyCache.clear()
        self.eventCatcher.tearDown()

    def testViewImageOrFile(self):
        # view_image_or_file redirects; for a plain File that is an error.
        self.assertRaises(Redirect, self.file.view_image_or_file, 'foo')

    def testUpdateData(self):
        self.file.update_data(b'foo')
        self.assertEqual(self.file.size, 3)
        self.assertEqual(self.file.data, b'foo')
        # Updating must invalidate and repopulate the cache.
        self.assertTrue(ADummyCache.invalidated)
        self.assertTrue(ADummyCache.set)

    def testReadData(self):
        # 128K payload: big enough to be stored as a Pdata chain.
        s = b'a' * (2 << 16)
        data, size = self.file._read_data(BytesIO(s))
        self.assertIsInstance(data, Pdata)
        self.assertEqual(bytes(data), s)
        self.assertEqual(len(s), len(bytes(data)))
        self.assertEqual(len(s), size)

    def testBigPdata(self):
        # Test that a big enough string is split into several Pdata
        # From a file
        s = b'a' * (1 << 16) * 3
        data, size = self.file._read_data(BytesIO(s))
        self.assertNotEqual(data.next, None)
        # From a string
        data, size = self.file._read_data(s)
        self.assertNotEqual(data.next, None)

    def testManageEditWithFileData(self):
        self.file.manage_edit('foobar', 'text/plain', filedata=b'ASD')
        self.assertEqual(self.file.title, 'foobar')
        self.assertEqual(self.file.content_type, 'text/plain')
        self.assertTrue(ADummyCache.invalidated)
        self.assertTrue(ADummyCache.set)
        self.assertEqual(1, len(self.eventCatcher.modified))
        self.assertTrue(self.eventCatcher.modified[0].object is self.file)

    def testManageEditWithoutFileData(self):
        # Editing metadata only still fires a modified event.
        self.file.manage_edit('foobar', 'text/plain')
        self.assertEqual(self.file.title, 'foobar')
        self.assertEqual(self.file.content_type, 'text/plain')
        self.assertTrue(ADummyCache.invalidated)
        self.assertEqual(1, len(self.eventCatcher.modified))
        self.assertTrue(self.eventCatcher.modified[0].object is self.file)

    def testManageUpload(self):
        f = BytesIO(b'jammyjohnson')
        self.file.manage_upload(f)
        self.assertEqual(self.file.data, b'jammyjohnson')
        self.assertEqual(self.file.content_type, 'application/octet-stream')
        self.assertEqual(1, len(self.eventCatcher.modified))
        self.assertTrue(self.eventCatcher.modified[0].object is self.file)

    def testManageUploadWithoutFileData(self):
        # No payload -> no modification event.
        self.file.manage_upload()
        self.assertEqual(0, len(self.eventCatcher.modified))

    def testIfModSince(self):
        now = time.time()
        e = {'SERVER_NAME': 'foo',
             'SERVER_PORT': '80',
             'REQUEST_METHOD': 'GET'}

        # not modified since
        t_notmod = rfc1123_date(now)
        e['HTTP_IF_MODIFIED_SINCE'] = t_notmod
        out = BytesIO()
        resp = HTTPResponse(stdout=out)
        req = HTTPRequest(sys.stdin, e, resp)
        data = self.file.index_html(req, resp)
        self.assertEqual(resp.getStatus(), 304)
        self.assertEqual(data, b'')

        # modified since
        t_mod = rfc1123_date(now - 100)
        e['HTTP_IF_MODIFIED_SINCE'] = t_mod
        out = BytesIO()
        resp = HTTPResponse(stdout=out)
        req = HTTPRequest(sys.stdin, e, resp)
        data = self.file.index_html(req, resp)
        self.assertEqual(resp.getStatus(), 200)
        self.assertEqual(data, bytes(self.file.data))

    def testIndexHtmlWithPdata(self):
        # Large (Pdata-chained) content is streamed to the response.
        self.file.manage_upload(b'a' * (2 << 16))  # 128K
        self.file.index_html(self.app.REQUEST, self.app.REQUEST.RESPONSE)
        self.assertTrue(self.app.REQUEST.RESPONSE._wrote)

    def testIndexHtmlWithString(self):
        # Small content is returned, not streamed.
        self.file.manage_upload(b'a' * 100)  # 100 bytes
        self.file.index_html(self.app.REQUEST, self.app.REQUEST.RESPONSE)
        self.assertTrue(not self.app.REQUEST.RESPONSE._wrote)

    def testPrincipiaSearchSource_not_text(self):
        # Binary content must not contribute to the search source.
        data = ''.join([chr(x) for x in range(256)])
        if PY3:
            data = data.encode('utf-8')
        self.file.manage_edit('foobar', 'application/octet-stream',
                              filedata=data)
        self.assertEqual(self.file.PrincipiaSearchSource(), b'')

    def testPrincipiaSearchSource_text(self):
        self.file.manage_edit('foobar', 'text/plain',
                              filedata=b'Now is the time for all good men to '
                                       b'come to the aid of the Party.')
        self.assertTrue(b'Party' in self.file.PrincipiaSearchSource())

    def test_interfaces(self):
        from zope.interface.verify import verifyClass
        from OFS.Image import File
        from OFS.interfaces import IWriteLock
        from ZPublisher.HTTPRangeSupport import HTTPRangeInterface

        verifyClass(HTTPRangeInterface, File)
        verifyClass(IWriteLock, File)

    def testUnicode(self):
        # update_data only accepts bytes, not text.
        val = u'some unicode string here'
        self.assertRaises(TypeError, self.file.update_data,
                          data=val, content_type='text/plain')
class ImageTests(FileTests):
    """Re-runs the File tests against OFS.Image.Image (a 16x16 GIF)."""

    content_type = 'image/gif'
    factory = 'manage_addImage'

    def testUpdateData(self):
        self.file.update_data(self.data)
        self.assertEqual(self.file.size, len(self.data))
        self.assertEqual(self.file.data, self.data)
        # Image dimensions are derived from the uploaded GIF payload.
        self.assertEqual(self.file.width, 16)
        self.assertEqual(self.file.height, 16)
        self.assertTrue(ADummyCache.invalidated)
        self.assertTrue(ADummyCache.set)

    def testTag(self):
        # alt/title fill in from properties as they are set.
        tag_fmt = ('<img src="http://nohost/file" '
                   'alt="%s" title="%s" height="16" width="16" />')
        self.assertEqual(self.file.tag(), (tag_fmt % ('', '')))
        self.file.manage_changeProperties(title='foo')
        self.assertEqual(self.file.tag(), (tag_fmt % ('', 'foo')))
        self.file.manage_changeProperties(alt='bar')
        self.assertEqual(self.file.tag(), (tag_fmt % ('bar', 'foo')))

    def testViewImageOrFile(self):
        # Unlike a plain File, an Image serves its bytes directly.
        request = self.app.REQUEST
        response = request.RESPONSE
        result = self.file.index_html(request, response)
        self.assertEqual(result, self.data)

    def test_interfaces(self):
        from zope.interface.verify import verifyClass
        from OFS.Image import Image
        from OFS.interfaces import IWriteLock

        verifyClass(IWriteLock, Image)

    def test_text_representation_is_tag(self):
        self.assertEqual(six.text_type(self.file),
                         '<img src="http://nohost/file"'
                         ' alt="" title="" height="16" width="16" />')
class FileEditTests(Testing.ZopeTestCase.FunctionalTestCase):
    """Browser testing ..Image.File"""

    def setUp(self):
        super(FileEditTests, self).setUp()
        # A manager account is needed to reach the ZMI edit views.
        uf = self.app.acl_users
        uf.userFolderAddUser('manager', 'manager_pass', ['Manager'], [])
        self.app.manage_addFile('file')

        transaction.commit()
        self.browser = Testing.testbrowser.Browser()
        self.browser.login('manager', 'manager_pass')

    def test_Image__manage_main__1(self):
        """It shows the content of text files as text."""
        self.app.file.update_data(u'hällo'.encode('utf-8'))
        self.browser.open('http://localhost/file/manage_main')
        text = self.browser.getControl(name='filedata:text').value
        self.assertEqual(text, 'hällo')

    @unittest.skipIf(six.PY2, "feature not supported on Python 2")
    def test_Image__manage_main__3(self):
        """It shows an error message if the file content cannot be decoded."""
        # latin-1 bytes are not valid UTF-8, so decoding must fail.
        self.app.file.update_data(u'hällo'.encode('latin-1'))
        self.browser.open('http://localhost/file/manage_main')
        self.assertIn(
            "The file could not be decoded with 'utf-8'.",
            self.browser.contents)

    def test_Image__manage_upload__1(self):
        """It uploads a file, replaces the content and sets content type."""
        self.browser.open('http://localhost/file/manage_main')
        self.browser.getControl(name='file').add_file(
            b'test text file', 'text/plain', 'TestFile.txt')
        self.browser.getControl('Upload File').click()
        self.assertIn('Saved changes', self.browser.contents)
        self.assertEqual(
            self.browser.getControl('Content Type').value, 'text/plain')
        text = self.browser.getControl(name='filedata:text').value
        self.assertEqual(text, 'test text file')

    def test_Image__manage_edit__1(self):
        """It is possible to change the file's content via the browser."""
        self.browser.open('http://localhost/file/manage_main')
        text_1 = self.browser.getControl(name='filedata:text').value
        self.assertEqual(text_1, '')
        self.browser.getControl(name='filedata:text').value = u'hällo'
        self.browser.getControl('Save Changes').click()
        self.assertIn('Saved changes', self.browser.contents)
        text_2 = self.browser.getControl(name='filedata:text').value
        self.assertEqual(text_2, 'hällo')
| 35.190355 | 79 | 0.644428 |
7958e3b59f5ad86fd0baef4f2595df67752cc8e3 | 1,610 | py | Python | tests/test_ingest_open_access.py | ourresearch/journalsdb | 169feb9be684eac59f3294dccdb319eb10fe1958 | [
"MIT"
] | 8 | 2021-02-01T21:00:20.000Z | 2022-01-25T09:51:24.000Z | tests/test_ingest_open_access.py | ourresearch/journalsdb | 169feb9be684eac59f3294dccdb319eb10fe1958 | [
"MIT"
] | 43 | 2021-04-28T00:20:53.000Z | 2022-03-09T00:39:56.000Z | tests/test_ingest_open_access.py | ourresearch/journalsdb | 169feb9be684eac59f3294dccdb319eb10fe1958 | [
"MIT"
] | null | null | null | import pandas as pd
import pytest
from ingest.open_access import import_open_access
from models.usage import OpenAccess
from views import app
# Single-row sample mimicking the open-access CSV export that the importer
# reads (one journal, issn_l 2291-5222, year 2010).  Note several numeric
# columns deliberately arrive as strings, mirroring the raw CSV.
test_data = {
    "issn_l": ["2291-5222"],
    "title": ["Tropical Parasitology"],
    "year": ["2010"],
    "num_dois": ["10"],
    "num_open": [7],
    "open_rate": ["0.7"],
    "num_green": [7],
    "green_rate": ["0.7"],
    "num_bronze": [0],
    "bronze_rate": ["0.0"],
    "num_hybrid": [0],
    "hybrid_rate": ["0.0"],
    "num_gold": [0],
    "gold_rate": ["0.0"],
    "is_in_doaj": [False],
    "is_gold_journal": [False],
}
@pytest.mark.skip(reason="need to refactor due to open access import changes")
def test_import_open_access(ingest_client, mocker):
    """The CLI command stores an OpenAccess row with coerced values."""
    # Replace the remote CSV read with the in-memory sample above.
    mocker.patch(
        "ingest.open_access.pd.read_csv",
        return_value=[pd.DataFrame(data=test_data)],
    )

    # run command
    runner = app.test_cli_runner()
    runner.invoke(import_open_access)

    oa = OpenAccess.query.filter_by(issn_l="2291-5222").first()
    assert oa.is_in_doaj is False
    assert oa.year == 2010
    assert oa.num_dois == 10
    assert oa.open_rate == 0.7
@pytest.mark.skip(reason="need to refactor due to open access import changes")
def test_import_open_access_no_duplicate(api_client, mocker):
    """Running the import twice must not create a duplicate row."""
    mocker.patch(
        "ingest.open_access.pd.read_csv",
        return_value=[pd.DataFrame(data=test_data)],
    )

    # run command
    runner = app.test_cli_runner()
    runner.invoke(import_open_access)

    # run again
    runner.invoke(import_open_access)

    oas = OpenAccess.query.filter_by(issn_l="2291-5222").all()
    assert len(oas) == 1
| 24.769231 | 78 | 0.652795 |
7958e3c0a6e65ba617fa92f3006a44cc0275c1bb | 20,656 | py | Python | google/resumable_media/_download.py | google/google-resumable-upload-python | 82f9769f3368404d1854dd22eeed34eeb25ea835 | [
"Apache-2.0"
] | null | null | null | google/resumable_media/_download.py | google/google-resumable-upload-python | 82f9769f3368404d1854dd22eeed34eeb25ea835 | [
"Apache-2.0"
] | null | null | null | google/resumable_media/_download.py | google/google-resumable-upload-python | 82f9769f3368404d1854dd22eeed34eeb25ea835 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Virtual bases classes for downloading media from Google APIs."""
import http.client
import re
from google.resumable_media import _helpers
from google.resumable_media import common
# Parses a Content-Range header such as "bytes 1000-6999/42000", exposing
# the start byte, end byte and total size as named groups.
_CONTENT_RANGE_RE = re.compile(
    r"bytes (?P<start_byte>\d+)-(?P<end_byte>\d+)/(?P<total_bytes>\d+)",
    flags=re.IGNORECASE,
)
# Responses considered successful: 200 (full body) and 206 (partial body).
_ACCEPTABLE_STATUS_CODES = (http.client.OK, http.client.PARTIAL_CONTENT)
_GET = "GET"
# Content-Range value a server reports for a zero-byte resource.
_ZERO_CONTENT_RANGE_HEADER = "bytes */0"
class DownloadBase(object):
    """Shared plumbing for download helpers.

    Stores the request target, the optional byte range and the output
    stream, and declares the response-accessor hooks that
    transport-specific subclasses must override.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        stream (IO[bytes]): A write-able stream (i.e. file-like object) that
            the downloaded resource can be written to.
        start (int): The first byte in a range to be downloaded.
        end (int): The last byte in a range to be downloaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.

    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
    """

    def __init__(self, media_url, stream=None, start=None, end=None, headers=None):
        self.media_url = media_url
        self._stream = stream
        self.start = start
        self.end = end
        self._headers = {} if headers is None else headers
        self._finished = False
        self._retry_strategy = common.RetryStrategy()

    @property
    def finished(self):
        """bool: Flag indicating if the download has completed."""
        return self._finished

    @staticmethod
    def _get_status_code(response):
        """Access the status code from an HTTP response.

        Args:
            response (object): The HTTP response object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")

    @staticmethod
    def _get_headers(response):
        """Access the headers from an HTTP response.

        Args:
            response (object): The HTTP response object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")

    @staticmethod
    def _get_body(response):
        """Access the response body from an HTTP response.

        Args:
            response (object): The HTTP response object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
class Download(DownloadBase):
    """Helper to manage downloading a resource from a Google API.

    A "slice" of the resource can be requested by providing ``start``
    and / or ``end``; in typical usage neither is supplied and the whole
    media object is fetched.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        stream (IO[bytes]): A write-able stream (i.e. file-like object)
            that the downloaded resource can be written to.
        start (int): The first byte in a range to be downloaded. If only
            ``end`` is provided, the download begins at the start of the
            media.
        end (int): The last byte in a range to be downloaded. If only
            ``start`` is provided, the download runs to the end of the
            media.
        headers (Optional[Mapping[str, str]]): Extra headers sent with the
            request, e.g. headers for encrypted data.
        checksum Optional([str]): The type of checksum to compute to
            verify the integrity of the object; supported values are
            "md5", "crc32c" and None. If the response headers lack a
            checksum of the requested type (for instance on transcoded or
            ranged downloads where the remote service does not know the
            correct checksum), an INFO-level log is emitted instead.
    """

    def __init__(
        self, media_url, stream=None, start=None, end=None, headers=None, checksum="md5"
    ):
        super(Download, self).__init__(
            media_url, stream=stream, start=start, end=end, headers=headers
        )
        self.checksum = checksum
        # Bookkeeping populated while the response body is consumed.
        self._bytes_downloaded = 0
        self._expected_checksum = None
        self._checksum_object = None
        self._object_generation = None

    def _prepare_request(self):
        """Build the pieces of the HTTP request, without any I/O.

        Follows the `sans-I/O`_ philosophy: everything that can happen
        before the network round-trip is done here.

        Returns:
            Tuple[str, str, NoneType, Mapping[str, str]]: The HTTP verb
            (always GET), the request URL, the request body (always
            :data:`None`) and the headers for the request.

        Raises:
            ValueError: If the current :class:`Download` has already
                finished.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.finished:
            raise ValueError("A download can only be used once.")
        # Fold the requested slice (if any) into the ``range`` header.
        add_bytes_range(self.start, self.end, self._headers)
        return _GET, self.media_url, None, self._headers

    def _process_response(self, response):
        """Validate the response after the request completes (sans I/O).

        Args:
            response (object): The HTTP response object.
        """
        # Tombstone the current Download so it cannot be used again.
        self._finished = True
        _helpers.require_status_code(
            response, _ACCEPTABLE_STATUS_CODES, self._get_status_code
        )

    def consume(self, transport, timeout=None):
        """Consume the resource to be downloaded.

        If a ``stream`` is attached to this download, the downloaded
        resource is written to it.

        Args:
            transport (object): An object which can make authenticated
                requests.
            timeout (Optional[Union[float, Tuple[float, float]]]): Seconds
                to wait for the server response; depending on the retry
                strategy a request may be repeated several times with the
                same timeout. A tuple is treated as
                (connect_timeout, read_timeout) as in
                :meth:`requests.Session.request`.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
class ChunkedDownload(DownloadBase):
    """Download a resource in chunks from a Google API.
    Args:
        media_url (str): The URL containing the media to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each
            request.
        stream (IO[bytes]): A write-able stream (i.e. file-like object) that
            will be used to concatenate chunks of the resource as they are
            downloaded.
        start (int): The first byte in a range to be downloaded. If not
            provided, defaults to ``0``.
        end (int): The last byte in a range to be downloaded. If not
            provided, will download to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with each request, e.g. headers for data encryption
            key headers.
    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each request.
    Raises:
        ValueError: If ``start`` is negative.
    """
    def __init__(self, media_url, chunk_size, stream, start=0, end=None, headers=None):
        if start < 0:
            raise ValueError(
                "On a chunked download the starting " "value cannot be negative."
            )
        super(ChunkedDownload, self).__init__(
            media_url, stream=stream, start=start, end=end, headers=headers
        )
        self.chunk_size = chunk_size
        # Mutable per-download state: bytes consumed so far, the total
        # size (learned from the first response) and the invalid flag.
        self._bytes_downloaded = 0
        self._total_bytes = None
        self._invalid = False
    @property
    def bytes_downloaded(self):
        """int: Number of bytes that have been downloaded."""
        return self._bytes_downloaded
    @property
    def total_bytes(self):
        """Optional[int]: The total number of bytes to be downloaded."""
        return self._total_bytes
    @property
    def invalid(self):
        """bool: Indicates if the download is in an invalid state.
        This will occur if a call to :meth:`consume_next_chunk` fails.
        """
        return self._invalid
    def _get_byte_range(self):
        """Determines the byte range for the next request.
        Returns:
            Tuple[int, int]: The pair of begin and end byte for the next
            chunked request.
        """
        curr_start = self.start + self.bytes_downloaded
        # Byte ranges are inclusive, hence the ``- 1``.
        curr_end = curr_start + self.chunk_size - 1
        # Make sure ``curr_end`` does not exceed ``end``.
        if self.end is not None:
            curr_end = min(curr_end, self.end)
        # Make sure ``curr_end`` does not exceed ``total_bytes - 1``.
        if self.total_bytes is not None:
            curr_end = min(curr_end, self.total_bytes - 1)
        return curr_start, curr_end
    def _prepare_request(self):
        """Prepare the contents of an HTTP request.
        This is everything that must be done before a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.
        .. note:
            This method will be used multiple times, so ``headers`` will
            be mutated in between requests. However, we don't make a copy
            since the same keys are being updated.
        Returns:
            Tuple[str, str, NoneType, Mapping[str, str]]: The quadruple
              * HTTP verb for the request (always GET)
              * the URL for the request
              * the body of the request (always :data:`None`)
              * headers for the request
        Raises:
            ValueError: If the current download has finished.
            ValueError: If the current download is invalid.
        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.finished:
            raise ValueError("Download has finished.")
        if self.invalid:
            raise ValueError("Download is invalid and cannot be re-used.")
        curr_start, curr_end = self._get_byte_range()
        add_bytes_range(curr_start, curr_end, self._headers)
        return _GET, self.media_url, None, self._headers
    def _make_invalid(self):
        """Simple setter for ``invalid``.
        This is intended to be passed along as a callback to helpers that
        raise an exception so they can mark this instance as invalid before
        raising.
        """
        self._invalid = True
    def _process_response(self, response):
        """Process the response from an HTTP request.
        This is everything that must be done after a request that doesn't
        require network I/O. This is based on the `sans-I/O`_ philosophy.
        For the time being, this **does require** some form of I/O to write
        a chunk to ``stream``. However, this will (almost) certainly not be
        network I/O.
        Updates the current state after consuming a chunk. First,
        increments ``bytes_downloaded`` by the number of bytes in the
        ``content-length`` header.
        If ``total_bytes`` is already set, this assumes (but does not check)
        that we already have the correct value and doesn't bother to check
        that it agrees with the headers.
        We expect the **total** length to be in the ``content-range`` header,
        but this header is only present on requests which sent the ``range``
        header. This response header should be of the form
        ``bytes {start}-{end}/{total}`` and ``{end} - {start} + 1``
        should be the same as the ``Content-Length``.
        Args:
            response (object): The HTTP response object (need headers).
        Raises:
            ~google.resumable_media.common.InvalidResponse: If the number
                of bytes in the body doesn't match the content length header.
        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        # Verify the response before updating the current instance.
        # A 416 with ``bytes */0`` means a zero-byte object: nothing to
        # write, the download is simply complete.
        if _check_for_zero_content_range(
            response, self._get_status_code, self._get_headers
        ):
            self._finished = True
            return
        _helpers.require_status_code(
            response,
            _ACCEPTABLE_STATUS_CODES,
            self._get_status_code,
            callback=self._make_invalid,
        )
        headers = self._get_headers(response)
        response_body = self._get_body(response)
        start_byte, end_byte, total_bytes = get_range_info(
            response, self._get_headers, callback=self._make_invalid
        )
        transfer_encoding = headers.get("transfer-encoding")
        if transfer_encoding is None:
            content_length = _helpers.header_required(
                response,
                "content-length",
                self._get_headers,
                callback=self._make_invalid,
            )
            num_bytes = int(content_length)
            if len(response_body) != num_bytes:
                self._make_invalid()
                raise common.InvalidResponse(
                    response,
                    "Response is different size than content-length",
                    "Expected",
                    num_bytes,
                    "Received",
                    len(response_body),
                )
        else:
            # 'content-length' header not allowed with chunked encoding.
            # Fall back to the size implied by the content-range values.
            num_bytes = end_byte - start_byte + 1
        # First update ``bytes_downloaded``.
        self._bytes_downloaded += num_bytes
        # If the end byte is past ``end`` or ``total_bytes - 1`` we are done.
        if self.end is not None and end_byte >= self.end:
            self._finished = True
        elif end_byte >= total_bytes - 1:
            self._finished = True
        # NOTE: We only use ``total_bytes`` if not already known.
        if self.total_bytes is None:
            self._total_bytes = total_bytes
        # Write the response body to the stream.
        self._stream.write(response_body)
    def consume_next_chunk(self, transport, timeout=None):
        """Consume the next chunk of the resource to be downloaded.
        Args:
            transport (object): An object which can make authenticated
                requests.
            timeout (Optional[Union[float, Tuple[float, float]]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as a tuple (connect_timeout, read_timeout).
                See :meth:`requests.Session.request` documentation for details.
        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
def add_bytes_range(start, end, headers):
    """Insert a ``range`` header describing ``start``/``end`` into ``headers``.

    Some possible inputs and the corresponding bytes ranges::

       >>> headers = {}
       >>> add_bytes_range(None, None, headers)
       >>> headers
       {}
       >>> add_bytes_range(500, 999, headers)
       >>> headers['range']
       'bytes=500-999'
       >>> add_bytes_range(None, 499, headers)
       >>> headers['range']
       'bytes=0-499'
       >>> add_bytes_range(-500, None, headers)
       >>> headers['range']
       'bytes=-500'
       >>> add_bytes_range(9500, None, headers)
       >>> headers['range']
       'bytes=9500-'

    Args:
        start (Optional[int]): The first byte in a range. Can be zero,
            positive, negative or :data:`None`.
        end (Optional[int]): The last byte in a range. Assumed to be
            positive.
        headers (Mapping[str, str]): A headers mapping which can have the
            bytes range added if at least one of ``start`` or ``end``
            is not :data:`None`.
    """
    if start is None and end is None:
        # Nothing requested: leave ``headers`` untouched.
        return
    if start is None:
        # Only ``end``: range from the first byte (assumes ``end`` >= 0).
        bytes_range = "0-{:d}".format(end)
    elif end is None:
        # Only ``start``: a negative start means a suffix range, otherwise
        # an open-ended range beginning at ``start``.
        bytes_range = "{:d}".format(start) if start < 0 else "{:d}-".format(start)
    else:
        # Both endpoints given. NOTE: invalid if ``start < 0``.
        bytes_range = "{:d}-{:d}".format(start, end)
    headers[_helpers.RANGE_HEADER] = "bytes=" + bytes_range
def get_range_info(response, get_headers, callback=_helpers.do_nothing):
    """Extract start byte, end byte and total size from ``Content-Range``.

    Args:
        response (object): An HTTP response object.
        get_headers (Callable[Any, Mapping[str, str]]): Helper that pulls
            the headers out of an HTTP response.
        callback (Optional[Callable]): A callback that takes no arguments,
            executed just before an exception is raised.

    Returns:
        Tuple[int, int, int]: The start byte, end byte and total bytes.

    Raises:
        ~google.resumable_media.common.InvalidResponse: If the
            ``Content-Range`` header is not of the form
            ``bytes {start}-{end}/{total}``.
    """
    content_range = _helpers.header_required(
        response, _helpers.CONTENT_RANGE_HEADER, get_headers, callback=callback
    )
    match = _CONTENT_RANGE_RE.match(content_range)
    if match is not None:
        # Well-formed header: return the three named groups as integers.
        return (
            int(match.group("start_byte")),
            int(match.group("end_byte")),
            int(match.group("total_bytes")),
        )
    # Malformed header: give the caller a chance to react, then raise.
    callback()
    raise common.InvalidResponse(
        response,
        "Unexpected content-range header",
        content_range,
        'Expected to be of the form "bytes {start}-{end}/{total}"',
    )
def _check_for_zero_content_range(response, get_status_code, get_headers):
"""Validate if response status code is 416 and content range is zero.
This is the special case for handling zero bytes files.
Args:
response (object): An HTTP response object.
get_status_code (Callable[Any, int]): Helper to get a status code
from a response.
get_headers (Callable[Any, Mapping[str, str]]): Helper to get headers
from an HTTP response.
Returns:
bool: True if content range total bytes is zero, false otherwise.
"""
if get_status_code(response) == http.client.REQUESTED_RANGE_NOT_SATISFIABLE:
content_range = _helpers.header_required(
response,
_helpers.CONTENT_RANGE_HEADER,
get_headers,
callback=_helpers.do_nothing,
)
if content_range == _ZERO_CONTENT_RANGE_HEADER:
return True
return False
| 36.885714 | 88 | 0.617787 |
7958e4acc66eca059d101eb2884d712cd85ea78b | 7,668 | py | Python | MMcalendar/models.py | PhiloTFarnsworth/MarketMePostMortem | 4627312f1c784abaf51a4eab7e0ecae940112478 | [
"MIT"
] | null | null | null | MMcalendar/models.py | PhiloTFarnsworth/MarketMePostMortem | 4627312f1c784abaf51a4eab7e0ecae940112478 | [
"MIT"
] | null | null | null | MMcalendar/models.py | PhiloTFarnsworth/MarketMePostMortem | 4627312f1c784abaf51a4eab7e0ecae940112478 | [
"MIT"
] | null | null | null | from django.db import models
from uuid import uuid4
from MMUX.models import User
from MMcalendar.util import initStaticValue
from datetime import timedelta, datetime
## Relationship Levels at this moment. public events and public time are
## displayed to anyone, the following button makes the introduction, then
## clients graduate, at the calendar owner's discretion, to seeing relationship
## and priority relationship reserved time.
# Django ``choices`` pairs for the relationship tiers; shared by
# Service.relationRequired and AvailabilityRule.relationship below.
RELCHOICES = [
    (0, 'Public'),
    (1, 'Introduction'), ## Calendar owner is followed
    (2, 'Relationship'), ## Calendar owner solidifies relationship
    (3, 'Priority Relationship') ## top priority clients, full calendar/service access
]
class CalendarManager(models.Manager):
    # Natural-key support so Calendar rows can be serialized/deserialized
    # by owner rather than by auto-generated primary key.
    def get_by_natural_key(self, name):
        # NOTE(review): the parameter is called ``name`` but is matched
        # against ``owner`` -- presumably a User natural key; confirm.
        return self.get(owner=name)
# Create your models here.
class Calendar(models.Model):
    """A named calendar belonging to a single user.

    ``name`` and ``lastModified`` feed the VCALENDAR header when an ics
    file is generated (per the author's notes below).
    """
    ## Seems pretty simple, we'll generate a default calendar on registration of an account,
    ## and give users the option to create additional calendars. We can pass the name and
    ## lastModified information to the VCALENDAR header when we generate a ics file.
    name = models.CharField(max_length=128)
    lastModified = models.DateTimeField(auto_now=True)  # refreshed on every save
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    class Meta:
        # A user cannot own two calendars with the same name.
        unique_together = ['name', 'owner']
##EventCategory is another meh feature. While we can hook it up and have an extra facet
## of searchability of our calendar objects, it's not really worth while to implement at
## this time as the nature of our calendars are mostly private. More of an interesting
## feature for public calendars where you would want to use metadeta to get more eyes on it.
##class EventCategory(models.Model):
## We could allow users to submit their own categories, but I think for this purpose
## nudging users towards our own predefined categories will help to keep activities
## like searching simple on both the developer and user end.
##name = models.CharField(max_length=64)
##def __str__(self):
## return f'{self.name}'
## baseEvent feeds to both services and events. While I had considered making services take a user
## as the originator of the event, we simplify the logic (as well as create more customization for
## individual calendars) if we keep services and events as similar as possible.
class baseEvent(models.Model):
    """Abstract base shared by :class:`Service` and :class:`Event`.

    Carries the fields common to both: summary, description, duration,
    visibility class and price.
    """
    summary = models.CharField(max_length=64)
    ##categories = models.ManyToManyField(EventCategory)
    description = models.TextField(max_length=512)
    duration = models.DurationField()
    ## eventClass determines private/public events
    # (the values mirror the iCalendar CLASS property).
    CLASSKEYS = [
        ("PUBLIC", "PUBLIC"),
        ("PRIVATE", "PRIVATE"),
        ("CONFIDENTIAL", "CONFIDENTIAL")
    ]
    eventClass = models.CharField(max_length=16, choices=CLASSKEYS, default="PRIVATE")
    price = models.DecimalField(max_digits=6, decimal_places=2, default=0.00)
    class Meta:
        # Abstract: no database table is created for baseEvent itself.
        abstract = True
## Users can define thier own services which we can use to pre-populate event queries.
class Service(baseEvent):
    """A reusable event template defined by a user.

    Visibility is gated by the client's relationship tier (``RELCHOICES``);
    only clients at or above ``relationRequired`` see the service.
    """
    organizer = models.ForeignKey(User, on_delete=models.CASCADE)
    active = models.BooleanField(default=True)
    ## used to determine which services are rendered
    relationRequired = models.IntegerField(choices=RELCHOICES, default=3)
class Event(baseEvent):
    """A scheduled event on a calendar, modeled after iCalendar (RFC 5545)."""
    ## We pass our events based on calendar, so we'll take calendars for the Organizer as well as Attendees
    mainCalendar = models.ForeignKey(Calendar, on_delete=models.CASCADE, related_name="Organizer")
    ## We're going to make our event model as similar as possible to ICal 5455 spec, so we'll want to try to keep
    ## our properties as similar to those as possible
    uid = models.CharField(max_length=100, primary_key=True, unique=True, default=uuid4)
    created = models.DateTimeField(auto_now_add=True) ## time created
    lastModified = models.DateTimeField(auto_now=True)
    sequence = models.IntegerField(default=0) ## number of edits
    ## Use Datetime to find any collisions between times provided
    dtstart = models.DateTimeField() ## start of event
    dtend = models.DateTimeField(blank=True, null=True)
    ## attendees can be added by event host, and the host can allow people to +1 public events
    attendee = models.ManyToManyField(Calendar, related_name="Attendees", blank=True)
    ## URL is for representations of this event, so we'll use Location for links/whathaveyou
    location = models.CharField(max_length=128, blank=True)
    ## confirmation will default to "tentative" when a participant makes a request, which should prompt
    ## some sort of message to the host for them to confirm the event.
    STATUSKEYS = [
        ('TENTATIVE','TENTATIVE'),
        ('CONFIRMED','CONFIRMED'),
        ('CANCELLED', 'CANCELLED')
    ]
    status = models.CharField(max_length=16, choices=STATUSKEYS, default='TENTATIVE')
    ## TRANSP is more of a speculative class at the moment, I wouldn't want users to double book time.
    transp = models.CharField(max_length=16, default="OPAQUE") ## can easily swap out with TRANSPKEYS class
    def save(self, *args, **kwargs):
        ##Update the blank dtend to the end of the event
        # NOTE(review): dtend is recomputed on *every* save, clobbering any
        # explicitly-set value -- confirm that is intended.
        self.dtend = self.dtstart + self.duration
        super().save(*args, **kwargs)
    ## TRANSP may play a role in this later, but to my mind any confirmed a event should own a start time. Our client side handles
    ## a fair bit of restricting event creation to unoccupied time, but server side restrictions are prudent as well.
    class Meta:
        # DB-level guard: at most one CONFIRMED event per start time.
        constraints = [
            models.UniqueConstraint(fields=['dtstart'], condition=models.Q(status='CONFIRMED'), name='unique_confirmed_event')
        ]
class AvailabilityRule(models.Model):
    """A recurring (un)availability window attached to a calendar.

    Rules combine by specificity of ``scope`` (more specific wins) and are
    additionally tiered by ``relationship``; see the author's notes below.
    """
    ## So, for availability we want users to specify when they are available to when they aren't. We
    ## also want to classify 4 tiers of time availability, which match our relationship tiers.
    name = models.CharField(max_length=64)
    begin = models.DateTimeField(blank=True, null=True)
    end = models.DateTimeField(blank=True, null=True)
    unavailable = models.BooleanField(default=True)
    relationship = models.IntegerField(choices=RELCHOICES, default=0) ## Enable this for availability later
    calendar = models.ForeignKey(Calendar, on_delete=models.CASCADE)
    ## When we parse these choices, the more specific the choice, the higher the precedence. So if
    ## a user has 3 availability rules like Everyday:9-5, Weekends:10-2, Sunday: no time, then
    ## Saturdays are 10-2, sundays are no time.
    DAYCHOICES = [
        ('EVD', 'Everyday'),
        ('WDY', 'Weekdays'),
        ('WND', 'Weekends'),
        ('MON', 'Mondays'),
        ('TUE', 'Tuesdays'),
        ('WED', 'Wednesdays'),
        ('THU', 'Thursdays'),
        ('FRI', 'Fridays'),
        ('SAT', 'Saturdays'),
        ('SUN', 'Sundays')
    ]
    scope = models.CharField(max_length=3, choices=DAYCHOICES, default='EVD')
    ## Is this rule being applied, or just a saved preset?
    active = models.BooleanField(default=True)
    ## Ideally, we would have the user create a rule, then they can see it overlaid on a week
    ## table. If they have multiple active, the overlay will morph to reflect that. Besides
    ## the scope inheritance, we'll also have inheritance based on relationship. So public
    ## availability -> ... -> Priority relationship. Big question is whether we sequester each
    ## level of relationship in their own tab on the edit screen, or perhaps we color code our
    ## relationship availabilities to show all the info on one calendar page.
7958e5b4409d56818d4ed7fef0478e2ca853bec7 | 1,314 | py | Python | build/catkin_generated/generate_cached_setup.py | tianchenji/gem-gazebo | b0e45d55eb2805327f628625006ffedbe1783774 | [
"MIT"
] | null | null | null | build/catkin_generated/generate_cached_setup.py | tianchenji/gem-gazebo | b0e45d55eb2805327f628625006ffedbe1783774 | [
"MIT"
] | null | null | null | build/catkin_generated/generate_cached_setup.py | tianchenji/gem-gazebo | b0e45d55eb2805327f628625006ffedbe1783774 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/turtlebot/catkin_ws/devel;/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/turtlebot/gem-gazebo/devel/env.sh')
output_filename = '/home/turtlebot/gem-gazebo/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| 42.387097 | 102 | 0.736682 |
7958e5c22ada98cf8fcb383fe0a8c5493f54b3a0 | 2,538 | py | Python | tests/extmod/uctypes_native_le.py | TG-Techie/circuitpython | 390295dd218fb705fe652de77132dea472adf1ed | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 663 | 2018-12-30T00:17:59.000Z | 2022-03-14T05:03:41.000Z | tests/extmod/uctypes_native_le.py | TG-Techie/circuitpython | 390295dd218fb705fe652de77132dea472adf1ed | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 41 | 2019-06-06T08:31:19.000Z | 2022-02-13T16:53:41.000Z | tests/extmod/uctypes_native_le.py | TG-Techie/circuitpython | 390295dd218fb705fe652de77132dea472adf1ed | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 60 | 2019-06-01T04:25:00.000Z | 2022-02-25T01:47:31.000Z | # This test is exactly like uctypes_le.py, but uses native structure layout.
# Codepaths for packed vs native structures are different. This test only works
# on little-endian machine (no matter if 32 or 64 bit).
import sys
try:
import uctypes
except ImportError:
print("SKIP")
raise SystemExit
if sys.byteorder != "little":
print("SKIP")
raise SystemExit
desc = {
"s0": uctypes.UINT16 | 0,
"sub": (0, {"b0": uctypes.UINT8 | 0, "b1": uctypes.UINT8 | 1}),
"arr": (uctypes.ARRAY | 0, uctypes.UINT8 | 2),
"arr2": (uctypes.ARRAY | 0, 2, {"b": uctypes.UINT8 | 0}),
"bitf0": uctypes.BFUINT16 | 0 | 0 << uctypes.BF_POS | 8 << uctypes.BF_LEN,
"bitf1": uctypes.BFUINT16 | 0 | 8 << uctypes.BF_POS | 8 << uctypes.BF_LEN,
"bf0": uctypes.BFUINT16 | 0 | 0 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"bf1": uctypes.BFUINT16 | 0 | 4 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"bf2": uctypes.BFUINT16 | 0 | 8 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"bf3": uctypes.BFUINT16 | 0 | 12 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"ptr": (uctypes.PTR | 0, uctypes.UINT8),
"ptr2": (uctypes.PTR | 0, {"b": uctypes.UINT8 | 0}),
}
data = bytearray(b"01")
S = uctypes.struct(uctypes.addressof(data), desc, uctypes.NATIVE)
# print(S)
print(hex(S.s0))
assert hex(S.s0) == "0x3130"
# print(S.sub.b0)
print(S.sub.b0, S.sub.b1)
assert S.sub.b0, S.sub.b1 == (0x30, 0x31)
try:
S[0]
assert False, "Can't index struct"
except TypeError:
print("TypeError")
print("arr:", S.arr[0], S.arr[1])
assert (S.arr[0], S.arr[1]) == (0x30, 0x31)
print("arr of struct:", S.arr2[0].b, S.arr2[1].b)
assert (S.arr2[0].b, S.arr2[1].b) == (0x30, 0x31)
try:
S.arr[2]
assert False, "Out of bounds index"
except IndexError:
print("IndexError")
print("bf:", S.bitf0, S.bitf1)
assert (S.bitf0, S.bitf1) == (0x30, 0x31)
print("bf 4bit:", S.bf3, S.bf2, S.bf1, S.bf0)
assert (S.bf3, S.bf2, S.bf1, S.bf0) == (3, 1, 3, 0)
# Write access
S.sub.b0 = ord("2")
print(data)
assert bytes(data) == b"21"
S.bf3 = 5
print(data)
assert bytes(data) == b"2Q"
desc2 = {
"bf8": uctypes.BFUINT8 | 0 | 0 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"bf32": uctypes.BFUINT32 | 0 | 20 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
}
data2 = bytearray(b"0123")
S2 = uctypes.struct(uctypes.addressof(data2), desc2, uctypes.NATIVE)
# bitfield using uint8 as base type
S2.bf8 = 5
print(data2)
assert bytes(data2) == b"5123"
# bitfield using uint32 as base type
S2.bf32 = 5
print(data2)
assert bytes(data2) == b"51R3"
| 26.4375 | 79 | 0.62963 |
7958e602ce18e615a314b68529c0d6315b406a02 | 2,270 | py | Python | rllib/airgym/envs/test.py | rlturkiye/flying-cavalry | a56c304afd970deaf762b0c6c90fb141106cbd81 | [
"MIT"
] | 9 | 2021-05-22T09:44:41.000Z | 2022-03-16T17:26:58.000Z | rllib/airgym/envs/test.py | rlturkiye/flying-cavalry | a56c304afd970deaf762b0c6c90fb141106cbd81 | [
"MIT"
] | 2 | 2021-05-22T12:11:33.000Z | 2021-07-05T02:27:50.000Z | rllib/airgym/envs/test.py | rlturkiye/flying-cavalry | a56c304afd970deaf762b0c6c90fb141106cbd81 | [
"MIT"
] | 1 | 2021-05-22T09:44:53.000Z | 2021-05-22T09:44:53.000Z | import airsim
from airsim.types import YawMode
import numpy as np
import random
from time import sleep
import math
# Connect to the AirSim simulator via the Car API client.
drone = airsim.CarClient()
#print(drone.getCarState().kinematics_estimated.position)
#print(drone.simGetObjectPose("KargoArabasi").position)
#print(random.randint(0, 4))
def transform_angle(yaw):
    """Map a normalized yaw in ``[-1, 1]`` to a degree index in ``[0, 359]``.

    Equivalent to scanning ``np.linspace(-1, 1, 360)`` for the first value
    ``>= yaw`` and returning its index.

    Args:
        yaw (float): Normalized yaw, expected in ``[-1, 1]``.

    Returns:
        int: Degree index in ``[0, 359]``.
    """
    phi = np.linspace(-1, 1, 360)
    # searchsorted(side="left") yields the first index with phi[i] >= yaw,
    # matching the original linear scan but vectorized.
    degree = int(np.searchsorted(phi, yaw, side="left"))
    # Bug fix: a yaw above 1 previously left ``degree`` unbound and raised
    # UnboundLocalError; clamp to the last valid index instead.
    return min(degree, 359)
print(transform_angle(0))
# Geofence bounding box; indices are combined as (c0,c1),(c0,c3),(c2,c3),(c2,c1),
# i.e. the four corners of a rectangle -- presumably [x_min, y_min, x_max, y_max].
# NOTE(review): all zeros here look like placeholder values; confirm.
geoFenceCoords = [0, 0, 0, 0]
geofence_corners = ((geoFenceCoords[0], geoFenceCoords[1]),
                    (geoFenceCoords[0], geoFenceCoords[3]),
                    (geoFenceCoords[2], geoFenceCoords[3]),
                    (geoFenceCoords[2], geoFenceCoords[1]))
# Offsets one unit inside the first corner.
dx = geofence_corners[0][0] - 1
dy = geofence_corners[0][1] - 1
"""drone.reset()
drone.enableApiControl(True)
drone.armDisarm(True)
drone.takeoffAsync()
vel = drone.getMultirotorState().kinematics_estimated.linear_velocity
drone.moveByVelocityZAsync(
vel.x_val,
vel.y_val,
-15,
3
).join()
def interpret_action(action):
step_length = 10
if action == 0:
quad_offset = (step_length, 0, 1)
elif action == 1:
quad_offset = (0, step_length, 1)
elif action == 2:
quad_offset = (-step_length, 0, 1)
elif action == 3:
quad_offset = (0, -step_length, 1)
elif action == 4:
quad_offset = (step_length, 0, -1)
elif action == 5:
quad_offset = (0, step_length, -1)
elif action == 6:
quad_offset = (-step_length, 0, -1)
elif action == 7:
quad_offset = (0, -step_length, -1)
else:
quad_offset = (0, 0, 0)
return quad_offset
while True:
yawMode = YawMode(is_rate=True, yaw_or_rate=random.randint(0, 1))
quad_offset = interpret_action(random.randint(0, 9))
vel = drone.getMultirotorState().kinematics_estimated.linear_velocity
zpos = drone.getMultirotorState().kinematics_estimated.position.z_val
drone.moveByVelocityZAsync(
vel.x_val + quad_offset[0],
vel.y_val + quad_offset[1],
zpos + quad_offset[2],
10,
yaw_mode=yawMode
)
sleep(1)
"""
| 28.375 | 73 | 0.617181 |
7958e70153ce5a5858fc5ef3ec7713709317624e | 4,559 | py | Python | _pycharm_skeletons/renderdoc/SDObject.py | Lex-DRL/renderdoc-py-stubs | 75d280e4f500ded506f3315a49fc432b37ab4fa6 | [
"MIT"
] | null | null | null | _pycharm_skeletons/renderdoc/SDObject.py | Lex-DRL/renderdoc-py-stubs | 75d280e4f500ded506f3315a49fc432b37ab4fa6 | [
"MIT"
] | null | null | null | _pycharm_skeletons/renderdoc/SDObject.py | Lex-DRL/renderdoc-py-stubs | 75d280e4f500ded506f3315a49fc432b37ab4fa6 | [
"MIT"
] | null | null | null | # encoding: utf-8
# module renderdoc
# from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd
# by generator 1.146
# no doc
# imports
import enum as __enum
from .SwigPyObject import SwigPyObject
class SDObject(SwigPyObject):
""" Defines a single structured object. """
def AddChild(self, child): # real signature unknown; restored from __doc__
"""
AddChild(child)
Add a new child object by duplicating it.
"""
pass
def AsBool(self): # real signature unknown; restored from __doc__
"""
AsBool()
Interprets the object as a ``bool`` and returns its value.
Invalid if the object is not actually a ``bool``.
"""
pass
def AsFloat(self): # real signature unknown; restored from __doc__
"""
AsFloat()
Interprets the object as a floating point number and returns its value.
Invalid if the object is not actually a floating point number.
"""
pass
def AsInt(self): # real signature unknown; restored from __doc__
"""
AsInt()
Interprets the object as an integer and returns its value.
Invalid if the object is not actually an integer.
"""
pass
def AsResourceId(self): # real signature unknown; restored from __doc__
"""
AsResourceId()
Interprets the object as a :class:`ResourceId` and returns its value.
Invalid if the object is not actually a :class:`ResourceId`.
"""
pass
def AsString(self): # real signature unknown; restored from __doc__
"""
AsString()
Interprets the object as a string and returns its value.
Invalid if the object is not actually a string.
"""
pass
def Duplicate(self): # real signature unknown; restored from __doc__
"""
Duplicate()
Create a deep copy of this object.
"""
pass
def FindChild(self, childName): # real signature unknown; restored from __doc__
"""
FindChild(childName)
Find a child object by a given name.
"""
pass
def GetChild(self, index): # real signature unknown; restored from __doc__
"""
GetChild(index)
Get a child object at a given index.
"""
pass
def GetChildren(self): # real signature unknown; restored from __doc__
"""
GetChildren()
Get a ``list`` of :class:`SDObject` children.
"""
pass
def NumChildren(self): # real signature unknown; restored from __doc__
"""
NumChildren()
Get the number of child objects.
"""
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
    """ Return self<value. """
    # Stub for the comparison implemented in the native extension.
    pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
    """ Create and return a new object.  See help(type) for accurate signature. """
    # Stub: allocation is handled by the native extension.
    pass
def __ne__(self, *args, **kwargs): # real signature unknown
    """ Return self!=value. """
    # Stub for the comparison implemented in the native extension.
    pass
# Auto-generated property placeholders: the lambda-based property() calls only
# model the getter/setter/deleter slots for introspection purposes; the real
# descriptors live in the native extension module.
data = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
"""The :class:`SDObjectData` with the contents of this object."""
name = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
"""The name of this object."""
# 'this'/'thisown' are SWIG-style wrapper bookkeeping attributes.
this = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
thisown = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
type = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
"""The :class:`SDType` of this object."""
__dict__ = None  # (!) real value is ''
| 28.31677 | 97 | 0.586093 |
7958e76862c46f95ae940e84090876437c8ebe94 | 9,577 | py | Python | mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null | mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null | mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null | # RUN: SUPPORT_LIB=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \
# RUN: %PYTHON %s | FileCheck %s
import ctypes
import errno
import itertools
import os
import sys
from typing import List, Callable
import numpy as np
from mlir import ir
from mlir import runtime as rt
from mlir.execution_engine import ExecutionEngine
from mlir.dialects import builtin
from mlir.dialects import std
from mlir.dialects import sparse_tensor as st
_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import sparse_compiler
# ===----------------------------------------------------------------------=== #
# TODO: move this boilerplate to its own module, so it can be used by
# other tests and programs.
class TypeConverter:
    """Two-way mapping between NumPy scalar types / dtypes and MLIR types."""

    def __init__(self, context: ir.Context):
        # Note 1: these are numpy "scalar types" (i.e., the values of
        # np.sctypeDict) not numpy "dtypes" (i.e., the np.dtype class).
        #
        # Note 2: we must construct the MLIR types in the same context as the
        # types that'll be passed to irtype_to_sctype() or irtype_to_dtype();
        # otherwise, those methods will raise a KeyError.
        pairs = [
            (np.float64, ir.F64Type.get(context=context)),
            (np.float32, ir.F32Type.get(context=context)),
            (np.int64, ir.IntegerType.get_signless(64, context=context)),
            (np.int32, ir.IntegerType.get_signless(32, context=context)),
            (np.int16, ir.IntegerType.get_signless(16, context=context)),
            (np.int8, ir.IntegerType.get_signless(8, context=context)),
        ]
        # Forward (scalar type -> MLIR type) and inverse lookup tables.
        self._sc2ir = {sc: irtp for sc, irtp in pairs}
        self._ir2sc = {irtp: sc for sc, irtp in pairs}

    def dtype_to_irtype(self, dtype: np.dtype) -> ir.Type:
        """Returns the MLIR equivalent of a NumPy dtype."""
        try:
            return self.sctype_to_irtype(dtype.type)
        except KeyError as e:
            raise KeyError(f'Unknown dtype: {dtype}') from e

    def sctype_to_irtype(self, sctype) -> ir.Type:
        """Returns the MLIR equivalent of a NumPy scalar type."""
        if sctype not in self._sc2ir:
            raise KeyError(f'Unknown sctype: {sctype}')
        return self._sc2ir[sctype]

    def irtype_to_dtype(self, tp: ir.Type) -> np.dtype:
        """Returns the NumPy dtype equivalent of an MLIR type."""
        return np.dtype(self.irtype_to_sctype(tp))

    def irtype_to_sctype(self, tp: ir.Type):
        """Returns the NumPy scalar-type equivalent of an MLIR type."""
        if tp not in self._ir2sc:
            raise KeyError(f'Unknown ir.Type: {tp}')
        return self._ir2sc[tp]

    def get_RankedTensorType_of_nparray(self, nparray: np.ndarray) -> ir.RankedTensorType:
        """Returns the ir.RankedTensorType of a NumPy array.  Note that NumPy
        arrays can only be converted to/from dense tensors, not sparse tensors."""
        # TODO: handle strides as well?
        element_type = self.dtype_to_irtype(nparray.dtype)
        return ir.RankedTensorType.get(nparray.shape, element_type)
# ===----------------------------------------------------------------------=== #
class StressTest:
    """Builds, compiles and runs an MLIR module that round-trips a dense
    tensor through a chain of sparse-tensor conversions, verifying that the
    final result matches the input."""

    def __init__(self, tyconv: TypeConverter):
        # Converter used to check numpy <-> MLIR type agreement in run().
        self._tyconv = tyconv
        # Set lazily: build() fixes the roundtrip type and module,
        # compile() creates the execution engine.
        self._roundtripTp = None
        self._module = None
        self._engine = None

    def _assertEqualsRoundtripTp(self, tp: ir.RankedTensorType):
        """Raise AssertionError unless *tp* equals the type fixed by build()."""
        assert self._roundtripTp is not None, \
            'StressTest: uninitialized roundtrip type'
        if tp != self._roundtripTp:
            raise AssertionError(
                f"Type is not equal to the roundtrip type.\n"
                f"\tExpected: {self._roundtripTp}\n"
                f"\tFound: {tp}\n")

    def build(self, types: List[ir.Type]):
        """Builds the ir.Module.  The module has only the @main function,
        which will convert the input through the list of types and then back
        to the initial type.  The roundtrip type must be a dense tensor.

        NOTE: mutates *types* in place (pops the head, appends it at the tail).
        Returns self for chaining."""
        assert self._module is None, 'StressTest: must not call build() repeatedly'
        self._module = ir.Module.create()
        with ir.InsertionPoint(self._module.body):
            tp0 = types.pop(0)
            self._roundtripTp = tp0
            # TODO: assert dense? assert element type is recognised by the TypeConverter?
            types.append(tp0)
            funcTp = ir.FunctionType.get(inputs=[tp0], results=[tp0])
            funcOp = builtin.FuncOp(name='main', type=funcTp)
            # Needed so the ExecutionEngine can invoke @main via ctypes.
            funcOp.attributes['llvm.emit_c_interface'] = ir.UnitAttr.get()
            with ir.InsertionPoint(funcOp.add_entry_block()):
                arg0 = funcOp.entry_block.arguments[0]
                self._assertEqualsRoundtripTp(arg0.type)
                v = st.ConvertOp(types.pop(0), arg0)
                for tp in types:
                    w = st.ConvertOp(tp, v)
                    # Release intermediate tensors before they fall out of scope.
                    st.ReleaseOp(v.result)
                    v = w
                self._assertEqualsRoundtripTp(v.result.type)
                std.ReturnOp(v)
        return self

    def writeTo(self, filename):
        """Write the ir.Module to the given file.  If the file already exists,
        then raises an error.  If the filename is None, then is a no-op."""
        assert self._module is not None, \
            'StressTest: must call build() before writeTo()'
        if filename is None:
            # Silent no-op, for convenience.
            return self
        if os.path.exists(filename):
            raise FileExistsError(errno.EEXIST, os.strerror(errno.EEXIST), filename)
        with open(filename, 'w') as f:
            f.write(str(self._module))
        return self

    def compile(self, compiler, support_lib: str):
        """Compile the ir.Module (in place, via *compiler*) and create the
        execution engine backed by *support_lib*.  Returns self."""
        assert self._module is not None, \
            'StressTest: must call build() before compile()'
        assert self._engine is None, \
            'StressTest: must not call compile() repeatedly'
        compiler(self._module)
        self._engine = ExecutionEngine(
            self._module, opt_level=0, shared_libs=[support_lib])
        return self

    def run(self, np_arg0: np.ndarray) -> np.ndarray:
        """Runs the test on the given numpy array, and returns the resulting
        numpy array."""
        assert self._engine is not None, \
            'StressTest: must call compile() before run()'
        self._assertEqualsRoundtripTp(
            self._tyconv.get_RankedTensorType_of_nparray(np_arg0))
        np_out = np.zeros(np_arg0.shape, dtype=np_arg0.dtype)
        self._assertEqualsRoundtripTp(
            self._tyconv.get_RankedTensorType_of_nparray(np_out))
        # MLIR's C interface passes memrefs as pointer-to-pointer descriptors.
        mem_arg0 = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(np_arg0)))
        mem_out = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(np_out)))
        self._engine.invoke('main', mem_out, mem_arg0)
        return rt.ranked_memref_to_numpy(mem_out[0])
# ===----------------------------------------------------------------------=== #
def main():
    """
    USAGE: python3 test_stress.py [raw_module.mlir [compiled_module.mlir]]
    The environment variable SUPPORT_LIB must be set to point to the
    libmlir_c_runner_utils shared library.  There are two optional
    arguments, for debugging purposes.  The first argument specifies where
    to write out the raw/generated ir.Module.  The second argument specifies
    where to write out the compiled version of that ir.Module.
    """
    support_lib = os.getenv('SUPPORT_LIB')
    assert support_lib is not None, 'SUPPORT_LIB is undefined'
    if not os.path.exists(support_lib):
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), support_lib)
    # CHECK-LABEL: TEST: test_stress
    print("\nTEST: test_stress")
    with ir.Context() as ctx, ir.Location.unknown():
        # Fixed sparsification options; all strategies disabled for this test.
        par = 0
        vec = 0
        vl = 1
        e = False
        sparsification_options = (
            f'parallelization-strategy={par} '
            f'vectorization-strategy={vec} '
            f'vl={vl} '
            f'enable-simd-index32={e}')
        compiler = sparse_compiler.SparseCompiler(options=sparsification_options)
        f64 = ir.F64Type.get()
        # Be careful about increasing this because
        #     len(types) = 1 + 2^rank * rank! * len(bitwidths)^2
        shape = range(2, 6)
        rank = len(shape)
        # All combinations.
        levels = list(itertools.product(*itertools.repeat(
            [st.DimLevelType.dense, st.DimLevelType.compressed], rank)))
        # All permutations.
        orderings = list(map(ir.AffineMap.get_permutation,
                             itertools.permutations(range(rank))))
        bitwidths = [0]
        # The first type must be a dense tensor for numpy conversion to work.
        types = [ir.RankedTensorType.get(shape, f64)]
        # Enumerate every sparse encoding for the fixed shape/element type.
        for level in levels:
            for ordering in orderings:
                for pwidth in bitwidths:
                    for iwidth in bitwidths:
                        attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth)
                        types.append(ir.RankedTensorType.get(shape, f64, attr))
        #
        # For exhaustiveness we should have one or more StressTest, such
        # that their paths cover all 2*n*(n-1) directed pairwise combinations
        # of the `types` set.  However, since n is already superexponential,
        # such exhaustiveness would be prohibitive for a test that runs on
        # every commit.  So for now we'll just pick one particular path that
        # at least hits all n elements of the `types` set.
        #
        tyconv = TypeConverter(ctx)
        # Input tensor: 0..size-1 reshaped to `shape`.
        size = 1
        for d in shape:
            size *= d
        np_arg0 = np.arange(size, dtype=tyconv.irtype_to_dtype(f64)).reshape(*shape)
        np_out = (
            StressTest(tyconv).build(types).writeTo(
                sys.argv[1] if len(sys.argv) > 1 else None).compile(
                    compiler, support_lib).writeTo(
                        sys.argv[2] if len(sys.argv) > 2 else None).run(np_arg0))
        # CHECK: Passed
        if np.allclose(np_out, np_arg0):
            print('Passed')
        else:
            sys.exit('FAILURE')
| 39.25 | 88 | 0.666493 |
7958e824e0194d4727eea659a2d12d33b9eed812 | 2,379 | py | Python | macdaily/cls/install/pip.py | JarryShaw/MacDaily | 853b841dd1f1f7e6aae7bf2c305ff008bc76055c | [
"BSD-3-Clause"
] | 10 | 2018-09-20T19:57:56.000Z | 2021-11-14T18:28:10.000Z | macdaily/cls/install/pip.py | JarryShaw/jsdaily | 3ca7aa7c75a12dc08ab44f78af2b089e1ed41d3d | [
"BSD-3-Clause"
] | 2 | 2020-05-31T08:49:47.000Z | 2021-12-28T16:57:42.000Z | macdaily/cls/install/pip.py | JarryShaw/jsdaily | 3ca7aa7c75a12dc08ab44f78af2b089e1ed41d3d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from macdaily.cmd.install import InstallCommand
from macdaily.core.pip import PipCommand
from macdaily.util.tools.print import print_info, print_scpt
from macdaily.util.tools.script import sudo
class PipInstall(PipCommand, InstallCommand):
    """``pip install`` driver: parses command-line options and installs the
    requested packages one at a time via ``<python> -m pip install``."""

    def _parse_args(self, namespace):
        # Copy CLI options from the namespace onto private attributes;
        # every option has a safe default so missing keys are harmless.
        self._brew = namespace.get('brew', False)  # pylint: disable=attribute-defined-outside-init
        self._cpython = namespace.get('cpython', False)  # pylint: disable=attribute-defined-outside-init
        self._no_cleanup = namespace.get('no_cleanup', False)  # pylint: disable=attribute-defined-outside-init
        self._pre = namespace.get('pre', False)  # pylint: disable=attribute-defined-outside-init
        self._pypy = namespace.get('pypy', False)  # pylint: disable=attribute-defined-outside-init
        self._system = namespace.get('system', False)  # pylint: disable=attribute-defined-outside-init
        self._user = namespace.get('user', False)  # pylint: disable=attribute-defined-outside-init
        self._quiet = namespace.get('quiet', False)  # pylint: disable=attribute-defined-outside-init
        self._verbose = namespace.get('verbose', False)  # pylint: disable=attribute-defined-outside-init
        self._yes = namespace.get('yes', False)  # pylint: disable=attribute-defined-outside-init
        # Extra options passed through verbatim to `pip install`.
        self._install_opts = namespace.get('install', str()).split()  # pylint: disable=attribute-defined-outside-init

    def _proc_install(self, path):
        """Install every package in ``self._var__temp_pkgs`` using the Python
        interpreter at *path*, recording successes in ``self._pkgs`` and
        failures in ``self._fail``."""
        text = f'Installing specified {self.desc[1]}'
        print_info(text, self._file, redirect=self._qflag)

        argv = [path, '-m', 'pip', 'install']
        if self._pre:
            argv.append('--pre')
        if self._user:
            argv.append('--user')
        if self._quiet:
            argv.append('--quiet')
        if self._verbose:
            argv.append('--verbose')
        argv.extend(self._install_opts)

        # Placeholder slot, overwritten with each package name in the loop.
        argv.append('')
        for package in self._var__temp_pkgs:
            argv[-1] = package
            print_scpt(argv, self._file, redirect=self._qflag)
            # sudo() returns a non-zero status on failure.
            if sudo(argv, self._file, self._password, timeout=self._timeout,
                    redirect=self._qflag, verbose=self._vflag, sethome=True):
                self._fail.append(package)
            else:
                self._pkgs.append(package)
        del self._var__temp_pkgs
7958e8be36da6380c479203f6844c7801d423bd7 | 1,691 | py | Python | python-cim/cim/mutablenamedtuple.py | pombredanne/flare-wmi | fdde184b7bf98e1043f8246e0c75a7a1316c48c9 | [
"Apache-2.0"
] | 390 | 2015-08-06T20:33:01.000Z | 2021-09-11T21:43:34.000Z | python-cim/cim/mutablenamedtuple.py | pombredanne/flare-wmi | fdde184b7bf98e1043f8246e0c75a7a1316c48c9 | [
"Apache-2.0"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/vstructui/mutablenamedtuple.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 106 | 2015-08-08T22:44:50.000Z | 2021-08-25T09:20:57.000Z | """
mutablenamedtuple is like collections.namedtuple, but the fields
may be modified. This makes it basically a record type.
Desired usage:
F = mutablenamedtuple("F", ["foo", "bar", "baz"])
f = F(1, bar=2, baz=3)
f.baz = 9
print(f)
--> "F(foo=1, bar=2, baz=9)"
"""
def _mutablenamedtuple__init(self, *args, **kwargs):
super(self.__class__, self).__init__()
if len(args) > len(self.__fields__):
raise RuntimeError("Too many arguments provided to {classname}.__init__".format(
classname=self.__class__.__name__))
for i, arg in enumerate(args):
setattr(self, self.__fields__[i], arg)
for k, v in kwargs.items():
if k not in self.__fields__:
raise RuntimeError("{fieldname} not a valid field in {classname}".format(
fieldname=k, classname=self.__class__.__name__))
setattr(self, k, v)
def _mutablenamedtuple__str(self):
formatted_fields = []
for k in self.__fields__:
v = getattr(self, k)
formatted_v = str(v)
if len(formatted_v) > 0xa:
formatted_v = formatted_v[:0xa] + "..."
formatted_fields.append("{key}={value}".format(key=k, value=formatted_v))
return "{classname}({values})".format(classname=self.__class__.__name__,
values=", ".join(formatted_fields))
def mutablenamedtuple(name, fields):
    """Create a mutable record type named *name* with the given *fields*.

    Each field defaults to None at the class level; instances may freely
    reassign fields after construction.
    """
    attrs = dict.fromkeys(fields)          # every field defaults to None
    attrs["__fields__"] = fields[:]        # private copy of the field list
    attrs["__init__"] = _mutablenamedtuple__init
    attrs["__str__"] = _mutablenamedtuple__str
    attrs["__repr__"] = _mutablenamedtuple__str
    return type(name, (object,), attrs)
7958e8f0998c95c2736a0019cc223253cd66d24e | 921 | py | Python | tests/test-order_test-case.py | omarion3698/Book-A-Meal | 330a2331ee13b52772d20eedbc00fc4364585709 | [
"MIT"
] | null | null | null | tests/test-order_test-case.py | omarion3698/Book-A-Meal | 330a2331ee13b52772d20eedbc00fc4364585709 | [
"MIT"
] | null | null | null | tests/test-order_test-case.py | omarion3698/Book-A-Meal | 330a2331ee13b52772d20eedbc00fc4364585709 | [
"MIT"
] | null | null | null | def test_get_all_menus(self):
""" Test API can get all menus."""
self.menu = {
"name": "KFC Special",
"description": "Finger lickin chicken"
}
response = self.app.post("/api/v1/menus", data = self.menu)
self.assertEqual(response.status_code, 201)
response = self.app.get("/api/v1/menus")
self.assertEqual(response.status_code, 200)
self.assertIn(self.menu["name"], str(response.data))
self.assertIn('Finger lickin', str(response.data))
def test_update_a_menu(self):
    """ Test API can update a menu. """
    # Create a menu first so there is something to update.
    self.menu = {"name": "KFC Special", "description": "Finger lickin chicken"}
    response = self.app.post("/api/v1/menus", data=self.menu)
    self.assertEqual(response.status_code, 201)
    # Update menu #1 and expect the API to acknowledge the change.
    # NOTE: the original final line had foreign text fused onto it, making
    # the method syntactically invalid; restored here.
    self.menu = {"name": "KFC Special 3", "description": "Just Eat Finger lickin chicken..."}
    response = self.app.put("/api/v1/menus/1", data=self.menu)
    self.assertEqual(response.status_code, 201)
7958e96b443967cbf237c8e9f11a4ff5808e77a5 | 12,529 | py | Python | utils/progress_bar.py | Syler1984/seismo-ml-models-integration | fbfb98cda1b59699bc17e51d51de314be52e259e | [
"MIT"
] | null | null | null | utils/progress_bar.py | Syler1984/seismo-ml-models-integration | fbfb98cda1b59699bc17e51d51de314be52e259e | [
"MIT"
] | 1 | 2021-05-14T00:12:13.000Z | 2021-05-14T00:12:13.000Z | utils/progress_bar.py | Syler1984/seismo-ml-models-integration | fbfb98cda1b59699bc17e51d51de314be52e259e | [
"MIT"
] | null | null | null | class ProgressBar:
def __init__(self):
self.progress_maxes = {}
self.progress = {}
self.progress_char = '#'
self.current_progress_char = '#'
self.empty_char = '-'
self.progress_char_length = 30
self._prefix_expression = None
self._postfix_expression = None
self._prefix_kwargs = {}
self._postfix_kwargs = {}
self._last_printed_line_length = 0
def set_length(self, length):
"""
Set progress bar length (number of characters).
:param length:
"""
if length < 0:
raise AttributeError('length should be positive')
self.progress_char_length = length
def set_empty_character(self, char):
"""
Set displayed character (or string) for empty progress part of the bar.
:param char: str
"""
self.empty_char = char
def set_progress_character(self, char):
"""
Set displayed character (or string) for finished progress part of the bar.
:param char: str
"""
self.progress_char = char
def set_current_progress_char(self, char):
"""
Set character for (or string) current progress identification.
:param char: str
:return:
"""
self.current_progress_char = char
def __str__(self):
"""
Renders progress bar as a string.
:return: str
"""
# Render prefix
prefix = ''
if self._prefix_expression and len(self._prefix_kwargs):
prefix = self._prefix_expression.format(**self._prefix_kwargs)
elif self._prefix_expression:
prefix = self._prefix_expression
# Render postfix
postfix = ''
if self._postfix_expression and len(self._postfix_kwargs):
postfix = self._postfix_expression.format(**self._postfix_kwargs)
elif self._postfix_expression:
postfix = self._postfix_expression
# Render bar
bar = ''
current_progress_length = self.progress_char_length
nested_progress_positions = []
# Re-calculate actual progress in screen characters
for level, max_progress in self.progress_maxes.items():
if level not in self.progress:
value = 0
else:
value = self.progress[level]
nested_progress_positions.append((value / max_progress) * current_progress_length)
current_progress_length = int(current_progress_length / max_progress)
# Round and floor progress to fit into the character limit
for i in range(len(nested_progress_positions) - 1):
nested_progress_positions[i] = int(nested_progress_positions[i])
nested_progress_positions[-1] = round(nested_progress_positions[-1])
# Actual bar render
total_progress_chars = sum(nested_progress_positions)
if total_progress_chars == self.progress_char_length:
bar = self.progress_char * total_progress_chars
else:
bar = self.progress_char * max(0, total_progress_chars - 1) + \
self.current_progress_char + \
self.empty_char * (self.progress_char_length - total_progress_chars)
return prefix + bar + postfix # concatenate the bar
def print(self, *progress):
"""
Prints the bar to stdout.
:param progress: indicates progress if specified, equal to calling set_progress without level
(with single progress value or set of values for multiple levels) before print.
Does not change current progress if not specified. Default: None
:return:
"""
self.set_progress(*progress)
bar = self.__str__()
print('\r' + ' ' * self._last_printed_line_length + '\r' + bar, sep = '', end = '', flush = True)
self._last_printed_line_length = len(bar)
def set_max(self, *max_progress, **max_progress_dictionary):
"""
Sets max progress values for progress bar rendering. Values can be int or float.
:param max_progress: one or more arguments for max progress values
:param max_progress_dictionary: use if you want named progress levels
:return: list of keywords for current progress levels
"""
if not len(max_progress) and not len(max_progress_dictionary):
return
if len(max_progress) and len(max_progress_dictionary):
raise AttributeError('max progress should be either all positional or all keyword arguments')
self.progress_maxes = {}
if len(max_progress):
for i, max_val in enumerate(max_progress):
self.progress_maxes[str(i)] = max_val
else:
self.progress_maxes = max_progress_dictionary
return self.progress_maxes.keys()
def add_max(self, level, value, insert_after = None):
"""
Adds new max progress level.
:param level:
:param value:
:param insert_after: str or None - if not specified, insert new level at the end of the list.
:return:
"""
pass
def change_max(self, level, value):
"""
Change max progress for particular progress level.
:param level:
:param value:
:return:
"""
if value < 0:
raise AttributeError('max value should be greater than zero')
if level in self.progress_maxes:
self.progress_maxes[level] = value
def remove_progress_level(self, level):
"""
Removes progress level data, max and value.
:param level: str - keyword for the progress level to remove.
"""
if level in self.progress_maxes:
self.progress_maxes.pop(level, None)
if level in self.progress:
self.progress.pop(level, None)
def set_progress(self, *progress, level = None,
fraction = False, percent = False, print=False):
"""
Sets progress for a single level or for or existing levels as an absolute value, if progress consists of
multiple values.
:param progress:
:param level:
:param fraction:
:param percent:
"""
if not len(progress):
return
if not level and len(progress) > 1:
raise AttributeError('multiple progress values with specified level are not compatible')
if not level and len(progress) != len(self.progress_maxes):
raise AttributeError(f'progress values count ({len(progress)}) should be equal'
f' to the number of progress levels ({len(self.progress_maxes)})')
if fraction and percent:
raise AttributeError('both fraction and percent could not be True simultaneously')
if not level:
self.progress = {}
for value, (level, max_progress) in zip(progress, self.progress_maxes.items()):
if fraction:
value = min(value, 1.)
self.progress[level] = value * max_progress
elif percent:
value = min(value, 100.)
self.progress[level] = (value * max_progress) / 100.
else:
value = min(value, max_progress)
self.progress[level] = value
else:
if level not in self.progress_maxes:
return
if type(level) is not str:
level = str(level)
value = progress[0]
max_progress = self.progress_maxes[level]
if fraction:
value = min(value, 1.)
self.progress[level] = value * max_progress
elif percent:
value = min(value, 100.)
self.progress[level] = (value * max_progress) / 100.
else:
value = min(value, max_progress)
self.progress[level] = value
if print:
self.print()
def set_progress_kwargs(self, fraction = False, percent = False, **progress):
"""
Sets progress by dictionary of level keywords with progress values.
:param progress:
:param fraction:
:param percent:
"""
if not len(progress):
return
for level, value in progress.items():
if level not in self.progress_maxes:
return
max_progress = self.progress_maxes[level]
if fraction:
value = min(value, 1.)
self.progress[level] = value * max_progress
elif percent:
value = min(value, 100.)
self.progress[level] = (value * max_progress) / 100.
else:
value = min(value, max_progress)
self.progress[level] = value
def set_prefix_expression(self, expression, clear_args = True):
"""
Setter for the prefix expression.
This expression will be used when printing the progress bar. Prefix keyword arguments will be used
if specified with the expression.format(prefix_keyword_args) like call.
:param expression: expression string in Pythons format specification mini-language (or just plain string
if no formatting is needed).
:param clear_args:
"""
if expression and type(expression) is not str:
raise TypeError('expression should be either string or None or False')
if clear_args:
self._prefix_kwargs = {}
self._prefix_expression = expression
def set_postfix_expression(self, expression, clear_args = True):
"""
Setter for the postfix expression.
This expression will be used when printing the progress bar. Postfix keyword arguments will be used
if specified with the expression.format(postfix_keyword_args) like call.
:param expression: expression string in Pythons format specification mini-language (or just plain string
if no formatting is needed).
:param clear_args:
"""
if expression and type(expression) is not str:
raise TypeError('expression should be either string or None or False')
if clear_args:
self._prefix_kwargs = {}
self._postfix_expression = expression
def set_prefix(self, expression):
"""
Sets prefix string. Note: if you want to use expression formating with dynamic parameters, use
set_prefix_expression and set_prefix_kwargs instead.
:param expression:
"""
self.set_prefix_expression(self, expression, clear_args = True)
def set_postfix(self, expression):
"""
Sets postfix string. Note: if you want to use expression formating with dynamic parameters, use
set_postfix_expression and set_postfix_kwargs instead.
:param expression:
"""
self.set_postfix_expression(self, expression, clear_args = True)
def set_prefix_kwargs(self, **kwargs):
"""
Set prefix keyword arguments
:param kwargs:
"""
self._prefix_kwargs = kwargs
if print:
self.print()
def set_postfix_kwargs(self, **kwargs):
"""
Set postfix keyword arguments
:param kwargs:
"""
self._postfix_kwargs = kwargs
def set_prefix_arg(self, name, value, print=False):
"""
Set one prefix keyword argument by its keyword
:param name: str - keyword
:param value:
"""
self._prefix_kwargs[name] = value
if print:
self.print()
def set_postfix_arg(self, name, value, print=False):
"""
Set one postfix keyword argument by its keyword
:param name: str - keyword
:param value:
"""
self._postfix_kwargs[name] = value
if print:
self.print()
def pop_prefix_arg(self, name):
"""
Pop prefix argument by its keyword (name)
:param name: str - keyword
:return: argument value or None
"""
return self._prefix_kwargs.pop(name, None)
def pop_postfix_arg(self, name):
"""
Pop postfix argument by its keyword (name)
:param name: str - keyword
:return: argument value or None
"""
return self._postfix_kwargs.pop(name, None)
| 35.899713 | 112 | 0.598531 |
7958e9962ae33e1471559476dcd149bc41c4084b | 5,908 | py | Python | prody/atomic/chain.py | sixpi/ProDy | b0db61622ac774b4c79eafa00c74e0f3b604a774 | [
"MIT"
] | null | null | null | prody/atomic/chain.py | sixpi/ProDy | b0db61622ac774b4c79eafa00c74e0f3b604a774 | [
"MIT"
] | null | null | null | prody/atomic/chain.py | sixpi/ProDy | b0db61622ac774b4c79eafa00c74e0f3b604a774 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""This module defines classes for handling polypeptide/nucleic acid chains."""
from numpy import arange, unique
from .subset import AtomSubset
__all__ = ['Chain']
AAMAP = {
'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D', 'CYS': 'C', 'GLN': 'Q',
'GLU': 'E', 'GLY': 'G', 'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K',
'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S', 'THR': 'T', 'TRP': 'W',
'TYR': 'Y', 'VAL': 'V',
'ASX': 'B', 'GLX': 'Z', 'SEC': 'U', 'PYL': 'O', 'XLE': 'J',
}
_ = {}
for aaa, a in AAMAP.items():
_[a] = aaa
AAMAP.update(_)
AAMAP.update({'PTR': 'Y', 'TPO': 'T', 'SEP': 'S', 'CSO': 'C',
'HSD': 'H', 'HSP': 'H', 'HSE': 'H'})
def getSequence(resnames):
"""Returns polypeptide sequence as from list of *resnames* (residue
name abbreviations)."""
get = AAMAP.get
return ''.join([get(rn, 'X') for rn in resnames])
class Chain(AtomSubset):
    """Instances of this class point to atoms with same chain identifiers and
    are generated by :class:`.HierView` class.  Following built-in functions
    are customized for this class:

    * :func:`len` returns the number of residues in the chain
    * :func:`iter` yields :class:`.Residue` instances

    Indexing :class:`Chain` instances by:

         - *residue number [, insertion code]* (:func:`tuple`),
           e.g. ``10`` or  ``10, "B"``, returns a :class:`.Residue`
         - *slice* (:func:`slice`), e.g, ``10:20``, returns a list of
           :class:`.Residue` instances"""

    __slots__ = ['_ag', '_indices', '_hv', '_acsi', '_selstr', '_seq']

    def __init__(self, ag, indices, hv, acsi=None, **kwargs):
        AtomSubset.__init__(self, ag, indices, acsi, **kwargs)
        self._hv = hv
        self._seq = None  # cached one-letter sequence, built lazily

    def __repr__(self):
        n_csets = self._ag.numCoordsets()
        segment = self.getSegment()
        if segment is None:
            segment = ''
        else:
            segment = ' from ' + str(segment)
        if n_csets == 1:
            return ('<Chain: {0}{1} from {2} ({3} residues, {4} atoms)>'
                    ).format(self.getChid(), segment, self._ag.getTitle(),
                             self.numResidues(), self.numAtoms())
        elif n_csets > 1:
            return ('<Chain: {0}{1} from {2} ({3} residues, {4} '
                    'atoms; active #{5} of {6} coordsets)>'
                    ).format(self.getChid(), segment, self._ag.getTitle(),
                             self.numResidues(), self.numAtoms(),
                             self.getACSIndex(), n_csets)
        else:
            return ('<Chain: {0}{1} from {2} ({3} residues, '
                    '{4} atoms; no coordinates)>'
                    ).format(self.getChid(), segment, self._ag.getTitle(),
                             self.numResidues(), self.numAtoms())

    def __str__(self):
        return 'Chain ' + self.getChid()

    def __getitem__(self, key):
        if isinstance(key, tuple):
            return self.getResidue(*key)

        elif isinstance(key, slice):
            # Bug fix: the previous implementation referenced ``self._list``
            # and ``self._dict``, which are neither listed in __slots__ nor
            # ever assigned, so slicing always raised AttributeError.
            # Select residues whose residue number falls inside the slice.
            resnums = set(arange(*key.indices(self._getResnums().max()+1)))
            # assumes Residue exposes getResnum() — consistent with the
            # Residue API used elsewhere in this hierarchy; TODO confirm
            return [res for res in self.iterResidues()
                    if res.getResnum() in resnums]

        else:
            return self.getResidue(key)

    def getSegment(self):
        """Returns segment of the chain."""

        segname = self.getSegname()
        if segname is not None:
            return self._hv.getSegment(segname)

    def getSegname(self):
        """Returns segment name."""

        segnames = self._ag._getSegnames()
        if segnames is not None:
            return segnames[self._indices[0]]

    def getResidue(self, resnum, icode=None):
        """Returns residue with number *resnum* and insertion code *icode*."""

        return self._hv.getResidue(self.getChid(), resnum, icode,
                                   self.getSegname())

    def iterResidues(self):
        """Yield residues."""

        get = self._hv._getResidue
        for index in unique(self._getResindices()):
            yield get(index)

    __iter__ = iterResidues

    def numResidues(self):
        """Returns number of residues."""

        return len(set(self._getResindices()))

    __len__ = numResidues

    def getChid(self):
        """Returns chain identifier."""

        return self._ag._getChids()[self._indices[0]]

    def setChid(self, chid):
        """Set chain identifier."""

        self.setChids(chid)

    def getChindex(self):
        """Returns chain index."""

        return self._ag._getChindices()[self._indices[0]]

    def getSequence(self, **kwargs):
        """Returns one-letter sequence string for amino acids in the chain.
        When *allres* keyword argument is **True**, sequence will include all
        residues (e.g. water molecules) in the chain and **X** will be used for
        non-standard residue names."""

        if kwargs.get('allres', False):
            get = AAMAP.get
            seq = ''.join([get(res.getResname(), 'X') for res in self])
        elif self._seq:
            # Return the cached sequence built on a previous call.
            seq = self._seq
        else:
            calpha = self.calpha
            if calpha:
                seq = getSequence(calpha.getResnames())
            else:
                seq = ''
            self._seq = seq
        return seq

    def getSelstr(self):
        """Returns selection string that selects atoms in this chain."""

        segment = self.getSegment()
        if segment is None:
            if self._selstr:
                return 'chain {0} and ({1})'.format(self.getChid(),
                                                    self._selstr)
            else:
                return 'chain {0}'.format(self.getChid())
        else:
            return 'chain {0} and ({1})'.format(self.getChid(),
                                                segment.getSelstr())
7958e9a76431bfec083f2aa95c36554f8a98f764 | 632 | py | Python | beginProject/manage.py | cs-fullstack-2019-fall/django-models3-cw-b-marcus110379 | a6fc1231dd868145d5f5eba766f78cca8526a0de | [
"Apache-2.0"
] | null | null | null | beginProject/manage.py | cs-fullstack-2019-fall/django-models3-cw-b-marcus110379 | a6fc1231dd868145d5f5eba766f78cca8526a0de | [
"Apache-2.0"
] | null | null | null | beginProject/manage.py | cs-fullstack-2019-fall/django-models3-cw-b-marcus110379 | a6fc1231dd868145d5f5eba766f78cca8526a0de | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks for the beginProject Django project."""
    # Point Django at this project's settings unless the caller already
    # selected a settings module.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'beginProject.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
| 28.727273 | 76 | 0.685127 |
7958ea1d1c188614413d5027d3547da311dade3a | 2,547 | py | Python | app/settings.py | cmyui/cmyui | bb41302c002714efef59042d11902c011e533b7f | [
"MIT"
] | 10 | 2022-02-07T16:11:39.000Z | 2022-03-13T14:05:37.000Z | app/settings.py | Miku-Network/gulag | c6a3835ce138a8d27f7efd764e75466c881e105c | [
"MIT"
] | 26 | 2022-03-15T18:39:10.000Z | 2022-03-31T06:57:06.000Z | app/settings.py | Miku-Network/gulag | c6a3835ce138a8d27f7efd764e75466c881e105c | [
"MIT"
] | 6 | 2022-03-20T18:52:31.000Z | 2022-03-30T21:55:16.000Z | from __future__ import annotations
from typing import Optional
from databases import DatabaseURL
from starlette.config import Config
from starlette.datastructures import CommaSeparatedStrings
from starlette.datastructures import Secret
# All runtime configuration is read from the local ".env" file; starlette's
# Config falls back to real environment variables for missing keys.
config = Config(".env")

# --- server binding ---
SERVER_ADDR: str = config("SERVER_ADDR")
# Optional TCP port; unset/empty yields None (e.g. when binding a socket path).
SERVER_PORT: Optional[int] = (
    int(v) if (v := config("SERVER_PORT", default=None)) else None
)

# --- backing services ---
DB_DSN: DatabaseURL = config("DB_DSN", cast=DatabaseURL)
REDIS_DSN: str = config("REDIS_DSN")

# --- external APIs and presentation ---
OSU_API_KEY: Secret = config("OSU_API_KEY", cast=Secret)

DOMAIN: str = config("DOMAIN", default="cmyui.xyz")
MIRROR_URL: str = config("MIRROR_URL", default="https://api.chimu.moe/v1")
COMMAND_PREFIX: str = config("COMMAND_PREFIX", default="!")

# Backgrounds rotated seasonally in the client.
SEASONAL_BGS: CommaSeparatedStrings = config(
    "SEASONAL_BGS",
    cast=CommaSeparatedStrings,
    default=CommaSeparatedStrings(
        [
            "https://akatsuki.pw/static/flower.png",
            "https://i.cmyui.xyz/nrMT4V2RR3PR.jpeg",
        ],
    ),
)

MENU_ICON_URL: str = config(
    "MENU_ICON_URL",
    default="https://akatsuki.pw/static/logos/logo_ingame.png",
)
MENU_ONCLICK_URL: str = config("MENU_ONCLICK_URL", default="https://akatsuki.pw")

# --- monitoring ---
DATADOG_API_KEY: Secret = config("DATADOG_API_KEY", cast=Secret)
DATADOG_APP_KEY: Secret = config("DATADOG_APP_KEY", cast=Secret)

DEBUG: bool = config("DEBUG", cast=bool, default=False)

REDIRECT_OSU_URLS: bool = config("REDIRECT_OSU_URLS", cast=bool, default=True)

# Accuracy/score values for which pp is precomputed and cached.
# NOTE(review): the accuracies come from the "PP_CACHED_ACCS" env key while
# the constant is named PP_CACHED_ACCURACIES -- confirm the key is intended.
PP_CACHED_ACCURACIES: list[int] = [
    int(acc)
    for acc in config(
        "PP_CACHED_ACCS",
        cast=CommaSeparatedStrings,
    )
]
PP_CACHED_SCORES: list[int] = [
    int(score)
    for score in config(
        "PP_CACHED_SCORES",
        cast=CommaSeparatedStrings,
    )
]

# Username/password blacklists.
DISALLOWED_NAMES: CommaSeparatedStrings = config(
    "DISALLOWED_NAMES",
    cast=CommaSeparatedStrings,
)
DISALLOWED_PASSWORDS: CommaSeparatedStrings = config(
    "DISALLOWED_PASSWORDS",
    cast=CommaSeparatedStrings,
)

DISCORD_AUDIT_LOG_WEBHOOK: str = config("DISCORD_AUDIT_LOG_WEBHOOK")

# Opt-in automatic problem reporting to the upstream developers.
AUTOMATICALLY_REPORT_PROBLEMS: bool = config(
    "AUTOMATICALLY_REPORT_PROBLEMS",
    cast=bool,
    default=True,
)

# advanced dev settings

## WARNING: only touch this once you've
## read through what it enables.
## you could put your server at risk.
DEVELOPER_MODE: bool = config("DEVELOPER_MODE", cast=bool, default=False)

## WARNING: only touch this if you know how
## the migrations system works.
## you'll regret it.
VERSION = "4.3.2"
| 27.387097 | 81 | 0.713388 |
7958eabaa3d9f6b06c07ab735b469b5247f9f30e | 984 | py | Python | pw_watch/py/setup.py | LuDuda/pigweed | dcd7230895a234156bc7b6e5061e6936627c5fbb | [
"Apache-2.0"
] | 1 | 2020-12-19T19:42:46.000Z | 2020-12-19T19:42:46.000Z | pw_watch/py/setup.py | LuDuda/pigweed | dcd7230895a234156bc7b6e5061e6936627c5fbb | [
"Apache-2.0"
] | 3 | 2021-03-11T06:53:56.000Z | 2022-02-13T21:59:25.000Z | pw_watch/py/setup.py | LuDuda/pigweed | dcd7230895a234156bc7b6e5061e6936627c5fbb | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""pw_watch"""
import setuptools # type: ignore
# Packaging metadata for the pw_watch module (Pigweed's file-watching
# automatic builder).
setuptools.setup(
    name='pw_watch',
    version='0.0.1',
    author='Pigweed Authors',
    author_email='pigweed-developers@googlegroups.com',
    description='Pigweed automatic builder',
    packages=setuptools.find_packages(),
    # Ship the PEP 561 marker file so type checkers use the inline hints.
    package_data={'pw_watch': ['py.typed']},
    zip_safe=False,
    install_requires=[
        'watchdog',
    ],
)
| 31.741935 | 79 | 0.722561 |
7958eae286ceae37a18c61fdab57b216f7ccbc97 | 77 | py | Python | steemexchange/__init__.py | cryptomental/python-goloslib | 0766f9b48e478bc3cdd18b22a6b5867b82a9f81e | [
"MIT"
] | 1 | 2017-04-08T05:08:35.000Z | 2017-04-08T05:08:35.000Z | steemexchange/__init__.py | cryptomental/python-goloslib | 0766f9b48e478bc3cdd18b22a6b5867b82a9f81e | [
"MIT"
] | null | null | null | steemexchange/__init__.py | cryptomental/python-goloslib | 0766f9b48e478bc3cdd18b22a6b5867b82a9f81e | [
"MIT"
] | null | null | null | from steemexchange.exchange import SteemExchange
__all__ = ['steemexchange']
| 25.666667 | 48 | 0.831169 |
7958eb7d4b058594941024cd3c6a352e6c0dcd3f | 741 | py | Python | leetcode/easy/NumberOf1Bits.py | cheshtaaagarrwal/DS-Algos | d64f07355a0ea4342e868a359f34be28c183f8ff | [
"MIT"
] | null | null | null | leetcode/easy/NumberOf1Bits.py | cheshtaaagarrwal/DS-Algos | d64f07355a0ea4342e868a359f34be28c183f8ff | [
"MIT"
] | null | null | null | leetcode/easy/NumberOf1Bits.py | cheshtaaagarrwal/DS-Algos | d64f07355a0ea4342e868a359f34be28c183f8ff | [
"MIT"
] | 1 | 2021-10-11T23:11:55.000Z | 2021-10-11T23:11:55.000Z | # Write a function that takes an unsigned integer and
# returns the number of '1' bits it has (also known as the Hamming weight).
# Example 1:
# Input: 11
# Output: 3
# Explanation: Integer 11 has binary representation 00000000000000000000000000001011
# Example 2:
# Input: 128
# Output: 1
# Explanation: Integer 128 has binary representation 00000000000000000000000010000000
# Note: For this question, review Bit Manipulation trick from Leetcode
class Solution(object):
    """Solution to LeetCode 191: Number of 1 Bits (Hamming weight)."""

    def hammingWeight(self, n):
        """
        Return the number of set bits in the unsigned integer *n*.

        :type n: int
        :rtype: int
        """
        # bin() renders the value without leading zeros, so counting '1'
        # characters gives the population count directly -- equivalent to
        # the original 32-bit format-and-scan loop for any unsigned input.
        return bin(n).count('1')
| 23.903226 | 85 | 0.642375 |
7958ebd128d8506a778306b4b8ba80855b416c85 | 1,567 | py | Python | tests/garage/envs/test_point_env.py | fangqyi/garage | ddafba385ef005f46f913ab352f9638760e5b412 | [
"MIT"
] | 1 | 2021-03-02T08:43:20.000Z | 2021-03-02T08:43:20.000Z | tests/garage/envs/test_point_env.py | fangqyi/garage | ddafba385ef005f46f913ab352f9638760e5b412 | [
"MIT"
] | null | null | null | tests/garage/envs/test_point_env.py | fangqyi/garage | ddafba385ef005f46f913ab352f9638760e5b412 | [
"MIT"
] | null | null | null | import pickle
import numpy as np
from garage.envs.point_env import PointEnv
from tests.helpers import step_env
class TestPointEnv:
    # Unit tests for garage's PointEnv environment.

    def test_pickleable(self):
        # The environment must survive a pickle round trip and still step.
        env = PointEnv()
        round_trip = pickle.loads(pickle.dumps(env))
        assert round_trip
        step_env(round_trip)
        env.close()
        round_trip.close()

    def test_does_not_modify_action(self):
        # step() must leave the caller's action array untouched.
        env = PointEnv()
        a = env.action_space.sample()
        a_copy = a.copy()
        env.reset()
        env.step(a)
        # NOTE(review): this compares two reduced booleans from .all(),
        # not the arrays element-wise; (a == a_copy).all() is presumably
        # the intended check -- confirm before tightening.
        assert a.all() == a_copy.all()
        env.close()

    def test_observation_space(self):
        # Observations produced by step() must lie in observation_space.
        env = PointEnv()
        obs_space = env.observation_space
        a = env.action_space.sample()
        obs, _, _, _ = env.step(a)
        assert obs_space.contains(obs)

    def test_reset(self):
        # reset() must return the point to the origin.
        env = PointEnv()
        assert (env._point == np.array([0, 0])).all()
        a = env.action_space.sample()
        _ = env.step(a)
        env.reset()
        assert (env._point == np.array([0, 0])).all()

    def test_task(self):
        # set_task() must install the sampled goal on the environment.
        env = PointEnv()
        tasks = env.sample_tasks(5)
        assert len(tasks) == 5
        for task in tasks:
            env.set_task(task)
            assert (env._goal == task['goal']).all()

    def test_done(self):
        # Stepping straight toward the goal must eventually report done.
        env = PointEnv()
        for _ in range(1000):
            _, _, done, _ = env.step(env._goal)
            if done:
                break
        else:
            assert False, 'Should report done'
| 24.873016 | 54 | 0.530951 |
7958ec2c2146ec7a470fc6d23991dddcc2136cff | 1,788 | py | Python | onmt/speech/Augmenter.py | Dan-hbd/NMTGMinor | 84e59ac8391ee78852d7c71afc60c3c8b8e3d44d | [
"MIT"
] | 1 | 2021-06-28T06:26:36.000Z | 2021-06-28T06:26:36.000Z | onmt/speech/Augmenter.py | Dan-hbd/NMTGMinor | 84e59ac8391ee78852d7c71afc60c3c8b8e3d44d | [
"MIT"
] | null | null | null | onmt/speech/Augmenter.py | Dan-hbd/NMTGMinor | 84e59ac8391ee78852d7c71afc60c3c8b8e3d44d | [
"MIT"
] | null | null | null | import math
import torch
from collections import defaultdict
import onmt
import random
class Augmenter(object):
"""
Implementation of the "Spec Augmentation" method
(Only vertical and horizontal masking)
"""
def __init__(self, F=8, mf=2, T=64, max_t=0.2, mt=2,
input_size=40, concat=4):
self.F = F
self.mf = mf
self.T = T
self.max_t = max_t
self.mt = mt
self.input_size = input_size
self.concat = concat
print("[INFO] Spec-Augmentation with F=%d, T=%d" % (F, T))
def augment(self, tensor):
feat_size = tensor.size(1)
original_len = tensor.size(0)
reshape_size = feat_size / self.input_size
tensor = tensor.float()
# First we have to upsample the tensor (if it was downsampled during preprocessing)
# # Copy to a new storage because otherwise it is zeroed permanently`
tensor_ = tensor.view(-1, self.input_size).new(*tensor.size()).copy_(tensor)
for _ in range(self.mf):
# frequency masking (second dimension)
# 40 is the number of features (logmel)
f = int(random.uniform(0.0, self.F))
f_0 = int(random.uniform(0.0, 40 - f))
tensor_[:, f_0:f_0 + f].zero_()
for _ in range(self.mt):
# time masking (first dimension)
t = int(random.uniform(0.0, self.T))
t = min(t, int(self.max_t * original_len))
if original_len - t < 0:
continue
t_0 = int(random.uniform(0.0, original_len - t - 1))
tensor_[t_0: t_0 + t].zero_()
# reshaping back to downsampling
tensor__ = tensor_.view(original_len, feat_size)
return tensor__
| 26.686567 | 91 | 0.574385 |
7958ecdbb803b6ee725169b5fc5e14204e7c0be6 | 3,877 | py | Python | messages.dat reader.py | sharpbitmessage/PyBitmessage | 875144ceff4f83ba50502f0cd91f4753b281b8f4 | [
"MIT"
] | 1 | 2015-06-09T14:01:29.000Z | 2015-06-09T14:01:29.000Z | messages.dat reader.py | sharpbitmessage/PyBitmessage | 875144ceff4f83ba50502f0cd91f4753b281b8f4 | [
"MIT"
] | null | null | null | messages.dat reader.py | sharpbitmessage/PyBitmessage | 875144ceff4f83ba50502f0cd91f4753b281b8f4 | [
"MIT"
] | null | null | null | #This program can be used to print out everything in your Inbox or Sent folders and also take things out of the trash.
#Scroll down to the bottom to see the functions that you can uncomment. Save then run this file.
#The functions that only read the database file seem to function just fine even if you have Bitmessage running, but you should definitely close it before running the functions that take items out of the trash.
import sqlite3
from time import strftime, localtime
import sys
APPNAME = "PyBitmessage"
from os import path, environ
if sys.platform == 'darwin':
if "HOME" in environ:
appdata = path.join(os.environ["HOME"], "Library/Application support/", APPNAME) + '/'
else:
print 'Could not find home folder, please report this message and your OS X version to the BitMessage Github.'
sys.exit()
elif 'win' in sys.platform:
appdata = path.join(environ['APPDATA'], APPNAME) + '\\'
else:
appdata = path.expanduser(path.join("~", "." + APPNAME + "/"))
conn = sqlite3.connect( appdata + 'messages.dat' )
conn.text_factory = str
cur = conn.cursor()
def readInbox():
    # Dump every row of the inbox table.
    print 'Printing everything in inbox table:'
    item = '''select * from inbox'''
    parameters = ''
    cur.execute(item, parameters)
    output = cur.fetchall()
    for row in output:
        print row

def readSent():
    # Dump the sent table, hex-encoding binary columns for readability.
    print 'Printing everything in Sent table:'
    item = '''select * from sent'''
    parameters = ''
    cur.execute(item, parameters)
    output = cur.fetchall()
    for row in output:
        msgid, toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber, folder = row
        print msgid.encode('hex'), toaddress, 'toripe:', toripe.encode('hex'), 'fromaddress:', fromaddress, 'SUBJECT:', repr(subject), 'MESSAGE:', repr(message), 'ACKDATA:', ackdata.encode('hex'), lastactiontime, status, pubkeyretrynumber, msgretrynumber, folder

def readSubscriptions():
    # Dump every row of the subscriptions table.
    print 'Printing everything in subscriptions table:'
    item = '''select * from subscriptions'''
    parameters = ''
    cur.execute(item, parameters)
    output = cur.fetchall()
    for row in output:
        print row

def readPubkeys():
    # Dump stored public keys: binary fields hex-encoded, first-broadcast
    # timestamp rendered in local time.
    print 'Printing everything in pubkeys table:'
    item = '''select hash, havecorrectnonce, transmitdata, time, usedpersonally from pubkeys'''
    parameters = ''
    cur.execute(item, parameters)
    output = cur.fetchall()
    for row in output:
        hash, havecorrectnonce, transmitdata, time, usedpersonally = row
        print 'Hash:', hash.encode('hex'), '\tHave correct nonce:', havecorrectnonce, '\tTime first broadcast:', strftime('%a, %d %b %Y %I:%M %p',localtime(time)), '\tUsed by me personally:', usedpersonally, '\tFull pubkey message:', transmitdata.encode('hex')

def readInventory():
    # Dump the inventory table of known network objects.
    print 'Printing everything in inventory table:'
    item = '''select hash, objecttype, streamnumber, payload, receivedtime from inventory'''
    parameters = ''
    cur.execute(item, parameters)
    output = cur.fetchall()
    for row in output:
        hash, objecttype, streamnumber, payload, receivedtime = row
        print 'Hash:', hash.encode('hex'), objecttype, streamnumber, '\t', payload.encode('hex'), '\t', strftime('%a, %d %b %Y %I:%M %p',localtime(receivedtime))
def takeInboxMessagesOutOfTrash():
    # Move every trashed inbox message back to the inbox folder.
    # Close Bitmessage first: this writes to the database.
    item = '''update inbox set folder='inbox' where folder='trash' '''
    parameters = ''
    cur.execute(item, parameters)
    output = cur.fetchall()  # an UPDATE returns no rows; kept for parity
    conn.commit()
    print 'done'

def takeSentMessagesOutOfTrash():
    # Move every trashed sent message back to the sent folder.
    item = '''update sent set folder='sent' where folder='trash' '''
    parameters = ''
    cur.execute(item, parameters)
    output = cur.fetchall()  # an UPDATE returns no rows; kept for parity
    conn.commit()
    print 'done'
#takeInboxMessagesOutOfTrash()
#takeSentMessagesOutOfTrash()
readInbox()
#readSent()
#readPubkeys()
#readSubscriptions()
#readInventory()
| 38.77 | 262 | 0.686355 |
7958ed2381685f56e708077ff3b1e0038534a875 | 58,252 | py | Python | twisted/mail/pop3.py | Laharah/twisted | bf436c724c94268ef470699577356ea887c503cd | [
"MIT",
"Unlicense"
] | null | null | null | twisted/mail/pop3.py | Laharah/twisted | bf436c724c94268ef470699577356ea887c503cd | [
"MIT",
"Unlicense"
] | null | null | null | twisted/mail/pop3.py | Laharah/twisted | bf436c724c94268ef470699577356ea887c503cd | [
"MIT",
"Unlicense"
] | null | null | null | # -*- test-case-name: twisted.mail.test.test_pop3 -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Post-office Protocol version 3.
@author: Glyph Lefkowitz
@author: Jp Calderone
"""
import base64
import binascii
import warnings
from hashlib import md5
from zope.interface import implements, Interface
from twisted.mail import smtp
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import task
from twisted.internet import defer
from twisted.internet import interfaces
from twisted.python import log
from twisted import cred
##
## Authentication
##
class APOPCredentials:
    """
    Credentials presented by a client performing APOP authentication.

    @ivar magic: The challenge string that was sent to the client.
    @ivar username: The username these credentials belong to.
    @ivar digest: The MD5 digest of the challenge concatenated with the
        plaintext password, as supplied by the client.
    """
    implements(cred.credentials.IUsernamePassword)

    def __init__(self, magic, username, digest):
        """
        @type magic: L{bytes}
        @param magic: The challenge string used to encrypt the password.

        @type username: L{bytes}
        @param username: The username associated with these credentials.

        @type digest: L{bytes}
        @param digest: An MD5 hash of the challenge string concatenated with
            the plaintext password.
        """
        self.magic = magic
        self.username = username
        self.digest = digest

    def checkPassword(self, password):
        """
        Check a plaintext password against these credentials.

        @type password: L{bytes}
        @param password: A plaintext password.

        @rtype: L{bool}
        @return: C{True} when MD5(magic + password) matches the stored
            digest, C{False} otherwise.
        """
        expected = md5(self.magic + password).hexdigest()
        return expected == self.digest
class _HeadersPlusNLines:
    """
    A utility class to retrieve the header and some lines of the body of a mail
    message.

    @ivar f: See L{__init__}
    @ivar n: See L{__init__}

    @type linecount: L{int}
    @ivar linecount: The number of full lines of the message body scanned.

    @type headers: L{bool}
    @ivar headers: An indication of which part of the message is being scanned.
        C{True} for the header and C{False} for the body.

    @type done: L{bool}
    @ivar done: A flag indicating when the desired part of the message has been
        scanned.

    @type buf: L{bytes}
    @ivar buf: The portion of the message body that has been scanned, up to
        C{n} lines.
    """
    def __init__(self, f, n):
        """
        @type f: file-like object
        @param f: A file containing a mail message.

        @type n: L{int}
        @param n: The number of lines of the message body to retrieve.
        """
        self.f = f
        self.n = n
        self.linecount = 0
        self.headers = 1
        self.done = 0
        self.buf = ''

    def read(self, bytes):
        """
        Scan bytes from the file.

        @type bytes: L{int}
        @param bytes: The number of bytes to read from the file.

        @rtype: L{bytes}
        @return: Each portion of the header as it is scanned.  Then, full lines
            of the message body as they are scanned.  When more than one line
            of the header and/or body has been scanned, the result is the
            concatenation of the lines.  When the scan results in no full
            lines, the empty string is returned.
        """
        if self.done:
            # The requested n body lines were already emitted.
            return ''
        data = self.f.read(bytes)
        if not data:
            # EOF: propagate the empty read.
            return data
        if self.headers:
            # Still in the header: look for the blank line separating header
            # from body -- CRLF CRLF first, bare LF LF as a fallback
            # (sz is the separator length).
            df, sz = data.find('\r\n\r\n'), 4
            if df == -1:
                df, sz = data.find('\n\n'), 2
            if df != -1:
                # Separator found: emit the header (separator included) and
                # switch to counting body lines.  If not found, linecount
                # stays 0 and the raw header chunk is returned below.
                df += sz
                val = data[:df]
                data = data[df:]
                self.linecount = 1
                self.headers = 0
        else:
            # Header already consumed; start this call's output empty.
            val = ''
        if self.linecount > 0:
            # Body mode: accumulate complete lines, carrying any trailing
            # partial line over in self.buf for the next read.
            dsplit = (self.buf+data).split('\n')
            self.buf = dsplit[-1]
            for ln in dsplit[:-1]:
                if self.linecount > self.n:
                    # Reached the requested number of body lines.
                    self.done = 1
                    return val
                val += (ln + '\n')
                self.linecount += 1
            return val
        else:
            # Separator not seen yet: pass the raw header chunk through.
            return data
class _POP3MessageDeleted(Exception):
"""
An internal control-flow error which indicates that a deleted message was
requested.
"""
class POP3Error(Exception):
    """
    The base class for POP3 protocol errors.
    """
class _IteratorBuffer(object):
"""
An iterator which buffers the elements of a container and periodically
passes them as input to a writer.
@ivar write: See L{__init__}.
@ivar memoryBufferSize: See L{__init__}.
@type bufSize: L{int}
@ivar bufSize: The number of bytes currently in the buffer.
@type lines: L{list} of L{bytes}
@ivar lines: The buffer, which is a list of strings.
@type iterator: iterator which yields L{bytes}
@ivar iterator: An iterator over a container of strings.
"""
bufSize = 0
def __init__(self, write, iterable, memoryBufferSize=None):
"""
@type write: callable that takes L{list} of L{bytes}
@param write: A writer which is a callable that takes a list of
strings.
@type iterable: iterable which yields L{bytes}
@param iterable: An iterable container of strings.
@type memoryBufferSize: L{int} or L{NoneType <types.NoneType>}
@param memoryBufferSize: The number of bytes to buffer before flushing
the buffer to the writer.
"""
self.lines = []
self.write = write
self.iterator = iter(iterable)
if memoryBufferSize is None:
memoryBufferSize = 2 ** 16
self.memoryBufferSize = memoryBufferSize
def __iter__(self):
"""
Return an iterator.
@rtype: iterator which yields L{bytes}
@return: An iterator over strings.
"""
return self
def next(self):
"""
Get the next string from the container, buffer it, and possibly send
the buffer to the writer.
The contents of the buffer are written when it is full or when no
further values are available from the container.
@raise StopIteration: When no further values are available from the
container.
"""
try:
v = self.iterator.next()
except StopIteration:
if self.lines:
self.write(self.lines)
# Drop some references, in case they're edges in a cycle.
del self.iterator, self.lines, self.write
raise
else:
if v is not None:
self.lines.append(v)
self.bufSize += len(v)
if self.bufSize > self.memoryBufferSize:
self.write(self.lines)
self.lines = []
self.bufSize = 0
def iterateLineGenerator(proto, gen):
    """
    Hook an iterator's output up to a protocol's transport and start
    cooperative iteration.

    @type proto: L{POP3}
    @param proto: A POP3 server protocol.

    @type gen: iterator which yields L{bytes}
    @param gen: An iterator over strings.

    @rtype: L{Deferred <defer.Deferred>}
    @return: A deferred which fires when the iterator finishes.
    """
    buffered = _IteratorBuffer(proto.transport.writeSequence, gen)
    return proto.schedule(buffered)
def successResponse(response):
    """
    Format an object as a positive POP3 response line.

    @type response: stringifyable L{object}
    @param response: An object with a string representation.

    @rtype: L{bytes}
    @return: A positive POP3 response string, C{'+OK ...\\r\\n'}.
    """
    # %s stringifies the argument, so no explicit str() call is needed.
    return '+OK %s\r\n' % (response,)
def formatStatResponse(msgs):
    """
    Format a list of message sizes into a STAT response.

    This generator is intended to be driven by
    L{Cooperator <twisted.internet.task.Cooperator>}: it yields C{None} once
    per message while tallying, then the finished response string.

    @type msgs: L{list} of L{int}
    @param msgs: A list of message sizes.

    @rtype: L{NoneType <types.NoneType>} or L{bytes}
    @return: Yields none until a result is available, then a string that is
        suitable for use in a STAT response, consisting of the message count
        and the total size in octets.
    """
    count = 0
    octets = 0
    for size in msgs:
        count += 1
        octets += size
        yield None
    yield successResponse('%d %d' % (count, octets))
def formatListLines(msgs):
    """
    Format a list of message sizes for use in a LIST response.

    @type msgs: L{list} of L{int}
    @param msgs: A list of message sizes.

    @rtype: L{bytes}
    @return: Yields one scan-listing string per message, each consisting of
        the 1-based message number and its size in octets.
    """
    for index, size in enumerate(msgs):
        yield '%d %d\r\n' % (index + 1, size)
def formatListResponse(msgs):
    """
    Format a list of message sizes into a complete LIST response.

    This generator is intended to be driven by
    L{Cooperator <twisted.internet.task.Cooperator>}.

    @type msgs: L{list} of L{int}
    @param msgs: A list of message sizes.

    @rtype: L{bytes}
    @return: Yields the success line, one scan listing per message, and the
        terminating C{'.'} line.
    """
    yield successResponse(len(msgs))
    for line in formatListLines(msgs):
        yield line
    yield '.\r\n'
def formatUIDListLines(msgs, getUidl):
    """
    Format a list of messages for use in a UIDL response.

    @type msgs: L{list} of L{int}
    @param msgs: A list of message sizes; C{None} entries mark deleted
        messages and are skipped.

    @rtype: L{bytes}
    @return: Yields one unique-id listing per undeleted message, each
        consisting of the 1-based message number and the uid returned by
        C{getUidl} for the 0-based index.
    """
    for index, msg in enumerate(msgs):
        if msg is None:
            continue
        yield '%d %s\r\n' % (index + 1, getUidl(index))
def formatUIDListResponse(msgs, getUidl):
    """
    Format a list of messages into a complete UIDL response.

    This generator is intended to be driven by
    L{Cooperator <twisted.internet.task.Cooperator>}.

    @type msgs: L{list} of L{int}
    @param msgs: A list of message sizes.

    @rtype: L{bytes}
    @return: Yields the success line, one unique-id listing per undeleted
        message, and the terminating C{'.'} line.
    """
    yield successResponse('')
    for line in formatUIDListLines(msgs, getUidl):
        yield line
    yield '.\r\n'
class POP3(basic.LineOnlyReceiver, policies.TimeoutMixin):
"""
A POP3 server protocol.
@type portal: L{Portal}
@ivar portal: A portal for authentication.
@type factory: L{IServerFactory} provider
@ivar factory: A server factory which provides an interface for querying
capabilities of the server.
@type timeOut: L{int}
@ivar timeOut: The number of seconds to wait for a command from the client
before disconnecting.
@type schedule: callable that takes interator and returns
L{Deferred <defer.Deferred>}
@ivar schedule: A callable that arranges for an iterator to be
cooperatively iterated over along with all other iterators which have
been passed to it such that runtime is divided between all of them. It
returns a deferred which fires when the iterator finishes.
@type magic: L{bytes} or L{NoneType <types.NoneType>}
@ivar magic: An APOP challenge. If not set, an APOP challenge string
will be generated when a connection is made.
@type _userIs: L{bytes} or L{NoneType <types.NoneType>}
@ivar _userIs: The username sent with the USER command.
@type _onLogout: no-argument callable or L{NoneType <types.NoneType>}
@ivar _onLogout: The function to be executed when the connection is
lost.
@type mbox: L{IMailbox} provider
@ivar mbox: The mailbox for the authenticated user.
@type state: L{bytes}
@ivar state: The state which indicates what type of messages are expected
from the client. Valid states are 'COMMAND' and 'AUTH'
@type blocked: L{NoneType <types.NoneType>} or L{list} of 2-L{tuple} of
(E{1}) L{bytes} (E{2}) L{tuple} of L{bytes}
@ivar blocked: A list of blocked commands. While a response to a command
is being generated by the server, other commands are blocked. When
no command is outstanding, C{blocked} is set to none. Otherwise, it
contains a list of information about blocked commands. Each list
entry consists of the command and the arguments to the command.
@type _highest: L{int}
@ivar _highest: The 1-based index of the highest message retrieved.
@type _auth: L{IUsernameHashedPassword
<cred.credentials.IUsernameHashedPassword>} provider
@ivar _auth: Authorization credentials.
"""
implements(interfaces.IProducer)
magic = None
_userIs = None
_onLogout = None
AUTH_CMDS = ['CAPA', 'USER', 'PASS', 'APOP', 'AUTH', 'RPOP', 'QUIT']
portal = None
factory = None
# The mailbox we're serving
mbox = None
# Set this pretty low -- POP3 clients are expected to log in, download
# everything, and log out.
timeOut = 300
state = "COMMAND"
# PIPELINE
blocked = None
# Cooperate and suchlike.
schedule = staticmethod(task.coiterate)
_highest = 0
def connectionMade(self):
"""
Send a greeting to the client after the connection has been made.
"""
if self.magic is None:
self.magic = self.generateMagic()
self.successResponse(self.magic)
self.setTimeout(self.timeOut)
if getattr(self.factory, 'noisy', True):
log.msg("New connection from " + str(self.transport.getPeer()))
def connectionLost(self, reason):
"""
Clean up when the connection has been lost.
@type reason: L{Failure}
@param reason: The reason the connection was terminated.
"""
if self._onLogout is not None:
self._onLogout()
self._onLogout = None
self.setTimeout(None)
def generateMagic(self):
"""
Generate an APOP challenge.
@rtype: L{bytes}
@return: An RFC 822 message id format string.
"""
return smtp.messageid()
def successResponse(self, message=''):
"""
Send a response indicating success.
@type message: stringifyable L{object}
@param message: An object whose string representation should be
included in the response.
"""
self.transport.write(successResponse(message))
def failResponse(self, message=''):
"""
Send a response indicating failure.
@type message: stringifyable L{object}
@param message: An object whose string representation should be
included in the response.
"""
self.sendLine('-ERR ' + str(message))
def lineReceived(self, line):
"""
Pass a received line to a state machine function.
@type line: L{bytes}
@param line: A received line.
"""
self.resetTimeout()
getattr(self, 'state_' + self.state)(line)
def _unblock(self, _):
"""
Process as many blocked commands as possible.
If there are no more blocked commands, set up for the next command to
be sent immediately.
@type _: L{object}
@param _: Ignored.
"""
commands = self.blocked
self.blocked = None
while commands and self.blocked is None:
cmd, args = commands.pop(0)
self.processCommand(cmd, *args)
if self.blocked is not None:
self.blocked.extend(commands)
def state_COMMAND(self, line):
"""
Handle received lines for the COMMAND state in which commands from the
client are expected.
@type line: L{bytes}
@param line: A received command.
"""
try:
return self.processCommand(*line.split(' '))
except (ValueError, AttributeError, POP3Error, TypeError) as e:
log.err()
self.failResponse('bad protocol or server: %s: %s' % (e.__class__.__name__, e))
def processCommand(self, command, *args):
"""
Dispatch a command from the client for handling.
@type command: L{bytes}
@param command: A POP3 command.
@type args: L{tuple} of L{bytes}
@param args: Arguments to the command.
@raise POP3Error: When the command is invalid or the command requires
prior authentication which hasn't been performed.
"""
if self.blocked is not None:
self.blocked.append((command, args))
return
command = command.upper()
authCmd = command in self.AUTH_CMDS
if not self.mbox and not authCmd:
raise POP3Error("not authenticated yet: cannot do " + command)
f = getattr(self, 'do_' + command, None)
if f:
return f(*args)
raise POP3Error("Unknown protocol command: " + command)
def listCapabilities(self):
"""
Return a list of server capabilities suitable for use in a CAPA
response.
@rtype: L{list} of L{bytes}
@return: A list of server capabilities.
"""
baseCaps = [
"TOP",
"USER",
"UIDL",
"PIPELINE",
"CELERITY",
"AUSPEX",
"POTENCE",
]
if IServerFactory.providedBy(self.factory):
# Oh my god. We can't just loop over a list of these because
# each has spectacularly different return value semantics!
try:
v = self.factory.cap_IMPLEMENTATION()
except NotImplementedError:
pass
except:
log.err()
else:
baseCaps.append("IMPLEMENTATION " + str(v))
try:
v = self.factory.cap_EXPIRE()
except NotImplementedError:
pass
except:
log.err()
else:
if v is None:
v = "NEVER"
if self.factory.perUserExpiration():
if self.mbox:
v = str(self.mbox.messageExpiration)
else:
v = str(v) + " USER"
v = str(v)
baseCaps.append("EXPIRE " + v)
try:
v = self.factory.cap_LOGIN_DELAY()
except NotImplementedError:
pass
except:
log.err()
else:
if self.factory.perUserLoginDelay():
if self.mbox:
v = str(self.mbox.loginDelay)
else:
v = str(v) + " USER"
v = str(v)
baseCaps.append("LOGIN-DELAY " + v)
try:
v = self.factory.challengers
except AttributeError:
pass
except:
log.err()
else:
baseCaps.append("SASL " + ' '.join(v.keys()))
return baseCaps
def do_CAPA(self):
"""
Handle a CAPA command.
Respond with the server capabilities.
"""
self.successResponse("I can do the following:")
for cap in self.listCapabilities():
self.sendLine(cap)
self.sendLine(".")
def do_AUTH(self, args=None):
"""
Handle an AUTH command.
If the AUTH extension is not supported, send an error response. If an
authentication mechanism was not specified in the command, send a list
of all supported authentication methods. Otherwise, send an
authentication challenge to the client and transition to the
AUTH state.
@type args: L{bytes} or L{NoneType <types.NoneType>}
@param args: The name of an authentication mechanism.
"""
if not getattr(self.factory, 'challengers', None):
self.failResponse("AUTH extension unsupported")
return
if args is None:
self.successResponse("Supported authentication methods:")
for a in self.factory.challengers:
self.sendLine(a.upper())
self.sendLine(".")
return
auth = self.factory.challengers.get(args.strip().upper())
if not self.portal or not auth:
self.failResponse("Unsupported SASL selected")
return
self._auth = auth()
chal = self._auth.getChallenge()
self.sendLine('+ ' + base64.encodestring(chal).rstrip('\n'))
self.state = 'AUTH'
def state_AUTH(self, line):
    """
    Handle received lines for the AUTH state in which an authentication
    challenge response from the client is expected.

    Transition back to the COMMAND state.  Check the credentials and
    complete the authorization process with the L{_cbMailbox}
    callback function on success or the L{_ebMailbox} and L{_ebUnexpected}
    errback functions on failure.

    @type line: L{bytes}
    @param line: The challenge response.
    """
    # A single response line always ends the AUTH exchange, whatever the
    # outcome.
    self.state = "COMMAND"
    try:
        parts = base64.decodestring(line).split(None, 1)
    except binascii.Error:
        self.failResponse("Invalid BASE64 encoding")
    else:
        if len(parts) != 2:
            self.failResponse("Invalid AUTH response")
            return
        self._auth.username = parts[0]
        self._auth.response = parts[1]
        d = self.portal.login(self._auth, None, IMailbox)
        d.addCallback(self._cbMailbox, parts[0])
        d.addErrback(self._ebMailbox)
        d.addErrback(self._ebUnexpected)
def do_APOP(self, user, digest):
    """
    Handle an APOP command.

    Perform APOP authentication and complete the authorization process
    with the L{_cbMailbox} callback on success or the L{_ebMailbox} and
    L{_ebUnexpected} errbacks on failure.

    @type user: L{bytes}
    @param user: A username.

    @type digest: L{bytes}
    @param digest: An MD5 digest string.
    """
    d = defer.maybeDeferred(self.authenticateUserAPOP, user, digest)
    d.addCallbacks(self._cbMailbox, self._ebMailbox, callbackArgs=(user,))
    d.addErrback(self._ebUnexpected)
def _cbMailbox(self, (interface, avatar, logout), user):
    """
    Complete successful authentication.

    Save the mailbox and logout function for the authenticated user and
    send a successful response to the client.

    @type interface: C{zope.interface.Interface}
    @param interface: The interface supported by the avatar.

    @type avatar: L{IMailbox} provider
    @param avatar: The mailbox for the authenticated user.

    @type logout: no-argument callable
    @param logout: The function to be invoked when the session is
        terminated.

    @type user: L{bytes}
    @param user: The user being authenticated.
    """
    # NOTE: the tuple-unpacking parameter in the signature above is
    # Python 2 only syntax (removed by PEP 3113).
    if interface is not IMailbox:
        self.failResponse('Authentication failed')
        log.err("_cbMailbox() called with an interface other than IMailbox")
        return

    self.mbox = avatar
    self._onLogout = logout
    self.successResponse('Authentication succeeded')

    if getattr(self.factory, 'noisy', True):
        log.msg("Authenticated login for " + user)
def _ebMailbox(self, failure):
    """
    Handle an expected authentication failure.

    Send an appropriate error response for a L{LoginDenied} or
    L{LoginFailed} authentication failure.

    @type failure: L{Failure}
    @param failure: The authentication error.
    """
    # trap() re-raises anything that is not one of these two types, so any
    # unexpected error propagates to the _ebUnexpected errback instead.
    failure = failure.trap(cred.error.LoginDenied, cred.error.LoginFailed)
    if issubclass(failure, cred.error.LoginDenied):
        self.failResponse("Access denied: " + str(failure))
    elif issubclass(failure, cred.error.LoginFailed):
        self.failResponse('Authentication failed')
    if getattr(self.factory, 'noisy', True):
        log.msg("Denied login attempt from " + str(self.transport.getPeer()))
def _ebUnexpected(self, failure):
    """
    Handle an unexpected authentication failure: report the error to the
    client and log the full failure server-side.

    @type failure: L{Failure}
    @param failure: The authentication error.
    """
    message = 'Server error: ' + failure.getErrorMessage()
    self.failResponse(message)
    log.err(failure)
def do_USER(self, user):
    """
    Handle a USER command.

    Remember the username and prompt the client for the matching PASS
    command.

    @type user: L{bytes}
    @param user: A username.
    """
    self._userIs = user
    self.successResponse('USER accepted, send PASS')
def do_PASS(self, password):
    """
    Handle a PASS command.

    If no USER command was received first, reject the command.  Otherwise
    attempt authentication with the stored username and the supplied
    password, finishing via L{_cbMailbox} on success or the L{_ebMailbox}
    and L{_ebUnexpected} errbacks on failure.

    @type password: L{bytes}
    @param password: A password.
    """
    user = self._userIs
    if user is None:
        self.failResponse("USER required before PASS")
        return
    # Each stored username is consumed by exactly one PASS attempt.
    self._userIs = None
    d = defer.maybeDeferred(self.authenticateUserPASS, user, password)
    d.addCallbacks(self._cbMailbox, self._ebMailbox, callbackArgs=(user,))
    d.addErrback(self._ebUnexpected)
def _longOperation(self, d):
    """
    Stop timeouts and block further command processing while a long
    operation completes.

    @type d: L{Deferred <defer.Deferred>}
    @param d: A deferred which triggers at the completion of a long
        operation.

    @rtype: L{Deferred <defer.Deferred>}
    @return: A deferred which triggers after command processing resumes and
        timeouts restart after the completion of a long operation.
    """
    timeOut = self.timeOut
    self.setTimeout(None)
    # While self.blocked is a list, incoming lines are queued rather than
    # dispatched; _unblock (defined elsewhere) presumably replays them
    # when the operation finishes.
    self.blocked = []
    d.addCallback(self._unblock)
    d.addCallback(lambda ign: self.setTimeout(timeOut))
    return d
def _coiterate(self, gen):
    """
    Direct the output of an iterator to the transport and arrange for
    iteration to take place.

    @type gen: iterable which yields L{bytes}
    @param gen: An iterator over strings.

    @rtype: L{Deferred <defer.Deferred>}
    @return: A deferred which fires when the iterator finishes.
    """
    # The buffer feeds each yielded batch to transport.writeSequence as
    # the cooperative scheduler drives it.
    return self.schedule(_IteratorBuffer(self.transport.writeSequence, gen))
def do_STAT(self):
    """
    Handle a STAT command.

    @rtype: L{Deferred <defer.Deferred>}
    @return: A deferred which triggers after the response to the STAT
        command has been issued.
    """
    d = defer.maybeDeferred(self.mbox.listMessages)
    def cbMessages(msgs):
        # Stream the formatted drop listing cooperatively.
        return self._coiterate(formatStatResponse(msgs))
    def ebMessages(err):
        self.failResponse(err.getErrorMessage())
        log.msg("Unexpected do_STAT failure:")
        log.err(err)
    return self._longOperation(d.addCallbacks(cbMessages, ebMessages))
def do_LIST(self, i=None):
    """
    Handle a LIST command.

    @type i: L{bytes} or L{NoneType <types.NoneType>}
    @param i: A 1-based message index.

    @rtype: L{Deferred <defer.Deferred>}
    @return: A deferred which triggers after the response to the LIST
        command has been issued.
    """
    if i is None:
        # No argument: stream a multi-line scan listing of every message.
        d = defer.maybeDeferred(self.mbox.listMessages)
        def cbMessages(msgs):
            return self._coiterate(formatListResponse(msgs))
        def ebMessages(err):
            self.failResponse(err.getErrorMessage())
            log.msg("Unexpected do_LIST failure:")
            log.err(err)
        return self._longOperation(d.addCallbacks(cbMessages, ebMessages))
    else:
        try:
            i = int(i)
            if i < 1:
                raise ValueError()
        except ValueError:
            self.failResponse("Invalid message-number: %r" % (i,))
        else:
            # Client indices are 1-based; mailbox indices are 0-based.
            d = defer.maybeDeferred(self.mbox.listMessages, i - 1)
            def cbMessage(msg):
                self.successResponse('%d %d' % (i, msg))
            def ebMessage(err):
                errcls = err.check(ValueError, IndexError)
                if errcls is not None:
                    if errcls is IndexError:
                        # IndexError was supported for a while, but really
                        # shouldn't be. One error condition, one exception
                        # type. See ticket #6669.
                        warnings.warn(
                            "twisted.mail.pop3.IMailbox.listMessages may not "
                            "raise IndexError for out-of-bounds message numbers: "
                            "raise ValueError instead.",
                            PendingDeprecationWarning)
                    self.failResponse("Invalid message-number: %r" % (i,))
                else:
                    self.failResponse(err.getErrorMessage())
                    log.msg("Unexpected do_LIST failure:")
                    log.err(err)
            return self._longOperation(d.addCallbacks(cbMessage, ebMessage))
def do_UIDL(self, i=None):
    """
    Handle a UIDL command.

    @type i: L{bytes} or L{NoneType <types.NoneType>}
    @param i: A 1-based message index.

    @rtype: L{Deferred <defer.Deferred>}
    @return: A deferred which triggers after the response to the UIDL
        command has been issued.
    """
    if i is None:
        # No argument: stream the unique-id listing for every message.
        d = defer.maybeDeferred(self.mbox.listMessages)
        def cbMessages(msgs):
            return self._coiterate(formatUIDListResponse(msgs, self.mbox.getUidl))
        def ebMessages(err):
            self.failResponse(err.getErrorMessage())
            log.msg("Unexpected do_UIDL failure:")
            log.err(err)
        return self._longOperation(d.addCallbacks(cbMessages, ebMessages))
    else:
        try:
            i = int(i)
            if i < 1:
                raise ValueError()
        except ValueError:
            self.failResponse("Bad message number argument")
        else:
            try:
                # Client indices are 1-based; mailbox indices are 0-based.
                msg = self.mbox.getUidl(i - 1)
            except IndexError:
                # XXX TODO See above comment regarding IndexError.
                warnings.warn(
                    "twisted.mail.pop3.IMailbox.getUidl may not "
                    "raise IndexError for out-of-bounds message numbers: "
                    "raise ValueError instead.",
                    PendingDeprecationWarning)
                self.failResponse("Bad message number argument")
            except ValueError:
                self.failResponse("Bad message number argument")
            else:
                self.successResponse(str(msg))
def _getMessageFile(self, i):
    """
    Retrieve the size and contents of a message.

    @type i: L{bytes}
    @param i: A 1-based message index.

    @rtype: L{Deferred <defer.Deferred>} which successfully fires with
        2-L{tuple} of (E{1}) L{int}, (E{2}) file-like object
    @return: A deferred which successfully fires with the size of the
        message and a file containing the contents of the message.  Fires
        with C{None} when a failure response has already been sent.
    """
    try:
        msg = int(i) - 1
        if msg < 0:
            raise ValueError()
    except ValueError:
        self.failResponse("Bad message number argument")
        return defer.succeed(None)

    sizeDeferred = defer.maybeDeferred(self.mbox.listMessages, msg)
    def cbMessageSize(size):
        if not size:
            # A falsy size marks a message flagged for deletion.
            return defer.fail(_POP3MessageDeleted())
        fileDeferred = defer.maybeDeferred(self.mbox.getMessage, msg)
        fileDeferred.addCallback(lambda fObj: (size, fObj))
        return fileDeferred
    def ebMessageSomething(err):
        errcls = err.check(_POP3MessageDeleted, ValueError, IndexError)
        if errcls is _POP3MessageDeleted:
            self.failResponse("message deleted")
        elif errcls in (ValueError, IndexError):
            if errcls is IndexError:
                # XXX TODO See above comment regarding IndexError.
                warnings.warn(
                    "twisted.mail.pop3.IMailbox.listMessages may not "
                    "raise IndexError for out-of-bounds message numbers: "
                    "raise ValueError instead.",
                    PendingDeprecationWarning)
            self.failResponse("Bad message number argument")
        else:
            log.msg("Unexpected _getMessageFile failure:")
            log.err(err)
        return None
    sizeDeferred.addCallback(cbMessageSize)
    sizeDeferred.addErrback(ebMessageSomething)
    return sizeDeferred
def _sendMessageContent(self, i, fpWrapper, successResponse):
    """
    Send the contents of a message.

    @type i: L{bytes}
    @param i: A 1-based message index.

    @type fpWrapper: callable that takes a file-like object and returns
        a file-like object
    @param fpWrapper: A transformation applied to the message file before
        it is sent (e.g. truncation for TOP).

    @type successResponse: callable that takes L{int} and returns
        L{bytes}
    @param successResponse: Builds the success status line from the
        message size.

    @rtype: L{Deferred}
    @return: A deferred which triggers after the message has been sent.
    """
    d = self._getMessageFile(i)
    def cbMessageFile(info):
        if info is None:
            # Some error occurred - a failure response has been sent
            # already, just give up.
            return

        self._highest = max(self._highest, int(i))
        resp, fp = info
        fp = fpWrapper(fp)
        self.successResponse(successResponse(resp))
        s = basic.FileSender()
        # transformChunk handles CRLF conversion and '.' byte-stuffing.
        d = s.beginFileTransfer(fp, self.transport, self.transformChunk)

        def cbFileTransfer(lastsent):
            # If the file did not end in a newline, finish the line before
            # the terminating '.'.
            if lastsent != '\n':
                line = '\r\n.'
            else:
                line = '.'
            self.sendLine(line)

        def ebFileTransfer(err):
            self.transport.loseConnection()
            log.msg("Unexpected error in _sendMessageContent:")
            log.err(err)

        d.addCallback(cbFileTransfer)
        d.addErrback(ebFileTransfer)
        return d
    return self._longOperation(d.addCallback(cbMessageFile))
def do_TOP(self, i, size):
    """
    Handle a TOP command.

    @type i: L{bytes}
    @param i: A 1-based message index.

    @type size: L{bytes}
    @param size: The number of lines of the message to retrieve.

    @rtype: L{Deferred}
    @return: A deferred which triggers after the response to the TOP
        command has been issued, or C{None} (after a failure response)
        when the line count argument is invalid.
    """
    try:
        size = int(size)
        if size < 0:
            raise ValueError
    except ValueError:
        self.failResponse("Bad line count argument")
    else:
        return self._sendMessageContent(
            i,
            # Deliver the headers plus at most `size` body lines.
            lambda fp: _HeadersPlusNLines(fp, size),
            lambda size: "Top of message follows")
def do_RETR(self, i):
    """
    Handle a RETR command by sending the complete message to the client,
    with its size reported in the success response line.

    @type i: L{bytes}
    @param i: A 1-based message index.

    @rtype: L{Deferred}
    @return: A deferred which triggers after the response to the RETR
        command has been issued.
    """
    passthrough = lambda fp: fp
    sizeLine = lambda size: "%d" % (size,)
    return self._sendMessageContent(i, passthrough, sizeLine)
def transformChunk(self, chunk):
    """
    Transform a chunk of a message to POP3 message format.

    Make sure each line ends with C{'\\r\\n'} and byte-stuff the
    termination character (C{'.'}) by adding an extra one when one appears
    at the beginning of a line.

    @type chunk: L{bytes}
    @param chunk: A string to transform.

    @rtype: L{bytes}
    @return: The transformed string.
    """
    crlf = chunk.replace('\n', '\r\n')
    return crlf.replace('\r\n.', '\r\n..')
def finishedFileTransfer(self, lastsent):
    """
    Send the multi-line response terminator.

    If the transferred file did not end with a newline, a line ending is
    emitted before the terminating '.'.

    @type lastsent: L{bytes}
    @param lastsent: The last character of the file.
    """
    terminator = '.' if lastsent == '\n' else '\r\n.'
    self.sendLine(terminator)
def do_DELE(self, i):
    """
    Handle a DELE command.

    Mark the given message for deletion and issue a successful response.

    @type i: L{int}
    @param i: A 1-based message index.
    """
    index = int(i) - 1
    self.mbox.deleteMessage(index)
    self.successResponse()
def do_NOOP(self):
    """
    Handle a NOOP command by acknowledging it without any other effect.
    """
    self.successResponse()
def do_RSET(self):
    """
    Handle a RSET command.

    Unmark any messages that have been flagged for deletion.
    """
    try:
        self.mbox.undeleteMessages()
    except:
        # Deliberate catch-all (Python 2 Twisted style): any backend error
        # is logged and turned into a failure response rather than a
        # dropped connection.
        log.err()
        self.failResponse()
    else:
        # Reset the LAST counter along with the deletion marks.
        self._highest = 0
        self.successResponse()
def do_LAST(self):
    """
    Handle a LAST command.

    Reply with the highest 1-based index of the messages retrieved so far
    in this session.
    """
    self.successResponse(self._highest)
def do_RPOP(self, user):
    """
    Handle an RPOP command.

    RPOP is never supported; always reply with an error.

    @type user: L{bytes}
    @param user: A username (ignored).
    """
    self.failResponse('permission denied, sucker')
def do_QUIT(self):
    """
    Handle a QUIT command.

    Flush pending deletions (if a mailbox is open), acknowledge the
    command, and drop the connection.
    """
    if self.mbox:
        self.mbox.sync()
    self.successResponse()
    self.transport.loseConnection()
def authenticateUserAPOP(self, user, digest):
    """
    Perform APOP authentication.

    @type user: L{bytes}
    @param user: The name of the user attempting to log in.

    @type digest: L{bytes}
    @param digest: The challenge response.

    @rtype: L{Deferred <defer.Deferred>} which successfully results in
        3-L{tuple} of (E{1}) L{IMailbox <pop3.IMailbox>}, (E{2})
        L{IMailbox <pop3.IMailbox>} provider, (E{3}) no-argument callable
    @return: A deferred which fires when authentication is complete.  On
        success it yields the interface, the mailbox, and a logout
        callable; on failure it errbacks with
        L{UnauthorizedLogin <cred.error.UnauthorizedLogin>}.

    @raise cred.error.UnauthorizedLogin: When no portal is configured.
    """
    if self.portal is None:
        raise cred.error.UnauthorizedLogin()
    credentials = APOPCredentials(self.magic, user, digest)
    return self.portal.login(credentials, None, IMailbox)
def authenticateUserPASS(self, user, password):
    """
    Perform authentication for a username/password login.

    @type user: L{bytes}
    @param user: The name of the user attempting to log in.

    @type password: L{bytes}
    @param password: The password to authenticate with.

    @rtype: L{Deferred <defer.Deferred>} which successfully results in
        3-L{tuple} of (E{1}) L{IMailbox <pop3.IMailbox>}, (E{2})
        L{IMailbox <pop3.IMailbox>} provider, (E{3}) no-argument callable
    @return: A deferred which fires when authentication is complete.  On
        success it yields the interface, the mailbox, and a logout
        callable; on failure it errbacks with
        L{UnauthorizedLogin <cred.error.UnauthorizedLogin>}.

    @raise cred.error.UnauthorizedLogin: When no portal is configured.
    """
    if self.portal is None:
        raise cred.error.UnauthorizedLogin()
    credentials = cred.credentials.UsernamePassword(user, password)
    return self.portal.login(credentials, None, IMailbox)
class IServerFactory(Interface):
    """
    An interface for querying capabilities of a POP3 server.

    Any cap_* method may raise L{NotImplementedError} if the particular
    capability is not supported.  If L{cap_EXPIRE()} does not raise
    L{NotImplementedError}, L{perUserExpiration()} must be implemented,
    otherwise they are optional.  If L{cap_LOGIN_DELAY()} is implemented,
    L{perUserLoginDelay()} must be implemented, otherwise they are optional.

    @type challengers: L{dict} of L{bytes} -> L{IUsernameHashedPassword
        <cred.credentials.IUsernameHashedPassword>}
    @ivar challengers: A mapping of challenger names to
        L{IUsernameHashedPassword
        <cred.credentials.IUsernameHashedPassword>} provider.
    """
    # NOTE: methods on a zope.interface Interface intentionally take no
    # 'self' parameter; they only document the contract.

    def cap_IMPLEMENTATION():
        """
        Return a string describing the POP3 server implementation.

        @rtype: L{bytes}
        @return: Server implementation information.
        """

    def cap_EXPIRE():
        """
        Return the minimum number of days messages are retained.

        @rtype: L{int} or L{NoneType <types.NoneType>}
        @return: The minimum number of days messages are retained or none,
            if the server never deletes messages.
        """

    def perUserExpiration():
        """
        Indicate whether the message expiration policy differs per user.

        @rtype: L{bool}
        @return: C{True} when the message expiration policy differs per
            user, C{False} otherwise.
        """

    def cap_LOGIN_DELAY():
        """
        Return the minimum number of seconds between client logins.

        @rtype: L{int}
        @return: The minimum number of seconds between client logins.
        """

    def perUserLoginDelay():
        """
        Indicate whether the login delay period differs per user.

        @rtype: L{bool}
        @return: C{True} when the login delay differs per user, C{False}
            otherwise.
        """
class IMailbox(Interface):
    """
    An interface for mailbox access.

    Message indices are 0-based.

    @type loginDelay: L{int}
    @ivar loginDelay: The number of seconds between allowed logins for the
        user associated with this mailbox.

    @type messageExpiration: L{int}
    @ivar messageExpiration: The number of days messages in this mailbox
        will remain on the server before being deleted.
    """
    # NOTE: methods on a zope.interface Interface intentionally take no
    # 'self' parameter; they only document the contract.

    def listMessages(index=None):
        """
        Retrieve the size of a message, or, if none is specified, the size
        of each message in the mailbox.

        @type index: L{int} or L{NoneType <types.NoneType>}
        @param index: The 0-based index of the message.

        @rtype: L{int}, sequence of L{int}, or L{Deferred <defer.Deferred>}
        @return: The number of octets in the specified message, or, if an
            index is not specified, a sequence of the number of octets for
            all messages in the mailbox or a deferred which fires with
            one of those. Any value which corresponds to a deleted message
            is set to 0.

        @raise ValueError or IndexError: When the index does not correspond
            to a message in the mailbox. The use of ValueError is preferred.
        """

    def getMessage(index):
        """
        Retrieve a file containing the contents of a message.

        @type index: L{int}
        @param index: The 0-based index of a message.

        @rtype: file-like object
        @return: A file containing the message.

        @raise ValueError or IndexError: When the index does not correspond
            to a message in the mailbox. The use of ValueError is preferred.
        """

    def getUidl(index):
        """
        Get a unique identifier for a message.

        @type index: L{int}
        @param index: The 0-based index of a message.

        @rtype: L{bytes}
        @return: A string of printable characters uniquely identifying the
            message for all time.

        @raise ValueError or IndexError: When the index does not correspond
            to a message in the mailbox. The use of ValueError is preferred.
        """

    def deleteMessage(index):
        """
        Mark a message for deletion.

        This must not change the number of messages in this mailbox. Further
        requests for the size of the deleted message should return 0. Further
        requests for the message itself may raise an exception.

        @type index: L{int}
        @param index: The 0-based index of a message.

        @raise ValueError or IndexError: When the index does not correspond
            to a message in the mailbox. The use of ValueError is preferred.
        """

    def undeleteMessages():
        """
        Undelete all messages marked for deletion.

        Any message which can be undeleted should be returned to its
        original position in the message sequence and retain its original
        UID.
        """

    def sync():
        """
        Discard the contents of any message marked for deletion.
        """
class Mailbox:
    """
    A base class for mailboxes.

    Provides failing or no-op default implementations of the L{IMailbox}
    methods; subclasses override the operations they actually support.
    """
    # Python 2 zope.interface declaration style; the Python 3 equivalent
    # would be the @implementer(IMailbox) class decorator.
    implements(IMailbox)

    def listMessages(self, i=None):
        """
        Retrieve the size of a message, or, if none is specified, the size
        of each message in the mailbox.

        @type i: L{int} or L{NoneType <types.NoneType>}
        @param i: The 0-based index of the message.

        @rtype: L{int}, sequence of L{int}, or L{Deferred <defer.Deferred>}
        @return: The number of octets in the specified message, or, if an
            index is not specified, a sequence of the number of octets for
            all messages in the mailbox or a deferred which fires with
            one of those. Any value which corresponds to a deleted message
            is set to 0.

        @raise ValueError or IndexError: When the index does not correspond
            to a message in the mailbox. The use of ValueError is preferred.
        """
        return []

    def getMessage(self, i):
        """
        Retrieve a file containing the contents of a message.

        @type i: L{int}
        @param i: The 0-based index of a message.

        @rtype: file-like object
        @return: A file containing the message.

        @raise ValueError or IndexError: When the index does not correspond
            to a message in the mailbox. The use of ValueError is preferred.
        """
        raise ValueError

    def getUidl(self, i):
        """
        Get a unique identifier for a message.

        @type i: L{int}
        @param i: The 0-based index of a message.

        @rtype: L{bytes}
        @return: A string of printable characters uniquely identifying the
            message for all time.

        @raise ValueError or IndexError: When the index does not correspond
            to a message in the mailbox. The use of ValueError is preferred.
        """
        raise ValueError

    def deleteMessage(self, i):
        """
        Mark a message for deletion.

        This must not change the number of messages in this mailbox. Further
        requests for the size of the deleted message should return 0. Further
        requests for the message itself may raise an exception.

        @type i: L{int}
        @param i: The 0-based index of a message.

        @raise ValueError or IndexError: When the index does not correspond
            to a message in the mailbox. The use of ValueError is preferred.
        """
        raise ValueError

    def undeleteMessages(self):
        """
        Undelete all messages marked for deletion.

        Any message which can be undeleted should be returned to its
        original position in the message sequence and retain its original
        UID.
        """
        pass

    def sync(self):
        """
        Discard the contents of any message marked for deletion.
        """
        pass
# Response-parsing modes for the (deprecated) POP3Client below.
NONE, SHORT, FIRST_LONG, LONG = range(4)

# The mode to switch to after a line has been handled in each mode.
NEXT = {
    NONE: NONE,
    SHORT: NONE,
    FIRST_LONG: LONG,
    LONG: NONE,
}
class POP3Client(basic.LineOnlyReceiver):
    """
    A POP3 client protocol.

    @type mode: L{int}
    @ivar mode: The type of response expected from the server. Choices
        include none (0), a one line response (1), the first line of a
        multi-line response (2), and subsequent lines of a multi-line
        response (3).

    @type command: L{bytes}
    @ivar command: The command most recently sent to the server.

    @type welcomeRe: L{RegexObject <re.RegexObject>}
    @ivar welcomeRe: A regular expression which matches the APOP challenge
        in the server greeting.

    @type welcomeCode: L{bytes}
    @ivar welcomeCode: The APOP challenge passed in the server greeting.
    """
    mode = SHORT
    command = 'WELCOME'
    import re
    welcomeRe = re.compile('<(.*)>')

    def __init__(self):
        """
        Issue deprecation warning.
        """
        import warnings
        warnings.warn("twisted.mail.pop3.POP3Client is deprecated, "
                      "please use twisted.mail.pop3.AdvancedPOP3Client "
                      "instead.", DeprecationWarning,
                      stacklevel=3)

    def sendShort(self, command, params=None):
        """
        Send a POP3 command to which a short response is expected.

        @type command: L{bytes}
        @param command: A POP3 command.

        @type params: stringifyable L{object} or L{NoneType <types.NoneType>}
        @param params: Command arguments.
        """
        if params is not None:
            self.sendLine('%s %s' % (command, params))
        else:
            self.sendLine(command)
        self.command = command
        self.mode = SHORT

    def sendLong(self, command, params):
        """
        Send a POP3 command to which a long response is expected.

        @type command: L{bytes}
        @param command: A POP3 command.

        @type params: stringifyable L{object}
        @param params: Command arguments.
        """
        if params:
            self.sendLine('%s %s' % (command, params))
        else:
            self.sendLine(command)
        self.command = command
        self.mode = FIRST_LONG

    def handle_default(self, line):
        """
        Handle responses from the server for which no other handler exists.

        An error ('-ERR') response cancels any expected multi-line
        continuation.

        @type line: L{bytes}
        @param line: A received line.
        """
        # BUG FIX: this previously tested `line[:-4] == '-ERR'` (all but the
        # last four characters), which only matched error lines that were
        # exactly eight characters long.  The intent is a prefix check.
        if line[:4] == '-ERR':
            self.mode = NONE

    def handle_WELCOME(self, line):
        """
        Handle a server response which is expected to be a server greeting.

        @type line: L{bytes}
        @param line: A received line.
        """
        code, data = line.split(' ', 1)
        if code != '+OK':
            self.transport.loseConnection()
        else:
            # Save the APOP challenge (e.g. '<1896.697170952@host>') if the
            # greeting carries one.
            m = self.welcomeRe.match(line)
            if m:
                self.welcomeCode = m.group(1)

    def _dispatch(self, command, default, *args):
        """
        Dispatch a response from the server for handling.

        Command X is dispatched to handle_X() if it exists. If not, it is
        dispatched to the default handler.

        @type command: L{bytes}
        @param command: The command.

        @type default: callable that takes L{bytes} or
            L{NoneType <types.NoneType>}
        @param default: The default handler.

        @type args: L{tuple} or L{NoneType <types.NoneType>}
        @param args: Arguments to the handler function.
        """
        try:
            method = getattr(self, 'handle_' + command, default)
            if method is not None:
                method(*args)
        except:
            # Deliberate catch-all (Python 2 Twisted style): a broken
            # handler must not kill the protocol.
            log.err()

    def lineReceived(self, line):
        """
        Dispatch a received line for processing.

        The choice of function to handle the received line is based on the
        type of response expected to the command sent to the server and how
        much of that response has been received.

        An expected one line response to command X is handled by handle_X().
        The first line of a multi-line response to command X is also handled
        by handle_X().  Subsequent lines of the multi-line response are
        handled by handle_X_continue() except for the last line which is
        handled by handle_X_end().

        @type line: L{bytes}
        @param line: A received line.
        """
        if self.mode == SHORT or self.mode == FIRST_LONG:
            self.mode = NEXT[self.mode]
            self._dispatch(self.command, self.handle_default, line)
        elif self.mode == LONG:
            if line == '.':
                # End of the multi-line response.
                self.mode = NEXT[self.mode]
                self._dispatch(self.command + '_end', None)
                return
            # Undo the '.' byte-stuffing applied by the server.
            if line[:1] == '.':
                line = line[1:]
            self._dispatch(self.command + "_continue", None, line)

    def apopAuthenticate(self, user, password, magic):
        """
        Perform an authenticated login.

        @type user: L{bytes}
        @param user: The username with which to log in.

        @type password: L{bytes}
        @param password: The password with which to log in.

        @type magic: L{bytes}
        @param magic: The challenge provided by the server.
        """
        digest = md5(magic + password).hexdigest()
        self.apop(user, digest)

    def apop(self, user, digest):
        """
        Send an APOP command to perform authenticated login.

        @type user: L{bytes}
        @param user: The username with which to log in.

        @type digest: L{bytes}
        @param digest: The challenge response with which to authenticate.
        """
        self.sendLong('APOP', ' '.join((user, digest)))

    def retr(self, i):
        """
        Send a RETR command to retrieve a message from the server.

        @type i: L{int} or L{bytes}
        @param i: A 0-based message index.
        """
        self.sendLong('RETR', i)

    def dele(self, i):
        """
        Send a DELE command to delete a message from the server.

        @type i: L{int} or L{bytes}
        @param i: A 0-based message index.
        """
        self.sendShort('DELE', i)

    def list(self, i=''):
        """
        Send a LIST command to retrieve the size of a message or, if no
        message is specified, the sizes of all messages.

        @type i: L{int} or L{bytes}
        @param i: A 0-based message index or the empty string to specify
            all messages.
        """
        self.sendLong('LIST', i)

    def uidl(self, i=''):
        """
        Send a UIDL command to retrieve the unique identifier of a message
        or, if no message is specified, the unique identifiers of all
        messages.

        @type i: L{int} or L{bytes}
        @param i: A 0-based message index or the empty string to specify
            all messages.
        """
        self.sendLong('UIDL', i)

    def user(self, name):
        """
        Send a USER command to perform the first half of a plaintext login.

        @type name: L{bytes}
        @param name: The username with which to log in.
        """
        self.sendShort('USER', name)

    def pass_(self, pass_):
        """
        Perform the second half of a plaintext login.

        @type pass_: L{bytes}
        @param pass_: The plaintext password with which to authenticate.
        """
        self.sendShort('PASS', pass_)

    def quit(self):
        """
        Send a QUIT command to disconnect from the server.
        """
        self.sendShort('QUIT')
from twisted.mail.pop3client import POP3Client as AdvancedPOP3Client
from twisted.mail.pop3client import POP3ClientError
from twisted.mail.pop3client import InsecureAuthenticationDisallowed
from twisted.mail.pop3client import ServerErrorResponse
from twisted.mail.pop3client import LineTooLong
from twisted.mail.pop3client import TLSError
from twisted.mail.pop3client import TLSNotSupportedError
# Public API of twisted.mail.pop3.
__all__ = [
    # Interfaces
    'IMailbox', 'IServerFactory',

    # Exceptions
    'POP3Error', 'POP3ClientError', 'InsecureAuthenticationDisallowed',
    'ServerErrorResponse', 'LineTooLong', 'TLSError', 'TLSNotSupportedError',

    # Protocol classes
    'POP3', 'POP3Client', 'AdvancedPOP3Client',

    # Misc
    'APOPCredentials', 'Mailbox']
| 30.968634 | 91 | 0.590589 |
7958ed5c6845d36fd2642d47f8ef83d52e914b2f | 1,145 | py | Python | warriors/modbus_warrior.py | alegrey91/legion | c234c54cc6255e744a0cfde9a9d5909263850480 | [
"MIT"
] | 430 | 2019-06-10T09:43:39.000Z | 2022-03-31T19:46:11.000Z | warriors/modbus_warrior.py | alegrey91/legion | c234c54cc6255e744a0cfde9a9d5909263850480 | [
"MIT"
] | 10 | 2019-09-17T15:48:47.000Z | 2021-02-17T11:09:59.000Z | warriors/modbus_warrior.py | alegrey91/legion | c234c54cc6255e744a0cfde9a9d5909263850480 | [
"MIT"
] | 110 | 2019-06-10T17:22:17.000Z | 2022-03-28T03:23:08.000Z | # -*- coding: utf-8 -*-
from warriors.warrior import Warrior
#You can test this module against querier (10.10.10.125)
class Modbus_warrior (Warrior):
def __init__(self, host, port, workdir, protocol, intensity, username, ulist, password, plist, notuse, extensions, path, reexec, ipv6, domain, interactive, verbose, executed, exec):
Warrior.__init__(self, host, port, workdir, protocol, intensity, username, ulist, password, plist, notuse, extensions, path, reexec, ipv6, domain, interactive, verbose, executed, exec)
self.cmds = [
{"name": self.proto+"_nmap_"+self.port, "cmd": 'nmap-n --script modbus-discover -sV -p ' + self.port + ' ' + self.host, "shell": True, "chain": False},
]
msfmodules = [{"path": "auxiliary/scanner/scada/modbusdetect", "toset": {"RHOSTS": self.host, "RPORT": self.port}},
{"path": "auxiliary/scanner/scada/modbus_findunitid", "toset": {"RHOSTS": self.host, "RPORT": self.port}}
]
self.cmds.append({"name": self.modbus+"_msf_"+self.port, "cmd": self.create_msf_cmd(msfmodules), "shell": True, "chain": False})
| 57.25 | 192 | 0.644541 |
7958edd73bceed0818c5671468c540605e70f4a8 | 810 | py | Python | applymask_2d.py | mcooper12590/4DWormholes | 47eea53377a6d02cf1b5bb8994a68b194b981c55 | [
"MIT"
] | null | null | null | applymask_2d.py | mcooper12590/4DWormholes | 47eea53377a6d02cf1b5bb8994a68b194b981c55 | [
"MIT"
] | null | null | null | applymask_2d.py | mcooper12590/4DWormholes | 47eea53377a6d02cf1b5bb8994a68b194b981c55 | [
"MIT"
] | null | null | null | import imageio
from numpy import array
import argparse

# Optional command-line overrides for the image, mask, and output paths.
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--ifile')
parser.add_argument('-m', '--mfile')
parser.add_argument('-o', '--ofile')
parser.add_argument('-r', '--resfile')  # accepted but currently unused
args = parser.parse_args()

# Default sample layout on the acquisition drive.
rdir = "/media/maxc/IcyBox/Cooper2021_Data/"
sname = "PZ101"
thins = f"{rdir}{sname}/ThinSections/"
tsnum = "10"
tsdir = f"{thins}{tsnum}/"

if args.mfile:
    mfile = args.mfile
else:
    mfile = f"{tsdir}{sname}ts_{tsnum}mm_Mask.tif"
# Subtract 2 then cast to bool — presumably the background label is 2 so it
# becomes False and everything else truthy; verify against mask convention.
mask = (imageio.volread(mfile) - 2).astype(bool)

if args.ifile:
    ifile = args.ifile
else:
    ifile = f"{tsdir}{sname}ts_{tsnum}mm_8bit.tif"
img = imageio.imread(ifile)

# Zero out every pixel outside the mask.
res = mask * img

if args.ofile:
    ofile = args.ofile
else:
    ofile = f"{tsdir}{sname}ts_{tsnum}mm_Masked.tif"
# BUG FIX: `imsave` was never imported (NameError at runtime); use imageio's
# writer, which is already imported at the top of the file.
imageio.imwrite(ofile, res)
| 19.285714 | 52 | 0.687654 |
7958edfc6363c9f565aa125b340b0b87d191b736 | 786 | py | Python | scripts/dhp19/generate_DHP19/Python_Files/normalizeImage3Sigma.py | preshmalinetpereira/lifting_events_to_3d_hpe | 2d7b196dc6ab85e10ae07466657304f324a64a37 | [
"Apache-2.0"
] | null | null | null | scripts/dhp19/generate_DHP19/Python_Files/normalizeImage3Sigma.py | preshmalinetpereira/lifting_events_to_3d_hpe | 2d7b196dc6ab85e10ae07466657304f324a64a37 | [
"Apache-2.0"
] | null | null | null | scripts/dhp19/generate_DHP19/Python_Files/normalizeImage3Sigma.py | preshmalinetpereira/lifting_events_to_3d_hpe | 2d7b196dc6ab85e10ae07466657304f324a64a37 | [
"Apache-2.0"
] | null | null | null | ### File created by preshma
import numpy as np
from statistics import variance as var
import math
def normalizeImage3Sigma(img):
    """Normalize the nonzero pixels of a 2-D event-count image to [0, 255].

    Intensities are scaled so that three standard deviations of the
    nonzero-pixel distribution map to the full 8-bit range; anything beyond
    that is clipped, and the result is floored to an integral grey level.
    Zero pixels (no events) stay at grey level 0.  ``img`` is modified in
    place and also returned.

    Parameters
    ----------
    img : numpy.ndarray
        2-D array of event counts.  Needs at least two nonzero pixels for
        the sample variance to be defined.

    Returns
    -------
    numpy.ndarray
        The same array, normalized in place.
    """
    m, n = img.shape  # enforces 2-D input, as the original per-pixel loop did
    nonzero = img[img > 0]
    if nonzero.size == 0:
        # Nothing to normalize; previously this path divided by zero.
        return img
    sig_img = np.sqrt(var(nonzero))
    # BUG fix: the original wrote `if (sig_img<0.1)/255:`, which compares
    # first and then divides the boolean, i.e. it clamped whenever
    # sig_img < 0.1.  The intended lower bound on sigma is 0.1/255.
    if sig_img < 0.1 / 255:
        sig_img = 0.1 / 255
    numSDevs = 3.0
    meanGrey = 0      # grey level assigned to event-free (zero) pixels
    halfrange = 0
    rangenew = 255
    range_ = numSDevs * sig_img
    # Vectorized form of the original per-pixel loop: scale, clip, floor.
    f = (img + halfrange) * rangenew / range_
    scaled = np.floor(np.clip(f, 0, rangenew))
    scaled[img == 0] = meanGrey
    img[...] = scaled
    return img
7958ef679cda428aed01c0bdfa24dfd3991bca77 | 7,284 | py | Python | activitysim/core/input.py | SEMCOG/SEMCOG_ActSim | cc18cce84b2e4b5f380f58c7919953d2cd03ee73 | [
"BSD-3-Clause"
] | null | null | null | activitysim/core/input.py | SEMCOG/SEMCOG_ActSim | cc18cce84b2e4b5f380f58c7919953d2cd03ee73 | [
"BSD-3-Clause"
] | 1 | 2021-06-30T23:39:37.000Z | 2021-06-30T23:39:37.000Z | activitysim/core/input.py | SEMCOG/SEMCOG_ActSim | cc18cce84b2e4b5f380f58c7919953d2cd03ee73 | [
"BSD-3-Clause"
] | null | null | null | # ActivitySim
# See full license in LICENSE.txt.
import logging
import warnings
import os
import pandas as pd
from activitysim.core import (
inject,
config,
util,
)
from activitysim.core import mem
logger = logging.getLogger(__name__)
def read_input_table(tablename):
    """Reads input table name and returns cleaned DataFrame.

    Looks up *tablename* in the ``input_table_list`` setting of the global
    settings file and delegates the actual read (and cleanup) to
    ``read_from_table_info``.

    Parameters
    ----------
    tablename : string
        Name of the pipeline table to read.

    Returns
    -------
    pandas DataFrame
    """
    table_list = config.setting('input_table_list')
    assert table_list is not None, 'no input_table_list found in settings'

    table_info = None
    for info in table_list:
        if info['tablename'] == tablename:
            table_info = info

    # Fix: message previously read "could not find info for for tablename"
    assert table_info is not None, \
        f"could not find info for tablename {tablename} in settings file"

    return read_from_table_info(table_info)
def read_from_table_info(table_info):
    """
    Read input text files and return cleaned up DataFrame.

    table_info is a dictionary that specifies the following input params.
    See input_table_list in settings.yaml in the example folder for a working example.

    +--------------+----------------------------------------------------------+
    | key          | description                                              |
    +==============+==========================================================+
    | tablename    | name of pipeline table in which to store dataframe       |
    +--------------+----------------------------------------------------------+
    | filename     | name of csv file to read (in data_dir)                   |
    +--------------+----------------------------------------------------------+
    | column_map   | list of input columns to rename from_name: to_name       |
    +--------------+----------------------------------------------------------+
    | index_col    | name of column to set as dataframe index column          |
    +--------------+----------------------------------------------------------+
    | drop_columns | list of column names of columns to drop                  |
    +--------------+----------------------------------------------------------+
    | h5_tablename | name of target table in HDF5 file                        |
    +--------------+----------------------------------------------------------+

    """
    input_store = config.setting('input_store', None)
    create_input_store = config.setting('create_input_store', default=False)

    tablename = table_info.get('tablename')
    data_filename = table_info.get('filename', input_store)
    h5_tablename = table_info.get('h5_tablename') or tablename
    drop_columns = table_info.get('drop_columns', None)
    column_map = table_info.get('column_map', None)
    keep_columns = table_info.get('keep_columns', None)
    rename_columns = table_info.get('rename_columns', None)
    index_col = table_info.get('index_col', None)

    assert tablename is not None, 'no tablename provided'
    assert data_filename is not None, 'no input file provided'

    data_file_path = config.data_file_path(data_filename)

    df = _read_input_file(data_file_path, h5_tablename=h5_tablename)

    # logger.debug('raw %s table columns: %s' % (tablename, df.columns.values))
    logger.debug('raw %s table size: %s' % (tablename, util.df_size(df)))

    if create_input_store:
        # Archive the raw (pre-cleanup) table as both HDF5 and CSV for reuse.
        h5_filepath = config.output_file_path('input_data.h5')
        logger.info('writing %s to %s' % (h5_tablename, h5_filepath))
        df.to_hdf(h5_filepath, key=h5_tablename, mode='a')

        csv_dir = config.output_file_path('input_data')
        if not os.path.exists(csv_dir):
            os.makedirs(csv_dir)  # make directory if needed
        df.to_csv(os.path.join(csv_dir, '%s.csv' % tablename), index=False)

    if drop_columns:
        logger.debug("dropping columns: %s" % drop_columns)
        df.drop(columns=drop_columns, inplace=True, errors='ignore')

    if column_map:
        # Fix: message previously said "table_inf" and ran the two sentences
        # together without a separator.
        warnings.warn("table_info option 'column_map' renamed 'rename_columns'. "
                      "Support for 'column_map' will be removed in future versions.",
                      FutureWarning)
        logger.debug("renaming columns: %s" % column_map)
        df.rename(columns=column_map, inplace=True)

    # rename columns first, so keep_columns can be a stable list of expected/required columns
    if rename_columns:
        logger.debug("renaming columns: %s" % rename_columns)
        df.rename(columns=rename_columns, inplace=True)

    # set index
    if index_col is not None:
        if index_col in df.columns:
            assert not df.duplicated(index_col).any()
            df.set_index(index_col, inplace=True)
        else:
            # FIXME not sure we want to do this. More likely they omitted index col than that they want to name it?
            # df.index.names = [index_col]
            logger.error(f"index_col '{index_col}' specified in configs but not in {tablename} table!")
            logger.error(f"{tablename} columns are: {list(df.columns)}")
            raise RuntimeError(f"index_col '{index_col}' not in {tablename} table!")

    if keep_columns:
        logger.debug("keeping columns: %s" % keep_columns)
        if not set(keep_columns).issubset(set(df.columns)):
            logger.error(f"Required columns missing from {tablename} table: "
                         f"{list(set(keep_columns).difference(set(df.columns)))}")
            logger.error(f"{tablename} table has columns: {list(df.columns)}")
            raise RuntimeError(f"Required columns missing from {tablename} table")

        df = df[keep_columns]

    if df.columns.duplicated().any():
        # When reached, the assert below always fires; the guard just avoids
        # computing the duplicate-name list on the happy path.
        duplicate_column_names = df.columns[df.columns.duplicated(keep=False)].unique().to_list()
        assert not df.columns.duplicated().any(), f"duplicate columns names in {tablename}: {duplicate_column_names}"

    logger.debug('%s table columns: %s' % (tablename, df.columns.values))
    logger.debug('%s table size: %s' % (tablename, util.df_size(df)))
    logger.info('%s index name: %s' % (tablename, df.index.name))

    return df
def _read_input_file(filepath, h5_tablename=None):
    """Dispatch on file extension and read *filepath* into a DataFrame.

    CSV files go through the encoding-fallback reader; HDF5 files
    additionally require ``h5_tablename``.  Any other extension is
    rejected with an IOError.
    """
    assert os.path.exists(filepath), 'input file not found: %s' % filepath

    if filepath.endswith('.h5'):
        assert h5_tablename is not None, 'must provide a tablename to read HDF5 table'
        logger.info('reading %s table from %s' % (h5_tablename, filepath))
        return pd.read_hdf(filepath, h5_tablename)

    if filepath.endswith('.csv'):
        return _read_csv_with_fallback_encoding(filepath)

    raise IOError(
        'Unsupported file type: %s. '
        'ActivitySim supports CSV and HDF5 files only' % filepath)
def _read_csv_with_fallback_encoding(filepath):
    """Read a CSV into a pandas DataFrame, falling back to cp1252.

    The file is first parsed with pandas' default utf-8 encoding; if that
    raises a UnicodeDecodeError, the read is retried with the
    Windows-compatible cp1252 codec.
    """
    logger.info('Reading CSV file %s' % filepath)
    read_kwargs = dict(comment='#')
    try:
        return pd.read_csv(filepath, **read_kwargs)
    except UnicodeDecodeError:
        logger.warning(
            'Reading %s with default utf-8 encoding failed, trying cp1252 instead', filepath)
        return pd.read_csv(filepath, encoding='cp1252', **read_kwargs)
7958f07f7a81ee681267c9608f219d2f92dcc3d8 | 1,669 | py | Python | django_websocket/middleware.py | carlesso/django-websocket | 619156b0ceed9cc7c29799158d5e87699a7b3ee8 | [
"BSD-3-Clause"
] | 1 | 2019-06-13T16:18:48.000Z | 2019-06-13T16:18:48.000Z | django_websocket/middleware.py | carlesso/django-websocket | 619156b0ceed9cc7c29799158d5e87699a7b3ee8 | [
"BSD-3-Clause"
] | null | null | null | django_websocket/middleware.py | carlesso/django-websocket | 619156b0ceed9cc7c29799158d5e87699a7b3ee8 | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.http import HttpResponseBadRequest
from django_websocket.websocket import setup_websocket, MalformedWebSocket
WEBSOCKET_ACCEPT_ALL = getattr(settings, 'WEBSOCKET_ACCEPT_ALL', False)
class WebSocketMiddleware(object):
    """Attach a websocket (when requested) to incoming Django requests.

    Sets ``request.websocket`` and a ``request.is_websocket()`` predicate
    (mirroring ``request.is_ajax()``), performs the opening handshake for
    views that accept websockets, and sends the closing frame when the
    response goes out.
    """

    def process_request(self, request):
        try:
            request.websocket = setup_websocket(request)
        except MalformedWebSocket:
            # Fix: was the py2-only form `except MalformedWebSocket, e:`;
            # the bound exception was never used.
            request.websocket = None
            return HttpResponseBadRequest()
        finally:
            # provide ``request.is_websocket()`` interface, similar to
            # ``request.is_ajax()``.
            if request.websocket is not None:
                request.is_websocket = lambda: True
            else:
                request.is_websocket = lambda: False

    def process_view(self, request, view_func, view_args, view_kwargs):
        # open websocket if its an accepted request
        if request.is_websocket():
            # deny websocket request if view can't handle websocket
            if not WEBSOCKET_ACCEPT_ALL and \
                    not getattr(view_func, 'accept_websocket', False):
                return HttpResponseBadRequest()
            # everything is fine .. so prepare connection by sending handshake
            request.websocket.send_handshake()
        elif getattr(view_func, 'require_websocket', False):
            # websocket was required but not provided
            return HttpResponseBadRequest()

    def process_response(self, request, response):
        if request.is_websocket() and request.websocket._handshake_sent:
            request.websocket._send_closing_frame(True)
        return response
7958f0bac3afce24f2a04710d045579bd723570c | 1,195 | py | Python | skdist/tests/test_postprocessing.py | awesome-archive/sk-dist | 0e478a4655fcd28cf4ce6395197747dae133263d | [
"Apache-2.0"
] | 1 | 2019-09-28T02:22:45.000Z | 2019-09-28T02:22:45.000Z | skdist/tests/test_postprocessing.py | lucassocct/sk-dist | 15cd2383f2927313c7cfb539ea9ca8909727b84a | [
"Apache-2.0"
] | null | null | null | skdist/tests/test_postprocessing.py | lucassocct/sk-dist | 15cd2383f2927313c7cfb539ea9ca8909727b84a | [
"Apache-2.0"
] | null | null | null | """
Test postprocessing
"""
try:
import numpy as np
from sklearn.linear_model import LogisticRegression
from skdist import postprocessing
_import_error = None
except Exception as e:
_import_error = e
def test_import_postprocessing():
    """The guarded skdist/sklearn imports at module top must have succeeded."""
    # Fix: compare to None with `is`, not `==` (PEP 8; avoids __eq__ surprises).
    assert _import_error is None
def test_predict():
    """SimpleVoter with soft voting predicts labels and class probabilities."""
    X = np.array([[1, 2, 3], [4, 5, 6]])
    y = np.array([0, 1])
    model1 = LogisticRegression(solver="liblinear")
    model2 = LogisticRegression(solver="lbfgs")
    model1.fit(X, y)
    model2.fit(X, y)
    clf = postprocessing.SimpleVoter(
        [("model1", model1), ("model2", model2)],
        voting="soft", classes=model1.classes_
    )
    pred = clf.predict(X)
    probs = clf.predict_proba(X)
    assert pred.shape == y.shape
    # Fix: `probs` was computed but never checked; assert one probability
    # row per sample and one column per class.
    assert probs.shape == (X.shape[0], len(model1.classes_))
def test_predict_hard_voting():
    """SimpleVoter with hard voting returns one prediction per sample."""
    features = np.array([[1, 2, 3], [4, 5, 6]])
    labels = np.array([0, 1])
    estimators = [
        ("model1", LogisticRegression(solver="liblinear")),
        ("model2", LogisticRegression(solver="lbfgs")),
    ]
    for _, estimator in estimators:
        estimator.fit(features, labels)
    clf = postprocessing.SimpleVoter(
        estimators,
        voting="hard", classes=estimators[0][1].classes_
    )
    pred = clf.predict(features)
    assert pred.shape == labels.shape
7958f0e4828de1052d8310ca2a62680ae91f5cf5 | 28,326 | py | Python | src/oci/data_safe/models/sensitive_column_summary.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/data_safe/models/sensitive_column_summary.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/data_safe/models/sensitive_column_summary.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class SensitiveColumnSummary(object):
"""
Summary of a sensitive column present in a sensitive data model.
"""
#: A constant which can be used with the lifecycle_state property of a SensitiveColumnSummary.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a SensitiveColumnSummary.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a SensitiveColumnSummary.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a SensitiveColumnSummary.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a SensitiveColumnSummary.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the object_type property of a SensitiveColumnSummary.
#: This constant has a value of "TABLE"
OBJECT_TYPE_TABLE = "TABLE"
#: A constant which can be used with the object_type property of a SensitiveColumnSummary.
#: This constant has a value of "EDITIONING_VIEW"
OBJECT_TYPE_EDITIONING_VIEW = "EDITIONING_VIEW"
#: A constant which can be used with the status property of a SensitiveColumnSummary.
#: This constant has a value of "VALID"
STATUS_VALID = "VALID"
#: A constant which can be used with the status property of a SensitiveColumnSummary.
#: This constant has a value of "INVALID"
STATUS_INVALID = "INVALID"
#: A constant which can be used with the source property of a SensitiveColumnSummary.
#: This constant has a value of "MANUAL"
SOURCE_MANUAL = "MANUAL"
#: A constant which can be used with the source property of a SensitiveColumnSummary.
#: This constant has a value of "DISCOVERY"
SOURCE_DISCOVERY = "DISCOVERY"
#: A constant which can be used with the relation_type property of a SensitiveColumnSummary.
#: This constant has a value of "NONE"
RELATION_TYPE_NONE = "NONE"
#: A constant which can be used with the relation_type property of a SensitiveColumnSummary.
#: This constant has a value of "APP_DEFINED"
RELATION_TYPE_APP_DEFINED = "APP_DEFINED"
#: A constant which can be used with the relation_type property of a SensitiveColumnSummary.
#: This constant has a value of "DB_DEFINED"
RELATION_TYPE_DB_DEFINED = "DB_DEFINED"
def __init__(self, **kwargs):
"""
Initializes a new SensitiveColumnSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param key:
The value to assign to the key property of this SensitiveColumnSummary.
:type key: str
:param sensitive_data_model_id:
The value to assign to the sensitive_data_model_id property of this SensitiveColumnSummary.
:type sensitive_data_model_id: str
:param lifecycle_state:
The value to assign to the lifecycle_state property of this SensitiveColumnSummary.
Allowed values for this property are: "CREATING", "ACTIVE", "UPDATING", "DELETING", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param lifecycle_details:
The value to assign to the lifecycle_details property of this SensitiveColumnSummary.
:type lifecycle_details: str
:param time_created:
The value to assign to the time_created property of this SensitiveColumnSummary.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this SensitiveColumnSummary.
:type time_updated: datetime
:param app_name:
The value to assign to the app_name property of this SensitiveColumnSummary.
:type app_name: str
:param schema_name:
The value to assign to the schema_name property of this SensitiveColumnSummary.
:type schema_name: str
:param object_name:
The value to assign to the object_name property of this SensitiveColumnSummary.
:type object_name: str
:param column_name:
The value to assign to the column_name property of this SensitiveColumnSummary.
:type column_name: str
:param object_type:
The value to assign to the object_type property of this SensitiveColumnSummary.
Allowed values for this property are: "TABLE", "EDITIONING_VIEW", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type object_type: str
:param data_type:
The value to assign to the data_type property of this SensitiveColumnSummary.
:type data_type: str
:param status:
The value to assign to the status property of this SensitiveColumnSummary.
Allowed values for this property are: "VALID", "INVALID", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type status: str
:param sensitive_type_id:
The value to assign to the sensitive_type_id property of this SensitiveColumnSummary.
:type sensitive_type_id: str
:param source:
The value to assign to the source property of this SensitiveColumnSummary.
Allowed values for this property are: "MANUAL", "DISCOVERY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type source: str
:param parent_column_keys:
The value to assign to the parent_column_keys property of this SensitiveColumnSummary.
:type parent_column_keys: list[str]
:param relation_type:
The value to assign to the relation_type property of this SensitiveColumnSummary.
Allowed values for this property are: "NONE", "APP_DEFINED", "DB_DEFINED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type relation_type: str
:param estimated_data_value_count:
The value to assign to the estimated_data_value_count property of this SensitiveColumnSummary.
:type estimated_data_value_count: int
:param sample_data_values:
The value to assign to the sample_data_values property of this SensitiveColumnSummary.
:type sample_data_values: list[str]
"""
self.swagger_types = {
'key': 'str',
'sensitive_data_model_id': 'str',
'lifecycle_state': 'str',
'lifecycle_details': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'app_name': 'str',
'schema_name': 'str',
'object_name': 'str',
'column_name': 'str',
'object_type': 'str',
'data_type': 'str',
'status': 'str',
'sensitive_type_id': 'str',
'source': 'str',
'parent_column_keys': 'list[str]',
'relation_type': 'str',
'estimated_data_value_count': 'int',
'sample_data_values': 'list[str]'
}
self.attribute_map = {
'key': 'key',
'sensitive_data_model_id': 'sensitiveDataModelId',
'lifecycle_state': 'lifecycleState',
'lifecycle_details': 'lifecycleDetails',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'app_name': 'appName',
'schema_name': 'schemaName',
'object_name': 'objectName',
'column_name': 'columnName',
'object_type': 'objectType',
'data_type': 'dataType',
'status': 'status',
'sensitive_type_id': 'sensitiveTypeId',
'source': 'source',
'parent_column_keys': 'parentColumnKeys',
'relation_type': 'relationType',
'estimated_data_value_count': 'estimatedDataValueCount',
'sample_data_values': 'sampleDataValues'
}
self._key = None
self._sensitive_data_model_id = None
self._lifecycle_state = None
self._lifecycle_details = None
self._time_created = None
self._time_updated = None
self._app_name = None
self._schema_name = None
self._object_name = None
self._column_name = None
self._object_type = None
self._data_type = None
self._status = None
self._sensitive_type_id = None
self._source = None
self._parent_column_keys = None
self._relation_type = None
self._estimated_data_value_count = None
self._sample_data_values = None
@property
def key(self):
"""
**[Required]** Gets the key of this SensitiveColumnSummary.
The unique key that identifies the sensitive column. It's numeric and unique within a sensitive data model.
:return: The key of this SensitiveColumnSummary.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""
Sets the key of this SensitiveColumnSummary.
The unique key that identifies the sensitive column. It's numeric and unique within a sensitive data model.
:param key: The key of this SensitiveColumnSummary.
:type: str
"""
self._key = key
@property
def sensitive_data_model_id(self):
"""
**[Required]** Gets the sensitive_data_model_id of this SensitiveColumnSummary.
The OCID of the sensitive data model that contains the sensitive column.
:return: The sensitive_data_model_id of this SensitiveColumnSummary.
:rtype: str
"""
return self._sensitive_data_model_id
@sensitive_data_model_id.setter
def sensitive_data_model_id(self, sensitive_data_model_id):
"""
Sets the sensitive_data_model_id of this SensitiveColumnSummary.
The OCID of the sensitive data model that contains the sensitive column.
:param sensitive_data_model_id: The sensitive_data_model_id of this SensitiveColumnSummary.
:type: str
"""
self._sensitive_data_model_id = sensitive_data_model_id
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this SensitiveColumnSummary.
The current state of the sensitive column.
Allowed values for this property are: "CREATING", "ACTIVE", "UPDATING", "DELETING", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this SensitiveColumnSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this SensitiveColumnSummary.
The current state of the sensitive column.
:param lifecycle_state: The lifecycle_state of this SensitiveColumnSummary.
:type: str
"""
allowed_values = ["CREATING", "ACTIVE", "UPDATING", "DELETING", "FAILED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def lifecycle_details(self):
"""
Gets the lifecycle_details of this SensitiveColumnSummary.
Details about the current state of the sensitive column.
:return: The lifecycle_details of this SensitiveColumnSummary.
:rtype: str
"""
return self._lifecycle_details
@lifecycle_details.setter
def lifecycle_details(self, lifecycle_details):
"""
Sets the lifecycle_details of this SensitiveColumnSummary.
Details about the current state of the sensitive column.
:param lifecycle_details: The lifecycle_details of this SensitiveColumnSummary.
:type: str
"""
self._lifecycle_details = lifecycle_details
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this SensitiveColumnSummary.
The date and time, in the format defined by `RFC3339`__,
the sensitive column was created in the sensitive data model.
__ https://tools.ietf.org/html/rfc3339
:return: The time_created of this SensitiveColumnSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this SensitiveColumnSummary.
The date and time, in the format defined by `RFC3339`__,
the sensitive column was created in the sensitive data model.
__ https://tools.ietf.org/html/rfc3339
:param time_created: The time_created of this SensitiveColumnSummary.
:type: datetime
"""
self._time_created = time_created
@property
def time_updated(self):
"""
**[Required]** Gets the time_updated of this SensitiveColumnSummary.
The date and time, in the format defined by `RFC3339`__,
the sensitive column was last updated in the sensitive data model.
__ https://tools.ietf.org/html/rfc3339
:return: The time_updated of this SensitiveColumnSummary.
:rtype: datetime
"""
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
"""
Sets the time_updated of this SensitiveColumnSummary.
The date and time, in the format defined by `RFC3339`__,
the sensitive column was last updated in the sensitive data model.
__ https://tools.ietf.org/html/rfc3339
:param time_updated: The time_updated of this SensitiveColumnSummary.
:type: datetime
"""
self._time_updated = time_updated
@property
def app_name(self):
"""
**[Required]** Gets the app_name of this SensitiveColumnSummary.
The name of the application associated with the sensitive column. It's useful when the application name is
different from the schema name. Otherwise, it can be ignored.
:return: The app_name of this SensitiveColumnSummary.
:rtype: str
"""
return self._app_name
@app_name.setter
def app_name(self, app_name):
"""
Sets the app_name of this SensitiveColumnSummary.
The name of the application associated with the sensitive column. It's useful when the application name is
different from the schema name. Otherwise, it can be ignored.
:param app_name: The app_name of this SensitiveColumnSummary.
:type: str
"""
self._app_name = app_name
@property
def schema_name(self):
"""
**[Required]** Gets the schema_name of this SensitiveColumnSummary.
The database schema that contains the sensitive column.
:return: The schema_name of this SensitiveColumnSummary.
:rtype: str
"""
return self._schema_name
@schema_name.setter
def schema_name(self, schema_name):
"""
Sets the schema_name of this SensitiveColumnSummary.
The database schema that contains the sensitive column.
:param schema_name: The schema_name of this SensitiveColumnSummary.
:type: str
"""
self._schema_name = schema_name
@property
def object_name(self):
"""
**[Required]** Gets the object_name of this SensitiveColumnSummary.
The database object that contains the sensitive column.
:return: The object_name of this SensitiveColumnSummary.
:rtype: str
"""
return self._object_name
@object_name.setter
def object_name(self, object_name):
"""
Sets the object_name of this SensitiveColumnSummary.
The database object that contains the sensitive column.
:param object_name: The object_name of this SensitiveColumnSummary.
:type: str
"""
self._object_name = object_name
@property
def column_name(self):
"""
**[Required]** Gets the column_name of this SensitiveColumnSummary.
The name of the sensitive column.
:return: The column_name of this SensitiveColumnSummary.
:rtype: str
"""
return self._column_name
@column_name.setter
def column_name(self, column_name):
"""
Sets the column_name of this SensitiveColumnSummary.
The name of the sensitive column.
:param column_name: The column_name of this SensitiveColumnSummary.
:type: str
"""
self._column_name = column_name
@property
def object_type(self):
"""
**[Required]** Gets the object_type of this SensitiveColumnSummary.
The type of the database object that contains the sensitive column.
Allowed values for this property are: "TABLE", "EDITIONING_VIEW", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The object_type of this SensitiveColumnSummary.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this SensitiveColumnSummary.
The type of the database object that contains the sensitive column.
:param object_type: The object_type of this SensitiveColumnSummary.
:type: str
"""
allowed_values = ["TABLE", "EDITIONING_VIEW"]
if not value_allowed_none_or_none_sentinel(object_type, allowed_values):
object_type = 'UNKNOWN_ENUM_VALUE'
self._object_type = object_type
@property
def data_type(self):
"""
**[Required]** Gets the data_type of this SensitiveColumnSummary.
The data type of the sensitive column.
:return: The data_type of this SensitiveColumnSummary.
:rtype: str
"""
return self._data_type
@data_type.setter
def data_type(self, data_type):
"""
Sets the data_type of this SensitiveColumnSummary.
The data type of the sensitive column.
:param data_type: The data_type of this SensitiveColumnSummary.
:type: str
"""
self._data_type = data_type
@property
def status(self):
"""
**[Required]** Gets the status of this SensitiveColumnSummary.
The status of the sensitive column. VALID means the column is considered sensitive. INVALID means the column
is not considered sensitive. Tracking invalid columns in a sensitive data model helps ensure that an
incremental data discovery job does not identify these columns as sensitive again.
Allowed values for this property are: "VALID", "INVALID", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The status of this SensitiveColumnSummary.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this SensitiveColumnSummary.
The status of the sensitive column. VALID means the column is considered sensitive. INVALID means the column
is not considered sensitive. Tracking invalid columns in a sensitive data model helps ensure that an
incremental data discovery job does not identify these columns as sensitive again.
:param status: The status of this SensitiveColumnSummary.
:type: str
"""
allowed_values = ["VALID", "INVALID"]
if not value_allowed_none_or_none_sentinel(status, allowed_values):
status = 'UNKNOWN_ENUM_VALUE'
self._status = status
@property
def sensitive_type_id(self):
"""
Gets the sensitive_type_id of this SensitiveColumnSummary.
The OCID of the sensitive type associated with the sensitive column.
:return: The sensitive_type_id of this SensitiveColumnSummary.
:rtype: str
"""
return self._sensitive_type_id
@sensitive_type_id.setter
def sensitive_type_id(self, sensitive_type_id):
"""
Sets the sensitive_type_id of this SensitiveColumnSummary.
The OCID of the sensitive type associated with the sensitive column.
:param sensitive_type_id: The sensitive_type_id of this SensitiveColumnSummary.
:type: str
"""
self._sensitive_type_id = sensitive_type_id
@property
def source(self):
"""
**[Required]** Gets the source of this SensitiveColumnSummary.
The source of the sensitive column. DISCOVERY indicates that the column was added to the sensitive data model
using a data discovery job. MANUAL indicates that the column was added manually.
Allowed values for this property are: "MANUAL", "DISCOVERY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The source of this SensitiveColumnSummary.
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""
Sets the source of this SensitiveColumnSummary.
The source of the sensitive column. DISCOVERY indicates that the column was added to the sensitive data model
using a data discovery job. MANUAL indicates that the column was added manually.
:param source: The source of this SensitiveColumnSummary.
:type: str
"""
allowed_values = ["MANUAL", "DISCOVERY"]
if not value_allowed_none_or_none_sentinel(source, allowed_values):
source = 'UNKNOWN_ENUM_VALUE'
self._source = source
@property
def parent_column_keys(self):
"""
Gets the parent_column_keys of this SensitiveColumnSummary.
Unique keys identifying the columns that are parents of the sensitive column. At present, it tracks a single parent only.
:return: The parent_column_keys of this SensitiveColumnSummary.
:rtype: list[str]
"""
return self._parent_column_keys
@parent_column_keys.setter
def parent_column_keys(self, parent_column_keys):
"""
Sets the parent_column_keys of this SensitiveColumnSummary.
Unique keys identifying the columns that are parents of the sensitive column. At present, it tracks a single parent only.
:param parent_column_keys: The parent_column_keys of this SensitiveColumnSummary.
:type: list[str]
"""
self._parent_column_keys = parent_column_keys
@property
def relation_type(self):
"""
**[Required]** Gets the relation_type of this SensitiveColumnSummary.
The type of referential relationship the sensitive column has with its parent. NONE indicates that the
sensitive column does not have a parent. DB_DEFINED indicates that the relationship is defined in the database
dictionary. APP_DEFINED indicates that the relationship is defined at the application level and not in the database dictionary.
Allowed values for this property are: "NONE", "APP_DEFINED", "DB_DEFINED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The relation_type of this SensitiveColumnSummary.
:rtype: str
"""
return self._relation_type
@relation_type.setter
def relation_type(self, relation_type):
"""
Sets the relation_type of this SensitiveColumnSummary.
The type of referential relationship the sensitive column has with its parent. NONE indicates that the
sensitive column does not have a parent. DB_DEFINED indicates that the relationship is defined in the database
dictionary. APP_DEFINED indicates that the relationship is defined at the application level and not in the database dictionary.
:param relation_type: The relation_type of this SensitiveColumnSummary.
:type: str
"""
allowed_values = ["NONE", "APP_DEFINED", "DB_DEFINED"]
if not value_allowed_none_or_none_sentinel(relation_type, allowed_values):
relation_type = 'UNKNOWN_ENUM_VALUE'
self._relation_type = relation_type
@property
def estimated_data_value_count(self):
"""
**[Required]** Gets the estimated_data_value_count of this SensitiveColumnSummary.
The estimated number of data values the column has in the associated database.
:return: The estimated_data_value_count of this SensitiveColumnSummary.
:rtype: int
"""
return self._estimated_data_value_count
@estimated_data_value_count.setter
def estimated_data_value_count(self, estimated_data_value_count):
"""
Sets the estimated_data_value_count of this SensitiveColumnSummary.
The estimated number of data values the column has in the associated database.
:param estimated_data_value_count: The estimated_data_value_count of this SensitiveColumnSummary.
:type: int
"""
self._estimated_data_value_count = estimated_data_value_count
@property
def sample_data_values(self):
"""
Gets the sample_data_values of this SensitiveColumnSummary.
Original data values collected for the sensitive column from the associated database. Sample data helps review
the column and ensure that it actually contains sensitive data. Note that sample data is retrieved by a data
discovery job only if the isSampleDataCollectionEnabled attribute is set to true. At present, only one data
value is collected per sensitive column.
:return: The sample_data_values of this SensitiveColumnSummary.
:rtype: list[str]
"""
return self._sample_data_values
@sample_data_values.setter
def sample_data_values(self, sample_data_values):
"""
Sets the sample_data_values of this SensitiveColumnSummary.
Original data values collected for the sensitive column from the associated database. Sample data helps review
the column and ensure that it actually contains sensitive data. Note that sample data is retrieved by a data
discovery job only if the isSampleDataCollectionEnabled attribute is set to true. At present, only one data
value is collected per sensitive column.
:param sample_data_values: The sample_data_values of this SensitiveColumnSummary.
:type: list[str]
"""
self._sample_data_values = sample_data_values
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 37.517881 | 245 | 0.675139 |
7958f24e4014c54eaf025118bb9811c0e4c9130b | 767 | py | Python | download.py | bedna-KU/Loto_sk_prediction | 689991212a1450b865e01461ad7e892f06eb5774 | [
"BSD-2-Clause"
] | null | null | null | download.py | bedna-KU/Loto_sk_prediction | 689991212a1450b865e01461ad7e892f06eb5774 | [
"BSD-2-Clause"
] | null | null | null | download.py | bedna-KU/Loto_sk_prediction | 689991212a1450b865e01461ad7e892f06eb5774 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import requests
from clint.textui import progress
import os
url1 = "https://www.tipos.sk/loterie/ciselne-loterie/archiv-vyzrebovanych-cisel?file=loto1"
url2 = "https://www.tipos.sk/loterie/ciselne-loterie/archiv-vyzrebovanych-cisel?file=loto2"
def download(url, file):
    """Download `url` to the local path `file`, showing a progress bar.

    On a non-200 response nothing is written and the failure is printed.

    :param url: URL to fetch (streamed rather than buffered in memory).
    :param file: destination file path.
    """
    r = requests.get(url, stream=True)
    if r.status_code != 200:
        print("Download failed", r.status_code)
        return
    # Keep the original chunk size (~2.3 MiB per read).
    chunk_size = 2391975
    # The 'content-length' header may be absent; guard against int(None).
    content_length = r.headers.get('content-length')
    expected_size = (int(content_length) / 1024) + 1 if content_length else None
    # Use a separate name for the handle so the `file` path argument is
    # not shadowed inside the `with` block.
    with open(file, "wb") as out:
        for chunk in progress.bar(r.iter_content(chunk_size=chunk_size),
                                  expected_size=expected_size):
            if chunk:
                out.write(chunk)
if __name__ == "__main__":
    # Fetch both lottery draw archives only when executed as a script, so
    # importing this module does not trigger network downloads.
    download(url1, "loto1.csv")
    download(url2, "loto2.csv")
| 34.863636 | 118 | 0.65189 |
7958f413b366ee8854b729ee9e4db80783ff0cc0 | 8,188 | py | Python | src/scancode_analyzer/summary.py | AyanSinhaMahapatra/scancode-analyzer | 5cc8ccda7a1301b0886fd321550d77192213e936 | [
"Apache-2.0"
] | 1 | 2021-01-02T17:27:52.000Z | 2021-01-02T17:27:52.000Z | src/scancode_analyzer/summary.py | AyanSinhaMahapatra/scancode-results-analyzer | 5cc8ccda7a1301b0886fd321550d77192213e936 | [
"Apache-2.0"
] | null | null | null | src/scancode_analyzer/summary.py | AyanSinhaMahapatra/scancode-results-analyzer | 5cc8ccda7a1301b0886fd321550d77192213e936 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
from collections import Counter
import attr
"""
Data Format and example output of analyzer summary, having unique
license detection issues and statistics.
codebase_level:
- license_detection_issues_summary: SummaryLicenseIssues
- unique_license_detection_issues: list of UniqueIssue
- issue_categoryentifier: 1
- files: list of FileRegions
- path: "path/to/occurrence"
- start_line: 1
- end_line: 2
- license_detection_issue: LicenseDetectionIssue
- statistics: StatisticsLicenseIssues
- total_files_with_license: 43
- total_files_with_license_detection_issues: 17
- total_unique_license_detection_issues: 3
- issue_category_counts:
- imperfect-match-coverage: 2
- unknown-match: 1
- issue_classification_id_counts:
- text-lic-text-fragments: 1
- notice-has-unknown-match: 1
- reference-low-coverage-refs: 1
- license_info_type_flags_counts:
- license_text: 1
- license_notice: 1
- license_reference: 1
- analysis_confidence_counts:
- high: 1
- medium: 2
- low: 0
"""
@attr.s
class SummaryLicenseIssues:
    """
    Codebase-level summary of license detection issues: aggregate statistics
    plus the deduplicated list of issues with their file occurrences.
    """
    statistics = attr.ib()
    unique_license_detection_issues = attr.ib(factory=list)

    def to_dict(self):
        """Return this summary as a plain dict (recursively, via attrs)."""
        return attr.asdict(self)

    @staticmethod
    def summarize(license_issues, count_has_license, count_files_with_issues):
        """
        Build a SummaryLicenseIssues from all detected issues: deduplicate
        them into UniqueIssue entries and compute aggregate statistics.
        """
        deduplicated = UniqueIssue.get_unique_issues(
            license_issues,
        )
        stats = StatisticsLicenseIssues.generate_statistics(
            license_issues=license_issues,
            count_unique_issues=len(deduplicated),
            count_has_license=count_has_license,
            count_files_with_issues=count_files_with_issues,
        )
        return SummaryLicenseIssues(
            statistics=stats,
            unique_license_detection_issues=deduplicated,
        )
@attr.s
class StatisticsLicenseIssues:
    """
    Aggregate statistics over all license detection issues from the analysis.
    """
    # Number of files that contain license information.
    total_files_with_license = attr.ib(type=int)
    # Number of files that have at least one license detection issue.
    total_files_with_license_detection_issues = attr.ib(type=int)
    total_unique_license_detection_issues = attr.ib(type=int, default=0)

    # Counts keyed by analyzer.LicenseDetectionIssue.issue_category.
    issue_category_counts = attr.ib(factory=dict)
    # Counts keyed by analyzer.LicenseDetectionIssue.issue_type.classification_id.
    issue_classification_id_counts = attr.ib(factory=dict)
    # Counts keyed by analyzer.LicenseDetectionIssue.issue_type.analysis_confidence.
    analysis_confidence_counts = attr.ib(factory=dict)
    # Counts of the is_license_[text|notice|tag|reference] flags on
    # analyzer.LicenseDetectionIssue.issue_type (zero counts are omitted).
    license_info_type_counts = attr.ib(factory=dict)

    @staticmethod
    def generate_statistics(
        license_issues, count_unique_issues, count_has_license, count_files_with_issues
    ):
        """
        Compute aggregate statistics over the given license detection issues.

        :param license_issues: list of LicenseDetectionIssue
        :param count_unique_issues: int
            Number of unique license detection issues
        :param count_has_license: int
            Number of files having license information
        :param count_files_with_issues: int
            Number of files having license detection issues
        :returns StatisticsLicenseIssues:
        """
        category_counts = dict(Counter(
            issue.issue_category for issue in license_issues))
        classification_counts = dict(Counter(
            issue.issue_type.classification_id for issue in license_issues))
        confidence_counts = dict(Counter(
            issue.issue_type.analysis_confidence for issue in license_issues))

        # Count each license-info flag across issues, keeping only non-zero
        # totals in the final mapping.
        flag_attributes = {
            "license_text": "is_license_text",
            "license_notice": "is_license_notice",
            "license_tag": "is_license_tag",
            "license_reference": "is_license_reference",
        }
        license_info_type_counts = {}
        for flag, attribute in flag_attributes.items():
            count = sum(
                getattr(issue.issue_type, attribute)
                for issue in license_issues
            )
            if count:
                license_info_type_counts[flag] = count

        return StatisticsLicenseIssues(
            total_files_with_license=count_has_license,
            total_files_with_license_detection_issues=count_files_with_issues,
            total_unique_license_detection_issues=count_unique_issues,
            issue_category_counts=category_counts,
            issue_classification_id_counts=classification_counts,
            license_info_type_counts=license_info_type_counts,
            analysis_confidence_counts=confidence_counts,
        )
@attr.s
class UniqueIssue:
    """
    One distinct license detection issue together with every file region
    where it occurs.
    """
    unique_identifier = attr.ib(type=int)
    license_detection_issue = attr.ib()
    files = attr.ib(factory=list)

    @staticmethod
    def get_formatted_unique_issue(
        license_issue, files, unique_identifier
    ):
        """Wrap one representative issue and its file regions as a UniqueIssue."""
        return UniqueIssue(
            unique_identifier=unique_identifier,
            license_detection_issue=license_issue.to_dict(),
            files=files,
        )

    @staticmethod
    def get_unique_issues(license_issues):
        """
        Deduplicate all license detection issues for the scan by identifier,
        collecting the file regions where each unique issue occurs.

        :param license_issues: list of LicenseDetectionIssue
        :returns UniqueLicenseIssues: list of UniqueIssue
        """
        # Counter keys preserve first-occurrence order, which fixes the
        # numbering of the unique issues.
        occurrence_counts = dict(Counter(get_identifiers(license_issues)))
        unique_license_issues = []
        for issue_number, unique_id in enumerate(occurrence_counts, start=1):
            matching = [
                issue
                for issue in license_issues
                if unique_id in (issue.identifier, issue.identifier_for_unknown_intro)
            ]
            unique_license_issues.append(
                UniqueIssue.get_formatted_unique_issue(
                    license_issue=matching[0],
                    files=[issue.file_regions[0] for issue in matching],
                    unique_identifier=issue_number,
                )
            )
        return unique_license_issues
def get_identifiers(license_issues):
    """
    Lazily yield the identifier of each license detection issue.

    Issues whose category is "unknown-match" are represented by their
    `identifier_for_unknown_intro`; every other issue by its `identifier`.

    :param license_issues: iterable of LicenseDetectionIssue
    :returns: generator of identifiers
    """
    for issue in license_issues:
        if issue.issue_category == "unknown-match":
            yield issue.identifier_for_unknown_intro
        else:
            yield issue.identifier
| 33.975104 | 100 | 0.64509 |
7958f4744d148fcf37845491b6913b9f1ceada8e | 12,628 | py | Python | virtual/lib/python3.6/site-packages/typed_ast/ast27.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 3 | 2019-03-17T11:27:05.000Z | 2020-08-30T04:19:11.000Z | virtual/lib/python3.6/site-packages/typed_ast/ast27.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 16 | 2020-02-12T00:28:11.000Z | 2022-03-11T23:48:19.000Z | virtual/lib/python3.6/site-packages/typed_ast/ast27.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 2 | 2020-04-30T18:47:05.000Z | 2021-05-24T15:07:41.000Z | # -*- coding: utf-8 -*-
"""
ast27
~~~
The `ast27` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it. The `ast27`
module is similar to the builtin `ast` module on Python 2.7, except `ast27`
runs on Python 3 and provides PEP 484 type comments as part of the AST.
Specifically, these changes are made to the Python 2.7 AST:
- The `FunctionDef`, `Assign`, `For`, and `With` classes all have a
`type_comment` field which contains a `str` with the text of the
associated type comment, if any.
- `arguments` has a `type_comments` list of per-argument type comments.
- `parse` has been augmented so it can parse function signature types when
called with `mode=func_type`.
- `Module` has a `type_ignores` field which contains a list of
lines which have been `# type: ignore`d.
- `Str` has a `kind` string field which preserves the original string
prefix, so that `ast27.parse('br"test"').body[0].value.kind == 'br'`.
An abstract syntax tree can be generated by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast27.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from typed_ast import _ast27
from typed_ast._ast27 import *
def parse(source, filename='<unknown>', mode='exec'):
    """
    Parse the source into an AST node with type comments.
    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).

    :param source: Python 2.7 source text to parse.
    :param filename: name used in error messages and source locations.
    :param mode: compile mode, e.g. 'exec' or 'eval'; also accepts
        'func_type' to parse function signature types (see module docstring).
    :return: the root AST node (delegates to the C extension `_ast27`).
    """
    return _ast27.parse(source, filename, mode)
def literal_eval(node_or_string):
    """
    Safely evaluate an expression node or a string containing a Python
    expression. The string or node provided may only consist of the following
    Python literal structures: strings, numbers, tuples, lists, dicts,
    booleans, and None.

    :param node_or_string: a str of Python 2.7 source, or an already parsed
        expression node (or Expression) from this module.
    :return: the evaluated literal value.
    :raises ValueError: if the input contains anything but literals.
    """
    _safe_names = {'None': None, 'True': True, 'False': False}
    # Fix for Python 3 (which this module runs on, per the module docstring):
    # the original check used `basestring`, a Python-2-only builtin that
    # raises NameError here.
    if isinstance(node_or_string, str):
        node_or_string = parse(node_or_string, mode='eval')
    if isinstance(node_or_string, Expression):
        node_or_string = node_or_string.body
    def _convert(node):
        if isinstance(node, Str):
            return node.s
        elif isinstance(node, Num):
            return node.n
        elif isinstance(node, Tuple):
            return tuple(map(_convert, node.elts))
        elif isinstance(node, List):
            return list(map(_convert, node.elts))
        elif isinstance(node, Dict):
            return dict((_convert(k), _convert(v)) for k, v
                        in zip(node.keys, node.values))
        elif isinstance(node, Name):
            if node.id in _safe_names:
                return _safe_names[node.id]
        elif isinstance(node, BinOp) and \
             isinstance(node.op, (Add, Sub)) and \
             isinstance(node.right, Num) and \
             isinstance(node.right.n, complex) and \
             isinstance(node.left, Num) and \
             isinstance(node.left.n, (int, float)):
            # `long` (Python 2 only) dropped: Python 3 ints are unbounded.
            left = node.left.n
            right = node.right.n
            if isinstance(node.op, Add):
                return left + right
            else:
                return left - right
        raise ValueError('malformed string')
    return _convert(node_or_string)
def dump(node, annotate_fields=True, include_attributes=False):
    """
    Return a formatted dump of the tree in *node*, mainly for debugging.

    The output shows field names and values. Since that form cannot be
    evaluated, pass ``annotate_fields=False`` to get evaluable output.
    Line numbers and column offsets are omitted unless
    ``include_attributes=True``.
    """
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)

    def _render(node):
        if isinstance(node, AST):
            field_reprs = [(name, _render(value))
                           for name, value in iter_fields(node)]
            if annotate_fields:
                args = ['%s=%s' % pair for pair in field_reprs]
            else:
                args = [rendered for _, rendered in field_reprs]
            text = '%s(%s' % (node.__class__.__name__, ', '.join(args))
            if include_attributes and node._attributes:
                # Separate attributes from fields, or pad when there are none.
                text += ', ' if field_reprs else ' '
                text += ', '.join(
                    '%s=%s' % (attr, _render(getattr(node, attr)))
                    for attr in node._attributes)
            return text + ')'
        if isinstance(node, list):
            return '[%s]' % ', '.join(_render(item) for item in node)
        return repr(node)

    return _render(node)
def copy_location(new_node, old_node):
    """
    Copy source location (`lineno` and `col_offset`) from *old_node* onto
    *new_node* and return *new_node*. Each attribute is copied only when both
    nodes declare it in `_attributes` and *old_node* actually carries a value.
    """
    for attr in ('lineno', 'col_offset'):
        both_support = (attr in old_node._attributes
                        and attr in new_node._attributes)
        if both_support and hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
def fix_missing_locations(node):
    """
    Recursively fill in missing `lineno` and `col_offset` attributes starting
    at *node*, so the tree can be passed to compile(). Nodes that lack a
    location inherit the nearest ancestor's values (the root defaults to
    line 1, column 0). Returns *node*.
    """
    def _propagate(current, lineno, col_offset):
        if 'lineno' in current._attributes:
            if hasattr(current, 'lineno'):
                lineno = current.lineno
            else:
                current.lineno = lineno
        if 'col_offset' in current._attributes:
            if hasattr(current, 'col_offset'):
                col_offset = current.col_offset
            else:
                current.col_offset = col_offset
        for child in iter_child_nodes(current):
            _propagate(child, lineno, col_offset)
    _propagate(node, 1, 0)
    return node
def increment_lineno(node, n=1):
    """
    Increment the line number of *node* and every descendant by *n*, which is
    useful for "moving code" to a different location in a file. Returns *node*.
    """
    for descendant in walk(node):
        if 'lineno' in descendant._attributes:
            descendant.lineno = getattr(descendant, 'lineno', 0) + n
    return node
def iter_fields(node):
    """
    Yield ``(fieldname, value)`` for every field in ``node._fields`` that is
    actually present on *node*; fields without a value are skipped.
    """
    _missing = object()
    for field in node._fields:
        value = getattr(node, field, _missing)
        if value is not _missing:
            yield field, value
def iter_child_nodes(node):
    """
    Yield the direct AST children of *node*: every field value that is itself
    a node, plus every node contained in list-valued fields.
    """
    for _, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, AST):
                    yield element
def get_docstring(node, clean=True):
    """
    Return the docstring of *node*, or None when it has no docstring.
    Only FunctionDef, ClassDef and Module nodes can carry docstrings; any
    other node type raises TypeError. With ``clean=True`` the text is run
    through inspect.cleandoc().
    """
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    body = node.body
    if body and isinstance(body[0], Expr) and isinstance(body[0].value, Str):
        text = body[0].value.s
        if clean:
            import inspect
            return inspect.cleandoc(text)
        return text
def walk(node):
    """
    Yield *node* and all of its descendants, in no specified order (the
    traversal happens to be breadth-first, but callers should not rely on
    that). Useful for in-place modifications that do not need context.
    """
    from collections import deque
    pending = deque([node])
    while pending:
        current = pending.popleft()
        pending.extend(iter_child_nodes(current))
        yield current
class NodeVisitor(object):
    """
    Base class that walks an abstract syntax tree and dispatches every node
    to a visitor method, returning whatever that method returns.

    For a node of class ``TryFinally`` the method ``visit_TryFinally`` is
    called when defined on the subclass; otherwise ``generic_visit`` handles
    the node. Override ``visit`` to change this naming scheme.

    Do not use ``NodeVisitor`` to replace or remove nodes during traversal;
    ``NodeTransformer`` exists for that purpose.
    """

    def visit(self, node):
        """Dispatch *node* to its type-specific visitor (or generic_visit)."""
        handler = getattr(
            self, 'visit_' + node.__class__.__name__, self.generic_visit)
        return handler(node)

    def generic_visit(self, node):
        """Visit all direct AST children; used when no specific visitor exists."""
        for _, value in iter_fields(node):
            if isinstance(value, AST):
                self.visit(value)
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item)
class NodeTransformer(NodeVisitor):
    """
    A :class:`NodeVisitor` subclass that can modify the tree it walks.

    The return value of each visitor method replaces the visited node:
    ``None`` removes the node, the original node leaves it in place, and any
    other node (or, inside statement lists, a list of nodes) substitutes it.

    Example rewriting name lookups (``foo``) into ``data['foo']``::

       class RewriteName(NodeTransformer):

           def visit_Name(self, node):
               return copy_location(Subscript(
                   value=Name(id='data', ctx=Load()),
                   slice=Index(value=Str(s=node.id, kind='')),
                   ctx=node.ctx
               ), node)

    If a visited node has children, transform them yourself or call
    :meth:`generic_visit` on the node first. Typical usage::

       node = YourTransformer().visit(node)
    """

    def generic_visit(self, node):
        for field, old_value in iter_fields(node):
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                replacement = []
                for child in old_value:
                    if not isinstance(child, AST):
                        # Non-node entries (e.g. identifiers) pass through.
                        replacement.append(child)
                        continue
                    result = self.visit(child)
                    if result is None:
                        continue
                    if isinstance(result, AST):
                        replacement.append(result)
                    else:
                        # A list result splices multiple nodes in place.
                        replacement.extend(result)
                # Mutate the original list object in place.
                old_value[:] = replacement
            elif isinstance(old_value, AST):
                result = self.visit(old_value)
                if result is None:
                    delattr(node, field)
                else:
                    setattr(node, field, result)
        return node
| 38.855385 | 81 | 0.625752 |
7958f4a2689b8ac22f59cb4a6986b13fab2b6e66 | 16,126 | py | Python | Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/debug/lib/debug_graphs.py | JustinACoder/H22-GR3-UnrealAI | 361eb9ef1147f8a2991e5f98c4118cd823184adf | [
"MIT"
] | 6 | 2022-02-04T18:12:24.000Z | 2022-03-21T23:57:12.000Z | Lib/site-packages/tensorflow/python/debug/lib/debug_graphs.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/tensorflow/python/debug/lib/debug_graphs.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | 1 | 2022-02-08T03:53:23.000Z | 2022-02-08T03:53:23.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and methods for processing debugger-decorated graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import op_def_registry
from tensorflow.python.platform import tf_logging as logging
def parse_node_or_tensor_name(name):
  """Split a node or tensor name into the node name and the output slot.

  Args:
    name: An input node name (e.g., "node_a") or tensor name (e.g.,
      "node_a:0"), as a str.

  Returns:
    1) The node name, as a str. For a tensor name the final colon and the
       output slot that follows it are stripped.
    2) The output slot as an int if `name` is a tensor name, else None.
  """
  node, colon, slot = name.rpartition(":")
  # Only treat `name` as a tensor name when a colon exists and is not the
  # final character.
  if colon and slot:
    return node, int(slot)
  return name, None
def get_node_name(element_name):
  """Return only the node-name part of a node or tensor name."""
  return parse_node_or_tensor_name(element_name)[0]
def get_output_slot(element_name):
  """Get the output slot number from the name of a graph element.

  A node name without an explicit output slot is treated as slot 0.

  Args:
    element_name: (`str`) name of the graph element in question.

  Returns:
    (`int`) output slot number.
  """
  _, slot = parse_node_or_tensor_name(element_name)
  return 0 if slot is None else slot
def is_copy_node(node_name):
  """Check whether `node_name` names a debugger-inserted Copy node.

  Such nodes are inserted by TensorFlow core upon request in
  RunOptions.debug_options.debug_tensor_watch_opts; their names all carry
  the "__copy_" prefix.

  Args:
    node_name: Name of the node.

  Returns:
    A bool: whether `node_name` starts with the Copy-node prefix.
  """
  return node_name.startswith("__copy_")
def is_debug_node(node_name):
  """Check whether `node_name` names a debugger-inserted debug node.

  Such nodes are inserted by TensorFlow core upon request in
  RunOptions.debug_options.debug_tensor_watch_opts; their names all carry
  the "__dbg_" prefix.

  Args:
    node_name: Name of the node.

  Returns:
    A bool: whether `node_name` starts with the debug-node prefix.
  """
  return node_name.startswith("__dbg_")
def parse_debug_node_name(node_name):
  """Parse the name of a debug node.

  Args:
    node_name: Name of the debug node.

  Returns:
    1. Name of the watched node, as a str.
    2. Output slot index of the watched tensor, as an int.
    3. Index of the debug node, as an int.
    4. Name of the debug op, as a str, e.g, "DebugIdentity".

  Raises:
    ValueError: If the input node name is not a valid debug node name.
  """
  prefix = "__dbg_"
  if not node_name.startswith(prefix):
    raise ValueError("Invalid prefix in debug node name: '%s'" % node_name)

  remainder = node_name[len(prefix):]
  if remainder.count("_") < 2:
    raise ValueError("Invalid debug node name: '%s'" % node_name)

  # Peel off the trailing "_<index>_<debug_op>" suffix right-to-left.
  remainder, _, debug_op = remainder.rpartition("_")
  remainder, _, index_str = remainder.rpartition("_")
  debug_op_index = int(index_str)

  if remainder.count(":") != 1:
    raise ValueError("Invalid tensor name in debug node name: '%s'" % node_name)
  watched_node_name, _, slot_str = remainder.partition(":")
  watched_output_slot = int(slot_str)

  return watched_node_name, watched_output_slot, debug_op_index, debug_op
class GraphTracingReachedDestination(Exception):
  """Control-flow signal raised by DFSGraphTracer.trace() when the
  configured destination node is reached; carries no error payload."""
  pass
class DFSGraphTracer(object):
  """Graph input tracer using depth-first search."""

  def __init__(self,
               input_lists,
               skip_node_names=None,
               destination_node_name=None):
    """Constructor of _DFSGraphTracer.

    Args:
      input_lists: A list of dicts. Each dict is an adjacency (input) map from
        the recipient node name as the key and the list of input node names
        as the value.
      skip_node_names: Optional: a list of node names to skip tracing.
      destination_node_name: Optional: destination node name. If not `None`, it
        should be the name of a destination node as a str and the graph tracing
        will raise GraphTracingReachedDestination as soon as the node has been
        reached.

    Raises:
      GraphTracingReachedDestination: if destination_node_name is not None and
        the specified node is reached.
    """
    self._input_lists = input_lists
    # Normalize the None default: trace() performs membership tests against
    # this collection, which would raise TypeError on None.
    self._skip_node_names = skip_node_names or []

    self._inputs = []
    self._visited_nodes = []
    self._depth_count = 0
    self._depth_list = []

    self._destination_node_name = destination_node_name

  def trace(self, graph_element_name):
    """Trace inputs.

    Args:
      graph_element_name: Name of the node or an output tensor of the node, as a
        str.

    Raises:
      GraphTracingReachedDestination: if destination_node_name of this tracer
        object is not None and the specified node is reached.
    """
    self._depth_count += 1

    node_name = get_node_name(graph_element_name)
    if node_name == self._destination_node_name:
      raise GraphTracingReachedDestination()

    if node_name in self._skip_node_names:
      return
    if node_name in self._visited_nodes:
      return

    self._visited_nodes.append(node_name)

    for input_list in self._input_lists:
      if node_name not in input_list:
        continue
      for inp in input_list[node_name]:
        if get_node_name(inp) in self._visited_nodes:
          continue
        self._inputs.append(inp)
        self._depth_list.append(self._depth_count)
        self.trace(inp)

    self._depth_count -= 1

  def inputs(self):
    """Return the inputs discovered so far, in visit order."""
    return self._inputs

  def depth_list(self):
    """Return the depth at which each entry of inputs() was discovered."""
    return self._depth_list
def _infer_device_name(graph_def):
"""Infer device name from a partition GraphDef."""
device_name = None
for node in graph_def.node:
if node.device:
device_name = node.device
break
if device_name is None:
logging.warn(
"Failed to infer device name from partition GraphDef: none of the "
"nodes of the GraphDef has a non-empty device name.")
return device_name
class DebugGraph(object):
"""Represents a debugger-decorated graph."""
  def __init__(self, debug_graph_def, device_name=None):
    """Constructor of DebugGraph.

    Args:
      debug_graph_def: (GraphDef) a debugger-decorated partition graph.
      device_name: (str or None) name of the device this graph belongs to;
        if None, it is inferred from the node devices in `debug_graph_def`.
    """
    self._debug_graph_def = debug_graph_def
    self._non_debug_graph_def = None
    # Per-node lookup maps, keyed by node name; filled while processing
    # the GraphDef nodes below.
    self._node_attributes = {}
    self._node_inputs = {}
    self._node_reversed_ref_inputs = {}
    self._node_ctrl_inputs = {}
    self._node_recipients = {}
    self._node_ctrl_recipients = {}
    self._node_devices = {}
    self._node_op_types = {}
    self._copy_send_nodes = []
    self._ref_args = {}
    self._device_name = device_name
    if not self._device_name:
      self._device_name = _infer_device_name(debug_graph_def)
    for node in debug_graph_def.node:
      self._process_debug_graph_node(node)
    # Remove debugger-inserted nodes/edges so the maps reflect the original
    # graph. Order matters: edges are rewired before the Copy nodes are
    # pruned from the maps, then recipient maps are derived from the result.
    self._prune_non_control_edges_of_debug_ops()
    self._prune_control_edges_of_debug_ops()
    self._prune_nodes_from_input_and_recipient_maps(self._get_copy_nodes())
    self._populate_recipient_maps()
def _process_debug_graph_node(self, node):
"""Process a node from the debug GraphDef.
Args:
node: (NodeDef) A partition-graph node to be processed.
Raises:
ValueError: If duplicate node names are encountered.
"""
if is_debug_node(node.name):
# This is a debug node. Parse the node name and retrieve the
# information about debug watches on tensors. But do not include
# the node in the graph.
return
if node.name in self._node_inputs:
raise ValueError("Duplicate node name on device %s: '%s'" %
(self._device_name, node.name))
self._node_attributes[node.name] = node.attr
self._node_inputs[node.name] = []
self._node_ctrl_inputs[node.name] = []
self._node_recipients[node.name] = []
self._node_ctrl_recipients[node.name] = []
if node.name not in self._node_devices:
self._node_devices[node.name] = set()
self._node_devices[node.name].add(
node.device if node.device else self._device_name)
self._node_op_types[node.name] = node.op
self._ref_args[node.name] = self._get_ref_args(node)
for inp in node.input:
if is_copy_node(inp) and (node.op == "_Send" or node.op == "_Retval"):
self._copy_send_nodes.append(node.name)
if inp.startswith("^"):
cinp = inp[1:]
self._node_ctrl_inputs[node.name].append(cinp)
else:
self._node_inputs[node.name].append(inp)
def _get_ref_args(self, node):
"""Determine whether an input of an op is ref-type.
Args:
node: A `NodeDef`.
Returns:
A list of the arg names (as strs) that are ref-type.
"""
op_def = op_def_registry.get_registered_ops().get(node.op)
ref_args = []
if op_def:
for i, output_arg in enumerate(op_def.output_arg):
if output_arg.is_ref:
arg_name = node.name if i == 0 else ("%s:%d" % (node.name, i))
ref_args.append(arg_name)
return ref_args
def _get_copy_nodes(self):
"""Find all Copy nodes in the loaded graph."""
copy_nodes = []
for node in self._node_inputs:
if is_copy_node(node):
copy_nodes.append(node)
return copy_nodes
def _prune_non_control_edges_of_debug_ops(self):
"""Prune (non-control) edges related to debug ops.
Prune the Copy ops and associated _Send ops inserted by the debugger out
from the non-control inputs and output recipients map. Replace the inputs
and recipients with original ones.
"""
for node in self._node_inputs:
inputs = self._node_inputs[node]
for i in xrange(len(inputs)):
inp = inputs[i]
if is_copy_node(inp):
# Find the input to the Copy node, which should be the original
# input to the node.
orig_inp = self._node_inputs[inp][0]
inputs[i] = orig_inp
def _prune_control_edges_of_debug_ops(self):
"""Prune control edges related to the debug ops."""
for node in self._node_ctrl_inputs:
ctrl_inputs = self._node_ctrl_inputs[node]
debug_op_inputs = []
for ctrl_inp in ctrl_inputs:
if is_debug_node(ctrl_inp):
debug_op_inputs.append(ctrl_inp)
for debug_op_inp in debug_op_inputs:
ctrl_inputs.remove(debug_op_inp)
def _populate_recipient_maps(self):
"""Populate the map from node name to recipient(s) of its output(s).
This method also populates the input map based on reversed ref edges.
"""
for node in self._node_inputs:
inputs = self._node_inputs[node]
for inp in inputs:
inp = get_node_name(inp)
if inp not in self._node_recipients:
self._node_recipients[inp] = []
self._node_recipients[inp].append(node)
if inp in self._ref_args:
if inp not in self._node_reversed_ref_inputs:
self._node_reversed_ref_inputs[inp] = []
self._node_reversed_ref_inputs[inp].append(node)
for node in self._node_ctrl_inputs:
ctrl_inputs = self._node_ctrl_inputs[node]
for ctrl_inp in ctrl_inputs:
if ctrl_inp in self._copy_send_nodes:
continue
if ctrl_inp not in self._node_ctrl_recipients:
self._node_ctrl_recipients[ctrl_inp] = []
self._node_ctrl_recipients[ctrl_inp].append(node)
def _prune_nodes_from_input_and_recipient_maps(self, nodes_to_prune):
"""Prune nodes out of input and recipient maps.
Args:
nodes_to_prune: (`list` of `str`) Names of the nodes to be pruned.
"""
for node in nodes_to_prune:
del self._node_inputs[node]
del self._node_ctrl_inputs[node]
del self._node_recipients[node]
del self._node_ctrl_recipients[node]
def _reconstruct_non_debug_graph_def(self):
"""Reconstruct non-debug GraphDef.
Non-debug GraphDef means the original GraphDef without the Copy* and Debug
nodes inserted by the debugger.
"""
if self._non_debug_graph_def:
return
self._non_debug_graph_def = graph_pb2.GraphDef()
for node in self._debug_graph_def.node:
if is_copy_node(node.name) or is_debug_node(node.name):
continue
new_node = self._non_debug_graph_def.node.add()
new_node.CopyFrom(node)
# Redo the list of inputs, because in _debug_graph_def, the list can
# consist of Copy* and Debug* nodes inserted by the debugger. Those will
# be replaced with the original inputs here.
del new_node.input[:]
for inp in self._node_inputs[node.name]:
new_node.input.append(inp)
for ctrl_inp in self._node_ctrl_inputs[node.name]:
new_node.input.append("^" + ctrl_inp)
  @property
  def device_name(self):
    """Name of the device this graph is associated with."""
    return self._device_name
  @property
  def debug_graph_def(self):
    """The debugger-decorated GraphDef."""
    return self._debug_graph_def
  @property
  def non_debug_graph_def(self):
    """The GraphDef without the Copy* and Debug* nodes added by the debugger."""
    self._reconstruct_non_debug_graph_def()
    return self._non_debug_graph_def
  @property
  def node_devices(self):
    """Map from node name to the set of device names it appears on."""
    return self._node_devices
  @property
  def node_op_types(self):
    """Map from node name to the node's op type (str)."""
    return self._node_op_types
  @property
  def node_attributes(self):
    """Map from node name to the node's attr map."""
    return self._node_attributes
  @property
  def node_inputs(self):
    """Map from node name to the list of its (non-control) input names."""
    return self._node_inputs
  @property
  def node_ctrl_inputs(self):
    """Map from node name to the list of its control-input names."""
    return self._node_ctrl_inputs
  @property
  def node_reversed_ref_inputs(self):
    """Map from node name to consumers of it through reversed ref edges."""
    return self._node_reversed_ref_inputs
  @property
  def node_recipients(self):
    """Map from node name to recipients of its (non-control) outputs."""
    return self._node_recipients
  @property
  def node_ctrl_recipients(self):
    """Map from node name to recipients of its control output."""
    return self._node_ctrl_recipients
def reconstruct_non_debug_graph_def(debug_graph_def):
  """Reconstruct original (non-debugger-decorated) partition GraphDef.

  This method strips the input `tf.GraphDef` of the Copy* and Debug*-type
  nodes inserted by the debugger.

  The reconstructed partition graph is identical to the original (i.e.,
  non-debugger-decorated) partition graph except in the following respects:
    1) The exact names of the runtime-inserted internal nodes may differ.
       These include _Send, _Recv, _HostSend, _HostRecv, _Retval ops.
    2) As a consequence of 1, the nodes that receive input directly from such
       send- and recv-type ops will have different input names.
    3) The parallel_iteration attribute of while-loop Enter ops are set to 1.

  Args:
    debug_graph_def: The debugger-decorated `tf.GraphDef`, with the
      debugger-inserted Copy* and Debug* nodes.

  Returns:
    The reconstructed `tf.GraphDef` stripped of the debugger-inserted nodes.
  """
  debug_graph = DebugGraph(debug_graph_def)
  return debug_graph.non_debug_graph_def
| 31.996032 | 81 | 0.673819 |
7958f5e69f44eab0ddeba3b0c6563964502d4083 | 640 | bzl | Python | third_party/ruy/workspace.bzl | Kinoo2/tensorflow | e334eb2f95bdece6f0df3eff0cf9c402078fe392 | [
"Apache-2.0"
] | 2 | 2021-12-04T07:29:53.000Z | 2021-12-04T07:30:00.000Z | third_party/ruy/workspace.bzl | Kinoo2/tensorflow | e334eb2f95bdece6f0df3eff0cf9c402078fe392 | [
"Apache-2.0"
] | 3 | 2021-08-25T15:10:14.000Z | 2022-02-10T04:33:14.000Z | third_party/ruy/workspace.bzl | Kinoo2/tensorflow | e334eb2f95bdece6f0df3eff0cf9c402078fe392 | [
"Apache-2.0"
] | 3 | 2021-09-26T22:20:25.000Z | 2021-09-26T23:07:13.000Z | """Loads the ruy library, used by TensorFlow Lite."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
    """Fetches the ruy library at the commit pinned below via tf_http_archive."""
    tf_http_archive(
        name = "ruy",
        # Integrity checksum for the pinned archive below.
        sha256 = "fa9a0b9041095817bc3533f7b125c3b4044570c0b3ee6c436d2d29dae001c06b",
        strip_prefix = "ruy-e6c1b8dc8a8b00ee74e7268aac8b18d7260ab1ce",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/ruy/archive/e6c1b8dc8a8b00ee74e7268aac8b18d7260ab1ce.zip",
            "https://github.com/google/ruy/archive/e6c1b8dc8a8b00ee74e7268aac8b18d7260ab1ce.zip",
        ],
        build_file = "//third_party/ruy:BUILD",
    )
| 40 | 142 | 0.698438 |
7958f5e799516c2ad41b7386ed8ec98bcae8cce5 | 890 | py | Python | _teaching/csci127-summer-2020/readings/activities/raven_words_updated.py | lgw2/lgw2.github.io | 3e2b0fb849407c26a64afd8e97be0eff7ce07f9b | [
"MIT"
] | null | null | null | _teaching/csci127-summer-2020/readings/activities/raven_words_updated.py | lgw2/lgw2.github.io | 3e2b0fb849407c26a64afd8e97be0eff7ce07f9b | [
"MIT"
] | null | null | null | _teaching/csci127-summer-2020/readings/activities/raven_words_updated.py | lgw2/lgw2.github.io | 3e2b0fb849407c26a64afd8e97be0eff7ce07f9b | [
"MIT"
] | null | null | null | f = open('raven.txt', 'r')
# create an empty dictionary
count = {}
for line in f:
for word in line.split():
# remove punctuation
word = word.replace('_', '').replace('"', '').replace(',', '').replace('.', '')
word = word.replace('-', '').replace('?', '').replace('!', '').replace("'", "")
word = word.replace('(', '').replace(')', '').replace(':', '').replace('[', '')
word = word.replace(']', '').replace(';', '')
# ignore case
word = word.lower()
# add to dictionary
if word in count:
count[word] = count[word] + 1
else:
count[word] = 1
f.close()
# create an ordered list of keys
keys = list(count.keys())
keys.sort()
out = open('word_counts.txt', 'w')
# iterate over keys and write to file
for key in keys:
out.write(key + ' ' + str(count[key]) + '\n')
out.close()
| 24.054054 | 87 | 0.507865 |
7958f5ee1defe1d52c82ca39a7d5cde1d24f524d | 25,643 | py | Python | test/functional/sendheaders.py | wolfoxonly/dk | 090c9862a1a14c187eefcb8285e43601db5ed35b | [
"MIT"
] | null | null | null | test/functional/sendheaders.py | wolfoxonly/dk | 090c9862a1a14c187eefcb8285e43601db5ed35b | [
"MIT"
] | null | null | null | test/functional/sendheaders.py | wolfoxonly/dk | 090c9862a1a14c187eefcb8285e43601db5ed35b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Dealtoken Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes:
- node0 is the node-under-test. We create two p2p connections to it. The
first p2p connection is a control and should only ever receive inv's. The
second p2p connection tests the headers sending logic.
- node1 is used to create reorgs.
test_null_locators
==================
Sends two getheaders requests with null locator values. First request's hashstop
value refers to validated block, while second request's hashstop value refers to
a block which hasn't been validated. Verifies only the first request returns
headers.
test_nonnull_locators
=====================
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.mininode import (
CBlockHeader,
CInv,
NODE_WITNESS,
network_thread_start,
P2PInterface,
mininode_lock,
msg_block,
msg_getblocks,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_sendheaders,
)
from test_framework.test_framework import DealtokenTestFramework
from test_framework.util import (
assert_equal,
sync_blocks,
wait_until,
)
# Timeout (seconds) used when waiting for the node's direct-fetch getdata
# response after announcing headers.
DIRECT_FETCH_RESPONSE_TIME = 0.05
class BaseNode(P2PInterface):
    """P2P connection that records how blocks are announced to it.

    Tracks the hash of the last block announced via either an inv or a
    headers message, so tests can assert on the announcement method used.
    """

    def __init__(self):
        super().__init__()
        # True once any block announcement (inv or non-empty headers) arrived.
        self.block_announced = False
        # Hash (int) of the most recently announced block, or None.
        self.last_blockhash_announced = None

    def send_get_data(self, block_hashes):
        """Request data for a list of block hashes."""
        msg = msg_getdata()
        for x in block_hashes:
            msg.inv.append(CInv(2, x))
        self.send_message(msg)

    def send_get_headers(self, locator, hashstop):
        """Send a getheaders message with the given locator and hashstop."""
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.send_message(msg)

    def send_block_inv(self, blockhash):
        """Announce a single block hash to the peer via an inv message."""
        msg = msg_inv()
        msg.inv = [CInv(2, blockhash)]
        self.send_message(msg)

    def send_header_for_blocks(self, new_blocks):
        """Announce the given blocks to the peer via a headers message."""
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)

    def send_getblocks(self, locator):
        """Send a getblocks message with the given locator."""
        getblocks_message = msg_getblocks()
        getblocks_message.locator.vHave = locator
        self.send_message(getblocks_message)

    def wait_for_getdata(self, hash_list, timeout=60):
        """Wait until a getdata for exactly hash_list has been received."""
        if hash_list == []:
            return
        test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_block_announcement(self, block_hash, timeout=60):
        """Wait until block_hash has been announced (via inv or headers)."""
        test_function = lambda: self.last_blockhash_announced == block_hash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def on_inv(self, message):
        # Record the announcement; remember the hash of the last inv entry.
        self.block_announced = True
        self.last_blockhash_announced = message.inv[-1].hash

    def on_headers(self, message):
        # An empty headers message does not count as an announcement.
        if len(message.headers):
            self.block_announced = True
            message.headers[-1].calc_sha256()
            self.last_blockhash_announced = message.headers[-1].sha256

    def clear_last_announcement(self):
        """Forget any previously recorded inv/headers announcement."""
        with mininode_lock:
            self.block_announced = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)

    def check_last_announcement(self, headers=None, inv=None):
        """Test whether the last announcement received had the right header or the right inv.

        inv and headers should be lists of block hashes."""
        test_function = lambda: self.block_announced
        wait_until(test_function, timeout=60, lock=mininode_lock)
        with mininode_lock:
            self.block_announced = False
            compare_inv = []
            if "inv" in self.last_message:
                compare_inv = [x.hash for x in self.last_message["inv"].inv]
            if inv is not None:
                assert_equal(compare_inv, inv)
            compare_headers = []
            if "headers" in self.last_message:
                compare_headers = [x.sha256 for x in self.last_message["headers"].headers]
            if headers is not None:
                assert_equal(compare_headers, headers)
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)
class SendHeadersTest(DealtokenTestFramework):
    """Functional test for block announcement behavior (inv vs. headers).

    node0 is the node under test, with two p2p connections attached (a
    control connection and a headers-test connection); node1 is used to
    create reorgs. See the module docstring for the full test plan.
    """

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2

    def mine_blocks(self, count):
        """Mine count blocks and return the new tip."""
        # Clear out last block announcement from each p2p listener
        for p2p in self.nodes[0].p2ps:
            p2p.clear_last_announcement()
        self.nodes[0].generate(count)
        return int(self.nodes[0].getbestblockhash(), 16)

    def mine_reorg(self, length):
        """Mine a reorg that invalidates length blocks (replacing them with length+1 blocks).

        Note: we clear the state of our p2p connections after the
        to-be-reorged-out blocks are mined, so that we don't break later tests.
        return the list of block hashes newly mined."""
        self.nodes[0].generate(length)  # make sure all invalidated blocks are node0's
        sync_blocks(self.nodes, wait=0.1)
        for p2p in self.nodes[0].p2ps:
            p2p.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
            p2p.clear_last_announcement()
        tip_height = self.nodes[1].getblockcount()
        hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
        self.nodes[1].invalidateblock(hash_to_invalidate)
        all_hashes = self.nodes[1].generate(length + 1)  # Must be longer than the orig chain
        sync_blocks(self.nodes, wait=0.1)
        return [int(x, 16) for x in all_hashes]

    def run_test(self):
        """Attach the two p2p connections and run both sub-tests."""
        # Setup the p2p connections and start up the network thread.
        inv_node = self.nodes[0].add_p2p_connection(BaseNode())
        # Make sure NODE_NETWORK is not set for test_node, so no block download
        # will occur outside of direct fetching
        test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS)
        network_thread_start()
        # Test logic begins here
        inv_node.wait_for_verack()
        test_node.wait_for_verack()
        # Ensure verack's have been processed by our peer
        inv_node.sync_with_ping()
        test_node.sync_with_ping()
        self.test_null_locators(test_node, inv_node)
        self.test_nonnull_locators(test_node, inv_node)

    def test_null_locators(self, test_node, inv_node):
        """Null-locator getheaders returns headers only for a validated hashstop."""
        tip = self.nodes[0].getblockheader(self.nodes[0].generate(1)[0])
        tip_hash = int(tip["hash"], 16)
        inv_node.check_last_announcement(inv=[tip_hash], headers=[])
        test_node.check_last_announcement(inv=[tip_hash], headers=[])
        self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
        test_node.clear_last_announcement()
        test_node.send_get_headers(locator=[], hashstop=tip_hash)
        test_node.check_last_announcement(headers=[tip_hash])
        self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
        block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
        block.solve()
        test_node.send_header_for_blocks([block])
        test_node.clear_last_announcement()
        test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
        test_node.sync_with_ping()
        assert_equal(test_node.block_announced, False)
        inv_node.clear_last_announcement()
        test_node.send_message(msg_block(block))
        inv_node.check_last_announcement(inv=[int(block.hash, 16)], headers=[])

    def test_nonnull_locators(self, test_node, inv_node):
        """Exercise Parts 1-5 of the test plan (see module docstring)."""
        tip = int(self.nodes[0].getbestblockhash(), 16)

        # PART 1
        # 1. Mine a block; expect inv announcements each time
        self.log.info("Part 1: headers don't start before sendheaders message...")
        for i in range(4):
            old_tip = tip
            tip = self.mine_blocks(1)
            inv_node.check_last_announcement(inv=[tip], headers=[])
            test_node.check_last_announcement(inv=[tip], headers=[])
            # Try a few different responses; none should affect next announcement
            if i == 0:
                # first request the block
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 1:
                # next try requesting header and block
                test_node.send_get_headers(locator=[old_tip], hashstop=tip)
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
                test_node.clear_last_announcement()  # since we requested headers...
            elif i == 2:
                # this time announce own block via headers
                height = self.nodes[0].getblockcount()
                last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
                block_time = last_time + 1
                new_block = create_block(tip, create_coinbase(height + 1), block_time)
                new_block.solve()
                test_node.send_header_for_blocks([new_block])
                test_node.wait_for_getdata([new_block.sha256])
                test_node.send_message(msg_block(new_block))
                test_node.sync_with_ping()  # make sure this block is processed
                inv_node.clear_last_announcement()
                test_node.clear_last_announcement()
        self.log.info("Part 1: success!")

        self.log.info("Part 2: announce blocks with headers after sendheaders message...")
        # PART 2
        # 2. Send a sendheaders message and test that headers announcements
        # commence and keep working.
        test_node.send_message(msg_sendheaders())
        prev_tip = int(self.nodes[0].getbestblockhash(), 16)
        test_node.send_get_headers(locator=[prev_tip], hashstop=0)
        test_node.sync_with_ping()

        # Now that we've synced headers, headers announcements should work
        tip = self.mine_blocks(1)
        inv_node.check_last_announcement(inv=[tip], headers=[])
        test_node.check_last_announcement(headers=[tip])

        height = self.nodes[0].getblockcount() + 1
        block_time += 10  # Advance far enough ahead
        for i in range(10):
            # Mine i blocks, and alternate announcing either via
            # inv (of tip) or via headers. After each, new blocks
            # mined by the node should successfully be announced
            # with block header, even though the blocks are never requested
            for j in range(2):
                blocks = []
                for _ in range(i + 1):
                    blocks.append(create_block(tip, create_coinbase(height), block_time))
                    blocks[-1].solve()
                    tip = blocks[-1].sha256
                    block_time += 1
                    height += 1
                if j == 0:
                    # Announce via inv
                    test_node.send_block_inv(tip)
                    test_node.wait_for_getheaders()
                    # Should have received a getheaders now
                    test_node.send_header_for_blocks(blocks)
                    # Test that duplicate inv's won't result in duplicate
                    # getdata requests, or duplicate headers announcements
                    for blk in blocks:
                        inv_node.send_block_inv(blk.sha256)
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    inv_node.sync_with_ping()
                else:
                    # Announce via headers
                    test_node.send_header_for_blocks(blocks)
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    # Test that duplicate headers won't result in duplicate
                    # getdata requests (the check is further down)
                    inv_node.send_header_for_blocks(blocks)
                    inv_node.sync_with_ping()
                for blk in blocks:
                    test_node.send_message(msg_block(blk))
                test_node.sync_with_ping()
                inv_node.sync_with_ping()
                # This block should not be announced to the inv node (since it also
                # broadcast it)
                assert "inv" not in inv_node.last_message
                assert "headers" not in inv_node.last_message
                tip = self.mine_blocks(1)
                inv_node.check_last_announcement(inv=[tip], headers=[])
                test_node.check_last_announcement(headers=[tip])
                height += 1
                block_time += 1
        self.log.info("Part 2: success!")

        self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
        # PART 3.  Headers announcements can stop after large reorg, and resume after
        # getheaders or inv from peer.
        for j in range(2):
            # First try mining a reorg that can propagate with header announcement
            new_block_hashes = self.mine_reorg(length=7)
            tip = new_block_hashes[-1]
            inv_node.check_last_announcement(inv=[tip], headers=[])
            test_node.check_last_announcement(headers=new_block_hashes)

            block_time += 8

            # Mine a too-large reorg, which should be announced with a single inv
            new_block_hashes = self.mine_reorg(length=8)
            tip = new_block_hashes[-1]
            inv_node.check_last_announcement(inv=[tip], headers=[])
            test_node.check_last_announcement(inv=[tip], headers=[])

            block_time += 9

            # Zero-pad to 64 hex digits: block hashes can have leading zeros,
            # and an unpadded "%02x" string would not be a valid hash argument
            # for the getblock RPC.
            fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
            fork_point = int(fork_point, 16)

            # Use getblocks/getdata
            test_node.send_getblocks(locator=[fork_point])
            test_node.check_last_announcement(inv=new_block_hashes, headers=[])
            test_node.send_get_data(new_block_hashes)
            test_node.wait_for_block(new_block_hashes[-1])

            for i in range(3):
                # Mine another block, still should get only an inv
                tip = self.mine_blocks(1)
                inv_node.check_last_announcement(inv=[tip], headers=[])
                test_node.check_last_announcement(inv=[tip], headers=[])
                if i == 0:
                    # Just get the data -- shouldn't cause headers announcements to resume
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 1:
                    # Send a getheaders message that shouldn't trigger headers announcements
                    # to resume (best header sent will be too old)
                    test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 2:
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                    # This time, try sending either a getheaders to trigger resumption
                    # of headers announcements, or mine a new block and inv it, also
                    # triggering resumption of headers announcements.
                    if j == 0:
                        test_node.send_get_headers(locator=[tip], hashstop=0)
                        test_node.sync_with_ping()
                    else:
                        test_node.send_block_inv(tip)
                        test_node.sync_with_ping()
            # New blocks should now be announced with header
            tip = self.mine_blocks(1)
            inv_node.check_last_announcement(inv=[tip], headers=[])
            test_node.check_last_announcement(headers=[tip])
        self.log.info("Part 3: success!")

        self.log.info("Part 4: Testing direct fetch behavior...")
        tip = self.mine_blocks(1)
        height = self.nodes[0].getblockcount() + 1
        last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
        block_time = last_time + 1

        # Create 2 blocks.  Send the blocks, then send the headers.
        blocks = []
        for _ in range(2):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
            inv_node.send_message(msg_block(blocks[-1]))

        inv_node.sync_with_ping()  # Make sure blocks are processed
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        # should not have received any getdata messages
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # This time, direct fetch should work
        blocks = []
        for _ in range(3):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME)

        for blk in blocks:
            test_node.send_message(msg_block(blk))
        test_node.sync_with_ping()

        # Now announce a header that forks the last two blocks
        tip = blocks[0].sha256
        height -= 1
        blocks = []

        # Create extra blocks for later
        for _ in range(20):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        # Announcing one block on fork should not trigger direct fetch
        # (less work than tip)
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[0:1])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # Announcing one more block on fork should trigger direct fetch for
        # both blocks (same work as tip)
        test_node.send_header_for_blocks(blocks[1:2])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME)

        # Announcing 16 more headers should trigger direct fetch for 14 more
        # blocks
        test_node.send_header_for_blocks(blocks[2:18])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME)

        # Announcing 1 more header should not trigger any response
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[18:19])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        self.log.info("Part 4: success!")

        # Now deliver all those blocks we announced.
        for blk in blocks:
            test_node.send_message(msg_block(blk))

        self.log.info("Part 5: Testing handling of unconnecting headers")
        # First we test that receipt of an unconnecting header doesn't prevent
        # chain sync.
        for i in range(10):
            test_node.last_message.pop("getdata", None)
            blocks = []
            # Create two more blocks.
            for j in range(2):
                blocks.append(create_block(tip, create_coinbase(height), block_time))
                blocks[-1].solve()
                tip = blocks[-1].sha256
                block_time += 1
                height += 1
            # Send the header of the second block -> this won't connect.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[1]])
            test_node.wait_for_getheaders()
            test_node.send_header_for_blocks(blocks)
            test_node.wait_for_getdata([x.sha256 for x in blocks])
            for blk in blocks:
                test_node.send_message(msg_block(blk))
            test_node.sync_with_ping()
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)

        blocks = []
        # Now we test that if we repeatedly don't send connecting headers, we
        # don't go into an infinite loop trying to get them to connect.
        MAX_UNCONNECTING_HEADERS = 10
        for j in range(MAX_UNCONNECTING_HEADERS + 1):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        for i in range(1, MAX_UNCONNECTING_HEADERS):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i]])
            test_node.wait_for_getheaders()

        # Next header will connect, should re-set our count:
        test_node.send_header_for_blocks([blocks[0]])

        # Remove the first two entries (blocks[1] would connect):
        blocks = blocks[2:]

        # Now try to see how many unconnecting headers we can send
        # before we get disconnected.  Should be 5*MAX_UNCONNECTING_HEADERS
        for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i % len(blocks)]])
            test_node.wait_for_getheaders()

        # Eventually this stops working.
        test_node.send_header_for_blocks([blocks[-1]])

        # Should get disconnected
        test_node.wait_for_disconnect()

        self.log.info("Part 5: success!")

        # Finally, check that the inv node never received a getdata request,
        # throughout the test
        assert "getdata" not in inv_node.last_message
if __name__ == '__main__':
    # Standard functional-test entry point: run the test when invoked directly.
    SendHeadersTest().main()
| 42.809683 | 130 | 0.636977 |
7958f7ee15ea14ac7b8f536ccb18dbbbe6f987d0 | 32,020 | py | Python | tensorflow/python/eager/core_test.py | plopresti/tensorflow | 8b0c84d30d957596cbb3bcac9245e114c3f0b65b | [
"Apache-2.0"
] | 1 | 2019-07-15T08:40:24.000Z | 2019-07-15T08:40:24.000Z | tensorflow/python/eager/core_test.py | plopresti/tensorflow | 8b0c84d30d957596cbb3bcac9245e114c3f0b65b | [
"Apache-2.0"
] | 3 | 2020-10-14T00:35:40.000Z | 2022-02-09T22:35:09.000Z | tensorflow/python/eager/core_test.py | plopresti/tensorflow | 8b0c84d30d957596cbb3bcac9245e114c3f0b65b | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import pickle
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
def execute(op_name, num_outputs, inputs, attrs=None):
  """Eagerly execute `op_name` in the current eager context."""
  return execute_lib.execute(
      op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
  """Sample a float32 TruncatedNormal tensor of the given shape (seed 0)."""
  return execute(
      b'TruncatedNormal',
      1,
      inputs=[shape],
      attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
             shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
def current_device():
  """Return the device on which a freshly created constant is placed."""
  return constant_op.constant(1.).device
def configure_virtual_cpus():
  """Split the first physical CPU into two virtual devices for the tests."""
  cpus = config.list_physical_devices('CPU')
  # Set 2 virtual CPUs
  config.set_virtual_device_configuration(cpus[0], [
      context.VirtualDeviceConfiguration(),
      context.VirtualDeviceConfiguration()
  ])
class TFETest(test_util.TensorFlowTestCase):
  def setUp(self):
    """Ensure every test in this case sees two virtual CPU devices."""
    super(TFETest, self).setUp()
    configure_virtual_cpus()
def _test_hashable(self, a, b, hashable):
if hashable:
self.assertIsInstance(b, collections.Hashable)
self.assertLen(set([a, b]), 2)
else:
# TODO(gjn): Figure out how to make this work for tf.Tensor
# self.assertNotIsInstance(b, collections.Hashable)
with self.assertRaisesRegexp(TypeError, 'unhashable'):
set([a, b])
def testEquality(self):
default = ops.Tensor._USE_EQUALITY
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(1.0)
constant_b = constant_op.constant(1.0)
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(1.0)
variable_b = variables.Variable(1.0)
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
# We only test numpy behaviour in v2 mode since we'd like to match that.
numpy_a = np.array(1.0)
numpy_b = np.array(1.0)
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
def testEqualityNan(self):
default = ops.Tensor._USE_EQUALITY
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertNotEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(float('nan'))
constant_b = constant_op.constant(float('nan'))
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(float('nan'))
variable_b = variables.Variable(float('nan'))
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
numpy_a = np.array(float('nan'))
numpy_b = np.array(float('nan'))
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.execution_mode)
ctx.execution_mode = context.ASYNC
self.assertEqual(context.ASYNC, ctx.execution_mode)
ctx.execution_mode = context.SYNC
self.assertEqual(context.SYNC, ctx.execution_mode)
self.assertIsNone(ctx.summary_writer)
ctx.summary_writer = 'mock'
self.assertEqual('mock', ctx.summary_writer)
self.assertIsNone(ctx.summary_recording)
ctx.summary_recording = 'mock'
self.assertEqual('mock', ctx.summary_recording)
self.assertIsNone(ctx.summary_step)
ctx.summary_step = 'mock'
self.assertEqual('mock', ctx.summary_step)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testRunMetadata(self):
context.enable_run_metadata()
t = constant_op.constant(1.0)
_ = t + t # Runs an operation which will be in the RunMetadata
run_metadata = context.export_run_metadata()
context.disable_run_metadata()
step_stats = run_metadata.step_stats
self.assertGreater(len(step_stats.dev_stats), 0)
cpu_stats = step_stats.dev_stats[0]
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
cpu_stats.device)
self.assertGreaterEqual(len(cpu_stats.node_stats), 1)
def testMultiCpuPlacement(self):
with ops.device('cpu:1'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1')
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
@test_util.run_gpu_only
def testShouldCopy(self):
with ops.device('gpu:0'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
# The value we're testing y.device against will depend on what the behavior
# of not explicitly specifying a device in the context is. This behavior is
# subject to change (for example, in the future we may want to use GPUs, if
# available, when no device is explicitly provided)
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
@test_util.run_gpu_only
def testInt32GPU(self):
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(),
ctx.scope_name,
ctx.summary_writer,
ctx.summary_recording,
ctx.summary_step,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
@test_util.run_gpu_only
def testContextConfig(self):
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
self.assertEquals(0, ctx.num_gpus())
def testPickle(self):
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, 't.pickle')
with open(fname, 'wb') as f:
t = constant_op.constant(10.0)
pickle.dump(t, f)
with open(fname, 'rb') as f:
t = pickle.load(f)
self.assertAllEqual(t.numpy(), 10.0)
@test_util.run_gpu_only
def testDevicePlacementEnforcesConsistency(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
cpu.__enter__()
self.assertEndsWith(current_device(), 'CPU:0')
gpu.__enter__()
self.assertEndsWith(current_device(), 'GPU:0')
with self.assertRaisesRegexp(
RuntimeError, 'Exiting device scope without proper scope nesting'):
cpu.__exit__()
self.assertEndsWith(current_device(), 'GPU:0')
gpu.__exit__()
self.assertEndsWith(current_device(), 'CPU:0')
@test_util.run_gpu_only
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
with cpu:
with gpu:
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'CPU:0')
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
@test_util.run_gpu_only
def testTensorPlacement(self):
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
# Add would fail if t2 were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
@test_util.run_gpu_only
def testResourceTensorPlacement(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
# Check that even though we specified the cpu device we'll run the read op
# in the device where the handle is.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
@test_util.run_gpu_only
def testCopyBetweenDevices(self):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.async_wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.async_wait()
context.async_clear_error()
@test_util.run_gpu_only
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
def testPyFunctionNullContext(self):
def simple_fn(unused_handle):
return 1.
@def_function.function
def test_fn(v):
script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)
return 1.
test_var = variables.Variable([2., 3.])
self.assertAllEqual(test_fn(test_var), 1.0)
@test_util.run_gpu_only
def testNumpyForceCPU(self):
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
context.set_execution_mode(context.ASYNC)
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))
context.async_wait()
context.async_clear_error()
context.context().execution_mode = context.SYNC
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
@test_util.run_gpu_only
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
self.assertEquals(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
@test_util.run_gpu_only
def testOperationWithNoInputsRunsOnDevice(self):
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op CPU and copy result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
self.assertEquals(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
def testConvertMixedEagerTensorsWithVariables(self):
var = resource_variable_ops.ResourceVariable(1.0)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
['foo', var], context.context())
self.assertAllEqual([dtypes.string, dtypes.float32], types)
for t in tensors:
self.assertIsInstance(t, ops.EagerTensor)
# TODO(b/123637108): re-enable
@test_util.run_gpu_only
def disabled_testSmallIntegerOpsForcedToCPU(self):
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op forced to CPU since all constants are integers and small.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:CPU:0')
a = array_ops.zeros((8, 10), dtype=dtypes.int64)
b = array_ops.ones((8, 10), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the tensors are larger than 64 elements.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the constants are not integers.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
def testExecutionModeIsStoredThreadLocal(self):
cv = threading.Condition()
count = [0]
num_threads = 10
def execution_mode_test(cond, count, num_threads, ctx, mode):
cond.acquire()
# Ensure that all threads set their mode simultaneously
# Note that this is not a simple assignment, as the execution_mode is an
# @property with a custom setter.
ctx.execution_mode = mode
count[0] = count[0] + 1
if count[0] < num_threads:
cond.wait()
else:
cond.notify_all()
cond.release()
self.assertEqual(ctx.execution_mode, mode)
ctx = context.Context()
threads = []
for i in range(num_threads):
t = threading.Thread(
target=execution_mode_test,
args=(cv, count, num_threads, ctx,
context.SYNC if i % 2 == 0 else context.ASYNC))
t.start()
threads.append(t)
for t in threads:
t.join()
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def setUp(self):
super(SendRecvTest, self).setUp()
configure_virtual_cpus()
def testBasic(self):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = constant_op.constant(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
class EagerTensorCacheTest(test_util.TensorFlowTestCase):
def setUp(self):
super(EagerTensorCacheTest, self).setUp()
configure_virtual_cpus()
def testCacheSkipsTensorsTooLarge(self):
cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)
cache.put('1', array_ops.zeros((2, 2)))
self.assertEqual(cache.get('1'), None)
cache.put('2', array_ops.zeros((2)))
self.assertNotEqual(cache.get('2'), None)
if __name__ == '__main__':
test.main()
| 33.991507 | 115 | 0.656059 |
7958f87ce89ecf5faa5a0038ffdafbd84e91299e | 8,083 | py | Python | tests/test_qmexcitedstatetrajectories.py | PotentialParadox/pynasqm | 1bd51299b6ca7f8229d8a15428515d53a358903c | [
"MIT"
] | 1 | 2020-03-13T22:34:03.000Z | 2020-03-13T22:34:03.000Z | tests/test_qmexcitedstatetrajectories.py | PotentialParadox/pynasqm | 1bd51299b6ca7f8229d8a15428515d53a358903c | [
"MIT"
] | null | null | null | tests/test_qmexcitedstatetrajectories.py | PotentialParadox/pynasqm | 1bd51299b6ca7f8229d8a15428515d53a358903c | [
"MIT"
] | null | null | null | '''
Units tests for the cpptraj wrappers for nasqm
'''
import os
import subprocess
import types
import pytest
from pynasqm.trajectories.qmexcitedstatetrajectories import QmExcitedStateTrajectories
from pynasqm.trajectories.create_restarts import create_restarts_from_parent
from pynasqm.inputceon import InputCeon
from pynasqm.utils import mkdir, touch
def setup_module(module):
'''
Switch to test directory
'''
os.chdir("tests/qmExcitedTrajectories")
def teardown_module(module):
'''
Return to main directory
'''
os.chdir("../..")
@pytest.fixture
def userInput():
user_input = types.SimpleNamespace()
user_input.is_qmmm = False
user_input.is_hpc = True
user_input.number_nodes = 1
user_input.processors_per_node = 16
user_input.memory_per_node = "2000mb"
user_input.max_jobs = 4
user_input.job_name = "MyJob"
user_input.walltime = "00:01:00"
user_input.qos = "roitberg"
user_input.email = "dtracy.uf@gmail.com"
user_input.email_options = 2
user_input.n_steps_gs = 1
user_input.n_steps_print_gmcrd = 10
user_input.n_steps_to_print_gs = 15
user_input.qmexcited_traj_index_file = ""
user_input.time_step = 0.05
user_input.ground_state_run_time = 0.01
user_input.n_snapshots_ex = 2
user_input.n_mcrd_frames_es = 5
user_input.n_ground_runs = 2
user_input.n_flu_runs = 2
user_input.n_qmground_runs = 2
user_input.restart_attempt = 0
user_input.restrain_solvents = True
return user_input
@pytest.fixture
def inputCeon():
return InputCeon(amber_input='md_qmmm_amb.in', directory='./')
def test_fluCreateFromQmground(userInput, inputCeon):
'''
Create the restart files for the initial trajectory part of two trajectories
'''
mkdir("qmground")
mkdir("qmground/traj_1")
mkdir("qmground/traj_2")
mkdir("qmground/traj_1/restart_1")
mkdir("qmground/traj_2/restart_1")
mkdir("qmground/traj_1/nmr")
mkdir("qmground/traj_2/nmr")
touch("qmground/traj_1/restart_1/snap_for_qmground_t1_r2.rst")
touch("qmground/traj_2/restart_1/snap_for_qmground_t2_r2.rst")
open("qmground/traj_1/nmr/rst_1.dist", 'w').write("rst_1")
open("qmground/traj_2/nmr/rst_2.dist", 'w').write("rst_2")
open("qmground/traj_1/nmr/closest_1.txt", 'w').write("rst_1")
open("qmground/traj_2/nmr/closest_2.txt", 'w').write("rst_2")
flu_traj = QmExcitedStateTrajectories(userInput, inputCeon)
override = False
create_restarts_from_parent(flu_traj.traj_data, 0, override=True)
if not os.path.isfile("qmexcited/traj_1/restart_0/snap_for_qmexcited_t1_r0.rst"):
raise AssertionError("QmExcitedStateTrajectory did not create snap_for_qmexcited_t1_r0.rst")
if not os.path.isfile("qmexcited/traj_2/restart_0/snap_for_qmexcited_t2_r0.rst"):
raise AssertionError("QmExcitedStateTrajectory did not create snap_for_qmexcited_t2_r0.rst")
if os.path.isdir("ground_snap.3"):
raise AssertionError("QmExcitedStateTrajectory created too many ground_snaps")
if os.path.isdir('"qmexcited/traj_3'):
raise AssertionError("QmExcitedStateTrajectory created too many directories")
if open("qmexcited/traj_1/nmr/rst_1.dist").read() != "rst_1":
raise AssertionError("QmExcitedStateTrajectory did not copy nmr data from qmground for traj 1")
if open("qmexcited/traj_2/nmr/rst_2.dist").read() != "rst_2":
raise AssertionError("QmExcitedStateTrajectory did not copy nmr data from qmground for traj 2")
subprocess.run(['rm', '-rf', 'qmexcited', './convert_to_crd.out', './convert_to_crd.out', 'qmground'])
def test_fluCreateFromQmGroundFail(userInput, inputCeon):
'''
Make sure the program doesn't crash if the qmground restart file wasn't created
In this instance the first trajectory didn't produce an appropriate output
'''
mkdir("qmground")
mkdir("qmground/traj_1")
mkdir("qmground/traj_2")
mkdir("qmground/traj_1/restart_1")
mkdir("qmground/traj_2/restart_1")
mkdir("qmground/traj_1/nmr")
mkdir("qmground/traj_2/nmr")
open("qmground/traj_1/nmr/rst_1.dist", 'w').write("rst_1")
open("qmground/traj_2/nmr/rst_2.dist", 'w').write("rst_2")
open("qmground/traj_1/nmr/closest_1.txt", 'w').write("rst_1")
open("qmground/traj_2/nmr/closest_2.txt", 'w').write("rst_2")
# Failed touch("qmground/traj_2/restart_1/snap_for_qmground_t1_r2.rst")
touch("qmground/traj_2/restart_1/snap_for_qmground_t2_r2.rst")
flu_traj = QmExcitedStateTrajectories(userInput, inputCeon)
create_restarts_from_parent(flu_traj.traj_data, 0, override=True)
# if not os.path.isfile("qmexcited/traj_1/restart_0/snap_for_qmexcited_t1_r0.rst"):
# raise AssertionError("QmExcitedStateTrajectory did not create a snap_for_qmexcited_t1_r0.rst Dummy")
if not os.path.isfile("qmexcited/traj_2/restart_0/snap_for_qmexcited_t2_r0.rst"):
raise AssertionError("QmExcitedStateTrajectory did not create snap_for_qmexcited_t2_r0.rst")
subprocess.run(['rm', '-rf', 'qmexcited', './convert_to_crd.out', './convert_to_crd.out', 'qmground'])
def test_fluCreateFromRestarts(userInput, inputCeon):
'''
Create the restart files for the second set of flu trajectories
'''
mkdir("flu")
mkdir("qmexcited/traj_1")
mkdir("qmexcited/traj_2")
mkdir("qmexcited/traj_1/restart_0")
mkdir("qmexcited/traj_2/restart_0")
mkdir("qmexcited/traj_1/nmr")
mkdir("qmexcited/traj_2/nmr")
touch("qmexcited/traj_1/restart_0/snap_for_qmexcited_t1_r1.rst")
touch("qmexcited/traj_2/restart_0/snap_for_qmexcited_t2_r1.rst")
open("qmexcited/traj_1/nmr/rst_1.dist", 'w').write("rst_1")
open("qmexcited/traj_2/nmr/rst_2.dist", 'w').write("rst_2")
open("qmexcited/traj_1/nmr/closest_1.txt", 'w').write("rst_1")
open("qmexcited/traj_2/nmr/closest_2.txt", 'w').write("rst_2")
userInput.restart_attempt = 1
flu_traj = QmExcitedStateTrajectories(userInput, inputCeon)
override = False
create_restarts_from_parent(flu_traj.traj_data, 0, override=True)
if not os.path.isfile("qmexcited/traj_1/restart_1/snap_for_qmexcited_t1_r1.rst"):
raise AssertionError("QmExcitedStateTrajectory did not create snap_for_qmexcited_t1_r1.rst")
if not os.path.isfile("qmexcited/traj_2/restart_1/snap_for_qmexcited_t2_r1.rst"):
raise AssertionError("QmExcitedStateTrajectory did not create snap_for_qmexcited_t2_r1.rst")
if "rst_1" not in open("qmexcited/traj_1/nmr/rst_1.dist").read():
raise AssertionError("QmExcitedStateTrajectory updated nmr of traj_1 during the first restart")
if "rst_2" not in open("qmexcited/traj_2/nmr/rst_2.dist").read():
raise AssertionError("QmExcitedStateTrajectory updated nmr of traj_2 during the first restart")
if "rst_1" not in open("qmexcited/traj_1/nmr/closest_1.txt").read():
raise AssertionError("QmExcitedStateTrajectory updated nmr of traj_1 during the first restart")
if "rst_2" not in open("qmexcited/traj_2/nmr/closest_2.txt").read():
raise AssertionError("QmExcitedStateTrajectory updated nmr of traj_2 during the first restart")
subprocess.run(['rm', '-rf', 'qmexcited', './convert_to_crd.out', './convert_to_crd.out', 'qmground'])
def test_fluPrepareDynamics0(userInput, inputCeon):
'''
Prepare dynamics for the zeroth restart of two trajectories
'''
userInput.restart_attempt = 0
fluTraj = QmExcitedStateTrajectories(userInput, inputCeon)
# prepareScript returns a pair; only the generated slurm script matters here.
_, slurm_file = fluTraj.prepareScript()
# Compare only the last 10 lines of the script against the golden file,
# since the header presumably varies with the environment — TODO confirm.
result = "\n".join((slurm_file.splitlines())[-10:])
answer = open("1of2_slurm_attempt_test.sbatch").read()
assert result == answer
def test_fluPrepareDynamics1(userInput, inputCeon):
'''
Prepare dynamics for the first restart of two trajectories
'''
# Same flow as test_fluPrepareDynamics0, but for restart attempt 1 and
# its own golden script.
userInput.restart_attempt = 1
fluTraj = QmExcitedStateTrajectories(userInput, inputCeon)
_, slurm_file = fluTraj.prepareScript()
result = "\n".join((slurm_file.splitlines())[-10:])
answer = open("2of2_slurm_attempt_test.sbatch").read()
assert result == answer
| 45.666667 | 110 | 0.733515 |
7958f8913d5189d833372e3e601da3a945b063b2 | 9,050 | py | Python | sympy/testing/pytest.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | sympy/testing/pytest.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | sympy/testing/pytest.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | """py.test hacks to support XFAIL/XPASS"""
from __future__ import print_function, division
import sys
import functools
import os
import contextlib
import warnings
from sympy.core.compatibility import get_function_name
from sympy.utilities.exceptions import SymPyDeprecationWarning
ON_TRAVIS = os.getenv("TRAVIS_BUILD_NUMBER", None)
# Detect whether we are running under pytest.  The sys._running_pytest
# attribute is set by sympy's conftest; importing pytest alone is not enough.
try:
import pytest
USE_PYTEST = getattr(sys, "_running_pytest", False)
except ImportError:
USE_PYTEST = False
# When under pytest, simply re-export pytest's own helpers/markers so the
# rest of sympy can import them from one place.
if USE_PYTEST:
raises = pytest.raises
warns = pytest.warns
skip = pytest.skip
XFAIL = pytest.mark.xfail
SKIP = pytest.mark.skip
slow = pytest.mark.slow
nocache_fail = pytest.mark.nocache_fail
else:
# Not using pytest so define the things that would have been imported from
# there.
def raises(expectedException, code=None):
    """
    Tests that ``code`` raises the exception ``expectedException``.

    ``code`` may be a callable, such as a lambda expression or function
    name.

    If ``code`` is not given or None, ``raises`` will return a context
    manager for use in ``with`` statements; the code to execute then
    comes from the scope of the ``with``.

    ``raises()`` does nothing if the callable raises the expected exception,
    otherwise it raises an AssertionError.

    Examples
    ========

    >>> from sympy.testing.pytest import raises

    >>> raises(ZeroDivisionError, lambda: 1/0)
    >>> raises(ZeroDivisionError, lambda: 1/2)
    Traceback (most recent call last):
    ...
    Failed: DID NOT RAISE

    >>> with raises(ZeroDivisionError):
    ...     n = 1/0
    >>> with raises(ZeroDivisionError):
    ...     n = 1/2
    Traceback (most recent call last):
    ...
    Failed: DID NOT RAISE

    Note that you cannot test multiple statements via
    ``with raises``:

    >>> with raises(ZeroDivisionError):
    ...     n = 1/0    # will execute and raise, aborting the ``with``
    ...     n = 9999/0 # never executed

    To test multiple statements, you'll need a separate ``with``
    for each:

    >>> with raises(ZeroDivisionError):
    ...     n = 1/0    # will execute and raise
    >>> with raises(ZeroDivisionError):
    ...     n = 9999/0 # will also execute and raise
    """
    # No code object given: hand back a context manager instead.
    if code is None:
        return RaisesContext(expectedException)

    # Strings used to be accepted and exec'd; reject them with guidance.
    if isinstance(code, str):
        raise TypeError(
            "'raises(xxx, \"code\")' has been phased out; "
            "change 'raises(xxx, \"expression\")' "
            "to 'raises(xxx, lambda: expression)', "
            "'raises(xxx, \"statement\")' "
            "to 'with raises(xxx): statement'"
        )

    if not callable(code):
        raise TypeError("raises() expects a callable for the 2nd argument.")

    try:
        code()
    except expectedException:
        return
    raise Failed("DID NOT RAISE")
class RaisesContext(object):
# Context manager returned by ``raises`` when no callable is supplied.
# Succeeds (suppresses) when the body raises a subclass of the expected
# exception; raises Failed when the body raises nothing at all.
def __init__(self, expectedException):
self.expectedException = expectedException
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
# exc_type is None when the with-body completed without raising.
if exc_type is None:
raise Failed("DID NOT RAISE")
# Truthy return suppresses the exception iff it is the expected kind;
# unexpected exceptions propagate unchanged.
return issubclass(exc_type, self.expectedException)
# Marker exceptions mirroring pytest test outcomes when pytest is absent.
class XFail(Exception):
# Raised by XFAIL-wrapped tests that failed as expected.
pass
class XPass(Exception):
# Raised by XFAIL-wrapped tests that unexpectedly passed.
pass
class Skipped(Exception):
# Raised by skip()/SKIP() to mark a test as skipped.
pass
class Failed(Exception):
# Raised by raises()/warns() when the expected exception/warning
# did not occur.
pass
def XFAIL(func):
# Decorator marking ``func`` as an expected failure (non-pytest fallback).
def wrapper():
try:
func()
except Exception as e:
message = str(e)
# Any non-timeout exception means the test failed as expected.
if message != "Timeout":
raise XFail(get_function_name(func))
else:
# A "Timeout" message is treated as a skip, not an xfail.
raise Skipped("Timeout")
# Reaching here means the test passed even though it was expected
# to fail — report it as an unexpected pass.
raise XPass(get_function_name(func))
wrapper = functools.update_wrapper(wrapper, func)
return wrapper
# Skip the current test with the given reason.
# NOTE(review): the parameter name shadows the builtin ``str``; renaming it
# would change the keyword interface, so it is only flagged here.
def skip(str):
raise Skipped(str)
def SKIP(reason):
    """Similar to ``skip()``, but this is a decorator. """
    def wrapper(func):
        # The wrapped test never runs; it immediately reports a skip,
        # while keeping the original function's metadata.
        @functools.wraps(func)
        def func_wrapper():
            raise Skipped(reason)
        return func_wrapper
    return wrapper
def slow(func):
    """Decorator flagging ``func`` as a slow test (non-pytest fallback).

    Sets the ``_slow`` attribute that the test runner inspects and wraps
    the function so its metadata (name, docstring) is preserved.
    """
    func._slow = True

    @functools.wraps(func)
    def func_wrapper():
        func()

    # Keep an explicit back-reference to the undecorated test.
    func_wrapper.__wrapped__ = func
    return func_wrapper
# Identity decorator: outside pytest the marker has no effect.
def nocache_fail(func):
"Dummy decorator for marking tests that fail when cache is disabled"
return func
@contextlib.contextmanager
def warns(warningcls, **kwargs):
"""Like raises but tests that warnings are emitted.
>>> from sympy.testing.pytest import warns
>>> import warnings
>>> with warns(UserWarning):
... warnings.warn('deprecated', UserWarning)
>>> with warns(UserWarning):
... pass
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type UserWarning\
was emitted. The list of emitted warnings is: [].
"""
# Optional ``match`` regex restricts which messages count; any other
# keyword argument is an error.
match = kwargs.pop("match", "")
if kwargs:
raise TypeError("Invalid keyword arguments: %s" % kwargs)
# Absorbs all warnings in warnrec
with warnings.catch_warnings(record=True) as warnrec:
# Hide all warnings but make sure that our warning is emitted.
# Filter order matters: the "always" rule is added after "ignore"
# so it takes precedence for the expected category/message.
warnings.simplefilter("ignore")
warnings.filterwarnings("always", match, warningcls)
# Now run the test
yield
# Raise if expected warning not found
if not any(issubclass(w.category, warningcls) for w in warnrec):
msg = (
"Failed: DID NOT WARN."
" No warnings of type %s was emitted."
" The list of emitted warnings is: %s."
) % (warningcls, [w.message for w in warnrec])
raise Failed(msg)
@contextlib.contextmanager
def warns_deprecated_sympy():
"""Shorthand for ``warns(SymPyDeprecationWarning)``
This is the recommended way to test that ``SymPyDeprecationWarning`` is
emitted for deprecated features in SymPy. To test for other warnings use
``warns``. To suppress warnings without asserting that they are emitted
use ``ignore_warnings``.
>>> from sympy.testing.pytest import warns_deprecated_sympy
>>> from sympy.utilities.exceptions import SymPyDeprecationWarning
>>> import warnings
>>> with warns_deprecated_sympy():
... SymPyDeprecationWarning("Don't use", feature="old thing",
... deprecated_since_version="1.0", issue=123).warn()
>>> with warns_deprecated_sympy():
... pass
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type \
SymPyDeprecationWarning was emitted. The list of emitted warnings is: [].
"""
# Thin wrapper: delegate all checking to ``warns``.
with warns(SymPyDeprecationWarning):
yield
@contextlib.contextmanager
def ignore_warnings(warningcls):
    """Context manager to suppress warnings during tests.

    This function is useful for suppressing warnings during tests. The warns
    function should be used to assert that a warning is raised. The
    ignore_warnings function is useful in situation when the warning is not
    guaranteed to be raised (e.g. on importing a module) or if the warning
    comes from third-party code.

    When the warning is coming (reliably) from SymPy the warns function should
    be preferred to ignore_warnings.

    >>> from sympy.testing.pytest import ignore_warnings
    >>> import warnings

    Here's a warning:

    >>> with warnings.catch_warnings():  # reset warnings in doctest
    ...     warnings.simplefilter('error')
    ...     warnings.warn('deprecated', UserWarning)
    Traceback (most recent call last):
    ...
    UserWarning: deprecated

    Let's suppress it with ignore_warnings:

    >>> with warnings.catch_warnings():  # reset warnings in doctest
    ...     warnings.simplefilter('error')
    ...     with ignore_warnings(UserWarning):
    ...         warnings.warn('deprecated', UserWarning)

    (No warning emitted)
    """
    # Record everything emitted inside the with-body so nothing escapes yet.
    with warnings.catch_warnings(record=True) as captured:
        # Make sure our warning doesn't get filtered out.
        warnings.simplefilter("always", warningcls)
        yield

    # Re-emit anything that is not of the suppressed category.
    for caught in captured:
        if not issubclass(caught.category, warningcls):
            warnings.warn_explicit(
                caught.message, caught.category, caught.filename,
                caught.lineno)
| 31.314879 | 80 | 0.613702 |
7958f8b566b77bb83d1f6f9f41f9bcf25aa8d4cb | 14,837 | py | Python | src/mist/api/keys/views.py | cc-daveloper/mist.io_mist.api | d3f9b8d478f23bf811c0bc6d3078e512aa975f86 | [
"Apache-2.0"
] | 1 | 2019-04-10T11:37:25.000Z | 2019-04-10T11:37:25.000Z | src/mist/api/keys/views.py | d-mo/mist.api | d3f9b8d478f23bf811c0bc6d3078e512aa975f86 | [
"Apache-2.0"
] | 3 | 2021-04-07T23:15:17.000Z | 2021-09-23T23:21:45.000Z | src/mist/api/keys/views.py | cc-daveloper/mist.io_mist.api | d3f9b8d478f23bf811c0bc6d3078e512aa975f86 | [
"Apache-2.0"
] | null | null | null | import mongoengine as me
from pyramid.response import Response
from mist.api.clouds.models import Cloud
from mist.api.machines.models import Machine
from mist.api.keys.models import SignedSSHKey, SSHKey, Key
from mist.api.auth.methods import auth_context_from_request
from mist.api.helpers import view_config, params_from_request
from mist.api.helpers import transform_key_machine_associations
from mist.api.keys.methods import filter_list_keys
from mist.api.keys.methods import delete_key as m_delete_key
from mist.api.exceptions import PolicyUnauthorizedError
from mist.api.exceptions import BadRequestError, KeyParameterMissingError
from mist.api.exceptions import RequiredParameterMissingError, NotFoundError
from mist.api.tag.methods import add_tags_to_resource
OK = Response("OK", 200)
@view_config(route_name='api_v1_keys', request_method='GET', renderer='json')
def list_keys(request):
"""
List keys
Retrieves a list of all added keys
READ permission required on key.
---
"""
# Authenticate the caller, then return only the keys the RBAC layer
# allows this context to see.
auth_context = auth_context_from_request(request)
return filter_list_keys(auth_context)
@view_config(route_name='api_v1_keys', request_method='PUT', renderer='json')
def add_key(request):
"""
Add key
Add key with specific name
ADD permission required on key.
---
id:
description: The key name
required: true
type: string
priv:
description: The private key
required: true
type: string
certificate:
description: The signed public key, when using signed ssh keys
required: false
type: string
"""
params = params_from_request(request)
# ``name`` is popped so the remaining params can be forwarded to
# Key.add(**params) without duplication.
key_name = params.pop('name', None)
private_key = params.get('priv', None)
certificate = params.get('certificate', None)
auth_context = auth_context_from_request(request)
# check_perm returns constraint tags to attach to the new key, if any.
key_tags = auth_context.check_perm("key", "add", None)
if not key_name:
raise BadRequestError("Key name is not provided")
if not private_key:
raise RequiredParameterMissingError("Private key is not provided")
# A certificate means this is a CA-signed SSH key; store it accordingly.
if certificate:
key = SignedSSHKey.add(auth_context.owner, key_name, **params)
else:
key = SSHKey.add(auth_context.owner, key_name, **params)
if key_tags:
add_tags_to_resource(auth_context.owner, key, key_tags.items())
# Since it's a brand new key, the machine association list is expected
# to be empty; this query keeps the response shape consistent anyway.
clouds = Cloud.objects(owner=auth_context.owner, deleted=None)
machines = Machine.objects(cloud__in=clouds,
key_associations__keypair__exact=key)
assoc_machines = transform_key_machine_associations(machines, key)
return {'id': key.id,
'name': key.name,
'machines': assoc_machines,
'isDefault': key.default}
@view_config(route_name='api_v1_key_action', request_method='DELETE',
renderer='json')
def delete_key(request):
"""
Delete key
Delete key. When a key gets deleted, it takes its associations with it
so just need to remove from the server too. If the default key gets
deleted, it sets the next one as default, provided that at least another
key exists. It returns the list of all keys after the deletion,
excluding the private keys (check also list_keys).
REMOVE permission required on key.
---
key:
in: path
required: true
type: string
"""
auth_context = auth_context_from_request(request)
key_id = request.matchdict.get('key')
if not key_id:
raise KeyParameterMissingError()
try:
key = Key.objects.get(owner=auth_context.owner, id=key_id,
deleted=None)
except me.DoesNotExist:
raise NotFoundError('Key id does not exist')
auth_context.check_perm('key', 'remove', key.id)
# Delegate the actual (soft) deletion to the keys.methods helper,
# then return the refreshed key list to the client.
m_delete_key(auth_context.owner, key_id)
return list_keys(request)
@view_config(route_name='api_v1_keys',
             request_method='DELETE', renderer='json')
def delete_keys(request):
    """
    Delete multiple keys.

    Provide a list of key ids to be deleted. The method will try to delete
    all of them and then return a json that describes for each key id
    whether or not it was deleted or not_found if the key id could not
    be located. If no key id was found then a 404(Not Found) response will
    be returned.
    REMOVE permission required on each key.
    ---
    key_ids:
      required: true
      type: array
      items:
        type: string
      name: key_id
    """
    auth_context = auth_context_from_request(request)
    params = params_from_request(request)

    key_ids = params.get('key_ids', [])
    if not isinstance(key_ids, list) or not key_ids:
        raise RequiredParameterMissingError('No key ids provided')
    # Remove duplicate ids if there are any.
    key_ids = set(key_ids)

    # Per-key outcome: 'deleted', 'not_found' or 'unauthorized'.
    report = {}
    for key_id in key_ids:
        try:
            key = Key.objects.get(owner=auth_context.owner,
                                  id=key_id, deleted=None)
        except me.DoesNotExist:
            report[key_id] = 'not_found'
            continue

        try:
            auth_context.check_perm('key', 'remove', key.id)
        except PolicyUnauthorizedError:
            report[key_id] = 'unauthorized'
        else:
            # BUG FIX: this used to call the ``delete_key`` *view* (which
            # expects a request object) instead of the method imported as
            # ``m_delete_key`` from mist.api.keys.methods.
            m_delete_key(auth_context.owner, key_id)
            report[key_id] = 'deleted'

    # BUG FIX: the previous code did len(filter(...)), which raises
    # TypeError on Python 3 because filter returns an iterator.
    statuses = list(report.values())

    # If none of the provided ids matched an existing key, report 404.
    if statuses.count('not_found') == len(key_ids):
        raise NotFoundError('No valid key id provided')
    # If the user was unauthorized for all of the keys, also report 404.
    if statuses.count('unauthorized') == len(key_ids):
        raise NotFoundError('Unauthorized to modify any of the keys')

    return report
@view_config(route_name='api_v1_key_action', request_method='PUT',
             renderer='json')
def edit_key(request):
    """
    Edit a key

    Edits a given key's name to new_name
    EDIT permission required on key.
    ---
    new_name:
      description: The new key name
      type: string
    key_id:
      description: The key id
      in: path
      required: true
      type: string
    """
    key_id = request.matchdict['key']
    new_name = params_from_request(request).get('new_name')
    if not new_name:
        raise RequiredParameterMissingError("new_name")

    auth_context = auth_context_from_request(request)
    try:
        key = Key.objects.get(owner=auth_context.owner,
                              id=key_id, deleted=None)
    except me.DoesNotExist:
        raise NotFoundError('Key with that id does not exist')

    # Renaming requires edit rights on this particular key.
    auth_context.check_perm('key', 'edit', key.id)
    key.ctl.rename(new_name)

    return {'new_name': new_name}
@view_config(route_name='api_v1_key_action', request_method='POST')
def set_default_key(request):
"""
Set default key
Sets a new default key
EDIT permission required on key.
---
key:
description: The key id
in: path
required: true
type: string
"""
key_id = request.matchdict['key']
auth_context = auth_context_from_request(request)
try:
key = Key.objects.get(owner=auth_context.owner,
id=key_id, deleted=None)
except me.DoesNotExist:
raise NotFoundError('Key id does not exist')
auth_context.check_perm('key', 'edit', key.id)
# The key controller handles un-setting the previous default.
key.ctl.set_default()
# Plain "OK" response defined at module level.
return OK
@view_config(route_name='api_v1_key_private', request_method='GET',
renderer='json')
def get_private_key(request):
"""
Gets private key from key name.
It is used in single key view when the user clicks the display private key
button.
READ_PRIVATE permission required on key.
---
key:
description: The key id
in: path
required: true
type: string
"""
key_id = request.matchdict['key']
if not key_id:
raise RequiredParameterMissingError("key_id")
auth_context = auth_context_from_request(request)
# Only SSH keys carry private material, hence the SSHKey query.
try:
key = SSHKey.objects.get(owner=auth_context.owner,
id=key_id, deleted=None)
except me.DoesNotExist:
raise NotFoundError('Key id does not exist')
# Stricter permission than plain read: exposes secret material.
auth_context.check_perm('key', 'read_private', key.id)
return key.private
@view_config(route_name='api_v1_key_public', request_method='GET',
renderer='json')
def get_public_key(request):
"""
Get public key
Gets public key from key name.
READ permission required on key.
---
key:
description: The key id
in: path
required: true
type: string
"""
key_id = request.matchdict['key']
if not key_id:
raise RequiredParameterMissingError("key_id")
auth_context = auth_context_from_request(request)
try:
key = SSHKey.objects.get(owner=auth_context.owner,
id=key_id, deleted=None)
except me.DoesNotExist:
raise NotFoundError('Key id does not exist')
# Public material only, so plain read permission suffices.
auth_context.check_perm('key', 'read', key.id)
return key.public
@view_config(route_name='api_v1_keys', request_method='POST', renderer='json')
def generate_key(request):
"""
Generate key
Generate key pair
---
"""
# Generates a throwaway key pair; nothing is persisted and no
# permission check is needed since no resource is created.
key = SSHKey()
key.ctl.generate()
return {'priv': key.private, 'public': key.public}
@view_config(route_name='api_v1_cloud_key_association', request_method='PUT',
             renderer='json')
@view_config(route_name='api_v1_key_association', request_method='PUT',
             renderer='json')
def associate_key(request):
    """
    Associate a key to a machine

    Associates a key with a machine. If host is set it will also attempt to
    actually deploy it to the machine. To do that it requires another key
    (existing_key) that can connect to the machine.
    READ permission required on cloud.
    READ_PRIVATE permission required on key.
    ASSOCIATE_KEY permission required on machine.
    ---
    machine:
      in: path
      required: true
      type: string
    key:
      in: path
      required: true
      type: string
    port:
      default: 22
      type: integer
    user:
      description: The ssh user
      type: string
    """
    key_id = request.matchdict['key']
    cloud_id = request.matchdict.get('cloud')
    params = params_from_request(request)
    ssh_user = params.get('user', None)
    try:
        ssh_port = int(request.json_body.get('port', 22))
    # BUG FIX: this was a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit; keep the best-effort fallback but
    # only for ordinary exceptions (missing/invalid body or port).
    except Exception:
        ssh_port = 22

    auth_context = auth_context_from_request(request)
    try:
        key = Key.objects.get(owner=auth_context.owner,
                              id=key_id, deleted=None)
    except Key.DoesNotExist:
        raise NotFoundError('Key id does not exist')
    # Deploying the key requires access to its private material.
    auth_context.check_perm('key', 'read_private', key.id)

    if cloud_id:
        # This route is deprecated; kept for backwards compatibility.
        # Here ``machine`` in the URL is the cloud-level machine_id.
        machine_id = request.matchdict['machine']
        try:
            Cloud.objects.get(owner=auth_context.owner,
                              id=cloud_id, deleted=None)
        except Cloud.DoesNotExist:
            raise NotFoundError('Cloud does not exist')

        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        # New-style route: ``machine`` is the machine's own UUID.
        machine_uuid = request.matchdict['machine']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    auth_context.check_perm("machine", "associate_key", machine.id)

    key.ctl.associate(machine, username=ssh_user, port=ssh_port)
    # Return the key's full, refreshed association list.
    clouds = Cloud.objects(owner=auth_context.owner, deleted=None)
    machines = Machine.objects(cloud__in=clouds,
                               key_associations__keypair__exact=key)
    assoc_machines = transform_key_machine_associations(machines, key)
    return assoc_machines
@view_config(route_name='api_v1_cloud_key_association',
request_method='DELETE', renderer='json')
@view_config(route_name='api_v1_key_association',
request_method='DELETE', renderer='json')
def disassociate_key(request):
"""
Disassociate a key from a machine
Disassociates a key from a machine. If host is set it will also attempt to
actually remove it from the machine.
READ permission required on cloud.
DISASSOCIATE_KEY permission required on machine.
---
key:
in: path
required: true
type: string
machine:
in: path
required: true
type: string
"""
key_id = request.matchdict['key']
cloud_id = request.matchdict.get('cloud')
auth_context = auth_context_from_request(request)
if cloud_id:
# this route is deprecated, keep it for backwards compatibility;
# ``machine`` in the URL is the cloud-level machine_id here
machine_id = request.matchdict['machine']
try:
Cloud.objects.get(owner=auth_context.owner,
id=cloud_id, deleted=None)
except Cloud.DoesNotExist:
raise NotFoundError('Cloud does not exist')
auth_context.check_perm("cloud", "read", cloud_id)
try:
machine = Machine.objects.get(cloud=cloud_id,
machine_id=machine_id,
state__ne='terminated')
except Machine.DoesNotExist:
raise NotFoundError("Machine %s doesn't exist" % machine_id)
else:
# new-style route: ``machine`` is the machine's own UUID
machine_uuid = request.matchdict['machine']
try:
machine = Machine.objects.get(id=machine_uuid,
state__ne='terminated')
except Machine.DoesNotExist:
raise NotFoundError("Machine %s doesn't exist" % machine_uuid)
cloud_id = machine.cloud.id
auth_context.check_perm("cloud", "read", cloud_id)
auth_context.check_perm("machine", "disassociate_key", machine.id)
key = Key.objects.get(owner=auth_context.owner, id=key_id, deleted=None)
key.ctl.disassociate(machine)
# Return the key's refreshed association list after removal.
clouds = Cloud.objects(owner=auth_context.owner, deleted=None)
machines = Machine.objects(cloud__in=clouds,
key_associations__keypair__exact=key)
assoc_machines = transform_key_machine_associations(machines, key)
return assoc_machines
7958f8e6cec0a9f14d162274f05dd3bd06f4d35a | 874 | py | Python | setup.py | petryx/extractor | 2c21df8104cb3ac42e95b287d7dafa1406613dba | [
"BSD-3-Clause"
] | null | null | null | setup.py | petryx/extractor | 2c21df8104cb3ac42e95b287d7dafa1406613dba | [
"BSD-3-Clause"
] | 1 | 2020-09-28T15:16:25.000Z | 2020-09-28T18:10:48.000Z | setup.py | petryx/extractor | 2c21df8104cb3ac42e95b287d7dafa1406613dba | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from setuptools import setup
import os,sys
# Prefer a reStructuredText long description (PyPI historically renders RST);
# fall back to the raw Markdown README when pypandoc is unavailable or the
# conversion cannot read the file.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
    # BUG FIX: the previous fallback leaked the file handle
    # (open(...).read() without closing); use a context manager.
    with open('README.md') as readme:
        long_description = readme.read()
# Package metadata and installation manifest for the pyextractor CLI tool.
setup(name='pyextractor',
version='0.9-beta',
description='Command Line Tool to extract data from files',
long_description=long_description,
url='https://github.com/petryx/extractor',
author='Marlon Petry',
author_email='marlonpetry@gmail.com',
license='BSD',
# Installed as a plain script rather than a console_scripts entry point.
scripts=['extractor/extractor.py'],
install_requires = [ 'pygrok>=1.0.0',
'textract==1.6.1',
'termcolor',
'pyaml',
],
zip_safe=False)
| 33.615385 | 71 | 0.526316 |
7958f932e5f0ae63ad219ba6fac90f8ff53b91ff | 9,495 | py | Python | tests/parsers/filestat.py | log2timeline/plaso | 406e77c4d8cf2edfc2aad82c61d4f910a038b759 | [
"Apache-2.0"
] | 1,253 | 2015-01-02T13:58:02.000Z | 2022-03-31T08:43:39.000Z | tests/parsers/filestat.py | log2timeline/plaso | 406e77c4d8cf2edfc2aad82c61d4f910a038b759 | [
"Apache-2.0"
] | 3,388 | 2015-01-02T11:17:58.000Z | 2022-03-30T10:21:45.000Z | tests/parsers/filestat.py | log2timeline/plaso | 406e77c4d8cf2edfc2aad82c61d4f910a038b759 | [
"Apache-2.0"
] | 376 | 2015-01-20T07:04:54.000Z | 2022-03-04T23:53:00.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for filestat parser."""
import os
import unittest
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from plaso.lib import definitions
from plaso.parsers import filestat
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class FileStatTest(test_lib.ParserTestCase):
"""Tests for filestat parser."""
# Each test follows the same pattern: build a (possibly nested) dfVFS
# path spec chain, parse the target through FileStatParser, and check the
# emitted filesystem stat event against an expected-values dict.
def testTSKFile(self):
"""Read a file within an image file and make few tests."""
parser = filestat.FileStatParser()
test_file_path = self._GetTestFilePath(['ímynd.dd'])
self._SkipIfPathNotExists(test_file_path)
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
tsk_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, inode=15,
location='/passwords.txt', parent=os_path_spec)
storage_writer = self._ParseFileByPathSpec(tsk_path_spec, parser)
# 3 events — presumably one per timestamp the EXT2 inode exposes;
# only the first (last-access) event is value-checked below.
self.assertEqual(storage_writer.number_of_events, 3)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'fs:stat',
'date_time': '2012-05-25 16:00:53',
'display_name': 'TSK:/passwords.txt',
'file_entry_type': 'file',
'file_size': 116,
'file_system_type': 'EXT2',
'group_identifier': 5000,
'inode': 15,
'mode': 0o400,
'number_of_links': 1,
'owner_identifier': 151107,
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testZipFile(self):
"""Test a ZIP file."""
parser = filestat.FileStatParser()
test_file_path = self._GetTestFilePath(['syslog.zip'])
self._SkipIfPathNotExists(test_file_path)
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
zip_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=os_path_spec)
storage_writer = self._ParseFileByPathSpec(zip_path_spec, parser)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'fs:stat',
'date_time': '2012-07-24 14:45:24',
'display_name': 'ZIP:/syslog',
'file_entry_type': 'file',
'file_size': 1247,
'file_system_type': 'ZIP',
'inode': None,
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testGzipFile(self):
"""Test a GZIP file."""
parser = filestat.FileStatParser()
test_file_path = self._GetTestFilePath(['syslog.gz'])
self._SkipIfPathNotExists(test_file_path)
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
gzip_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=os_path_spec)
storage_writer = self._ParseFileByPathSpec(gzip_path_spec, parser)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
# GZIP members have no location, so the display name is built from the
# OS-level path of the archive itself.
test_path = os.path.join(shared_test_lib.TEST_DATA_PATH, 'syslog.gz')
expected_event_values = {
'data_type': 'fs:stat',
'date_time': '2012-07-28 16:44:07',
'display_name': 'GZIP:{0:s}'.format(test_path),
'file_entry_type': 'file',
'file_size': 1247,
'file_system_type': 'GZIP',
'inode': None,
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testTarFile(self):
"""Test a TAR file."""
parser = filestat.FileStatParser()
test_file_path = self._GetTestFilePath(['syslog.tar'])
self._SkipIfPathNotExists(test_file_path)
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
tar_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TAR, location='/syslog',
parent=os_path_spec)
storage_writer = self._ParseFileByPathSpec(tar_path_spec, parser)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'fs:stat',
'date_time': '2012-07-24 21:45:24',
'display_name': 'TAR:/syslog',
'file_entry_type': 'file',
'file_size': 1247,
'file_system_type': 'TAR',
'inode': None,
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testNestedFile(self):
"""Test a nested file."""
# First: TAR inside GZIP (a .tgz) — parse the inner TAR member.
parser = filestat.FileStatParser()
test_file_path = self._GetTestFilePath(['syslog.tgz'])
self._SkipIfPathNotExists(test_file_path)
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
gzip_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=os_path_spec)
tar_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TAR, location='/syslog',
parent=gzip_path_spec)
storage_writer = self._ParseFileByPathSpec(tar_path_spec, parser)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'fs:stat',
'date_time': '2012-07-24 21:45:24',
'display_name': 'TAR:/syslog',
'file_entry_type': 'file',
'file_size': 1247,
'file_system_type': 'TAR',
'inode': None,
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Second: the same archive parsed only at the GZIP layer.
test_file_path = self._GetTestFilePath(['syslog.tgz'])
self._SkipIfPathNotExists(test_file_path)
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
gzip_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=os_path_spec)
storage_writer = self._ParseFileByPathSpec(gzip_path_spec, parser)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
test_path = os.path.join(shared_test_lib.TEST_DATA_PATH, 'syslog.tgz')
expected_event_values = {
'data_type': 'fs:stat',
'date_time': '2012-07-28 16:44:43',
'display_name': 'GZIP:{0:s}'.format(test_path),
'file_entry_type': 'file',
'file_size': 10240,
'file_system_type': 'GZIP',
'inode': None,
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testNestedTSK(self):
"""Test a nested TSK file."""
# ZIP member inside a file stored on an EXT image (OS -> TSK -> ZIP).
parser = filestat.FileStatParser()
test_file_path = self._GetTestFilePath(['syslog_image.dd'])
self._SkipIfPathNotExists(test_file_path)
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
tsk_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, inode=11,
location='/logs/hidden.zip', parent=os_path_spec)
zip_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=tsk_path_spec)
storage_writer = self._ParseFileByPathSpec(zip_path_spec, parser)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'fs:stat',
'date_time': '2012-07-20 15:44:14',
'display_name': 'ZIP:/syslog',
'file_entry_type': 'file',
'file_size': 1247,
'file_system_type': 'ZIP',
'inode': None,
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Allow running this test module directly as a script.
if __name__ == '__main__':
unittest.main()
| 36.102662 | 75 | 0.722801 |
7958fa47094ea4edfa706034357cc9023a79bed0 | 7,192 | py | Python | tastypie/api.py | PyUnchained/django-tastypie | a2f3d0af111acc71a8954d47918560fdb7f906d7 | [
"BSD-3-Clause"
] | 1 | 2022-03-18T06:12:56.000Z | 2022-03-18T06:12:56.000Z | tastypie/api.py | PyUnchained/django-tastypie | a2f3d0af111acc71a8954d47918560fdb7f906d7 | [
"BSD-3-Clause"
] | null | null | null | tastypie/api.py | PyUnchained/django-tastypie | a2f3d0af111acc71a8954d47918560fdb7f906d7 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse, HttpResponseBadRequest
from tastypie.compat import reverse
from tastypie.exceptions import NotRegistered, BadRequest
from tastypie.serializers import Serializer
from tastypie.utils import is_valid_jsonp_callback_value, string_to_python, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.resources import Resource
from django.urls.conf import re_path, include
class Api(object):
    """
    Implements a registry to tie together the various resources that make up
    an API.

    Especially useful for navigation, HATEOAS and for providing multiple
    versions of your API.

    Optionally supplying ``api_name`` allows you to name the API. Generally,
    this is done with version numbers (i.e. ``v1``, ``v2``, etc.) but can
    be named any string.
    """
    def __init__(self, api_name="v1", serializer_class=Serializer):
        # URL prefix identifying this API (commonly a version string).
        self.api_name = api_name
        # Maps resource_name -> registered Resource instance.
        self._registry = {}
        # Maps resource_name -> the canonical Resource for that name.
        self._canonicals = {}
        # Serializer used by the top-level discovery view.
        self.serializer = serializer_class()

    def register(self, resource, canonical=True):
        """
        Registers an instance of a ``Resource`` subclass with the API.

        Optionally accept a ``canonical`` argument, which indicates that the
        resource being registered is the canonical variant. Defaults to
        ``True``.

        Raises ``ValueError`` if ``resource`` is not a ``Resource`` instance
        and ``ImproperlyConfigured`` if it has no ``resource_name``.
        """
        resource_name = getattr(resource._meta, 'resource_name', None)

        if not isinstance(resource, Resource):
            raise ValueError("An instance of ``Resource`` subclass should be passed in for %s" % resource_name)

        if resource_name is None:
            raise ImproperlyConfigured("Resource %r must define a 'resource_name'." % resource)

        self._registry[resource_name] = resource

        if canonical is True:
            # Re-registering a canonical resource is allowed but warned about,
            # since it silently replaces the previous canonical URL.
            if resource_name in self._canonicals:
                warnings.warn("A new resource '%r' is replacing the existing canonical URL for '%s'." % (resource, resource_name), Warning, stacklevel=2)

            self._canonicals[resource_name] = resource
            # TODO: This is messy, but makes URI resolution on FK/M2M fields
            # work consistently.
            resource._meta.api_name = self.api_name
            resource.__class__.Meta.api_name = self.api_name

    def unregister(self, resource_name):
        """
        If present, unregisters a resource from the API.

        Missing names are ignored silently.
        """
        if resource_name in self._registry:
            del(self._registry[resource_name])

        if resource_name in self._canonicals:
            del(self._canonicals[resource_name])

    def canonical_resource_for(self, resource_name):
        """
        Returns the canonical resource for a given ``resource_name``.

        Raises ``NotRegistered`` when no canonical resource exists for it.
        """
        if resource_name in self._canonicals:
            return self._canonicals[resource_name]

        raise NotRegistered("No resource was registered as canonical for '%s'." % resource_name)

    def wrap_view(self, view):
        # ``view`` is the *attribute name* of a bound method on this Api; the
        # wrapper looks it up per-request and converts ``BadRequest``
        # exceptions into HTTP 400 responses.
        def wrapper(request, *args, **kwargs):
            try:
                return getattr(self, view)(request, *args, **kwargs)
            except BadRequest:
                return HttpResponseBadRequest()
        return wrapper

    def override_urls(self):
        """
        Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.
        """
        return []

    def prepend_urls(self):
        """
        A hook for adding your own URLs or matching before the default URLs.
        """
        return []

    @property
    def urls(self):
        """
        Provides URLconf details for the ``Api`` and all registered
        ``Resources`` beneath it.
        """
        # Top-level discovery endpoint for this API version.
        pattern_list = [
            re_path(r"^(?P<api_name>%s)%s$" % (self.api_name, trailing_slash), self.wrap_view('top_level'), name="api_%s_top_level" % self.api_name),
        ]

        # Mount every registered resource's URLconf, sorted by name so the
        # ordering is deterministic.
        for name in sorted(self._registry.keys()):
            self._registry[name].api_name = self.api_name
            pattern_list.append(re_path(r"^(?P<api_name>%s)/" % self.api_name, include(self._registry[name].urls)))

        urlpatterns = self.prepend_urls()

        overridden_urls = self.override_urls()
        if overridden_urls:
            warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
            urlpatterns += overridden_urls

        urlpatterns += pattern_list
        return urlpatterns

    def top_level(self, request, api_name=None):
        """
        A view that returns a serialized list of all resources registered
        to the ``Api``. Useful for discovery.

        When ``?fullschema=true`` is given, each resource's full schema is
        inlined; otherwise only a URL to the schema endpoint is included.
        """
        fullschema = request.GET.get('fullschema', False)
        fullschema = string_to_python(fullschema)

        available_resources = {}

        if api_name is None:
            api_name = self.api_name

        for name, resource in self._registry.items():
            if not fullschema:
                schema = self._build_reverse_url("api_get_schema", kwargs={
                    'api_name': api_name,
                    'resource_name': name,
                })
            else:
                schema = resource.build_schema()

            available_resources[name] = {
                'list_endpoint': self._build_reverse_url("api_dispatch_list", kwargs={
                    'api_name': api_name,
                    'resource_name': name,
                }),
                'schema': schema,
            }

        desired_format = determine_format(request, self.serializer)

        options = {}

        # JSONP support: validate the callback name before echoing it back.
        if 'text/javascript' in desired_format:
            callback = request.GET.get('callback', 'callback')

            if not is_valid_jsonp_callback_value(callback):
                raise BadRequest('JSONP callback name is invalid.')

            options['callback'] = callback

        serialized = self.serializer.serialize(available_resources, desired_format, options)
        return HttpResponse(content=serialized, content_type=build_content_type(desired_format))

    def _build_reverse_url(self, name, args=None, kwargs=None):
        """
        A convenience hook for overriding how URLs are built.

        See ``NamespacedApi._build_reverse_url`` for an example.
        """
        return reverse(name, args=args, kwargs=kwargs)
class NamespacedApi(Api):
    """An ``Api`` variant that is aware of Django URL namespaces.

    Reversed URLs are qualified with ``urlconf_namespace`` and canonical
    resources are told which namespace they live in.
    """

    def __init__(self, api_name="v1", urlconf_namespace=None, **kwargs):
        super(NamespacedApi, self).__init__(api_name=api_name, **kwargs)
        self.urlconf_namespace = urlconf_namespace

    def register(self, resource, canonical=True):
        super(NamespacedApi, self).register(resource, canonical=canonical)

        if canonical is True:
            # Canonical resources must also know the namespace so their URI
            # resolution matches this API's reversed URLs.
            resource._meta.urlconf_namespace = self.urlconf_namespace

    def _build_reverse_url(self, name, args=None, kwargs=None):
        qualified_name = "{}:{}".format(self.urlconf_namespace, name)
        return reverse(qualified_name, args=args, kwargs=kwargs)
| 36.882051 | 153 | 0.640573 |
7958fa868f1321f368d780ca59e0dd18d6a43231 | 2,213 | py | Python | src/urtypes/cbor/data.py | selfcustody/urtypes | 8ff8e6ebe484d7a0f98ad73f4441708704998b43 | [
"MIT"
] | null | null | null | src/urtypes/cbor/data.py | selfcustody/urtypes | 8ff8e6ebe484d7a0f98ad73f4441708704998b43 | [
"MIT"
] | null | null | null | src/urtypes/cbor/data.py | selfcustody/urtypes | 8ff8e6ebe484d7a0f98ad73f4441708704998b43 | [
"MIT"
] | null | null | null | # The MIT License (MIT)
# Copyright (c) 2021 Tom J. Sun
# Copyright (c) 2015 Sokolov Yura
# Copyright (c) 2013 Fritz Grimpen
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# coding: utf-8
class Tagging(object):
    """A tagged value: pairs a tag number with the object it decorates."""

    __slots__ = ("tag", "obj")

    def __init__(self, tag, obj):
        self.tag = tag
        self.obj = obj

    def __eq__(self, other):
        # Only another Tagging with the same tag and payload compares equal.
        if not isinstance(other, Tagging):
            return False
        return self.tag == other.tag and self.obj == other.obj
class Mapping(object):
    """Thin wrapper that marks an object as a map value.

    The wrapped object is exposed unchanged as ``self.map``.
    """

    __slots__ = "map"

    def __init__(self, map):
        self.map = map
def mapping(obj):
    """Convenience factory: wrap ``obj`` in a :class:`Mapping`."""
    return Mapping(obj)
class DataItem(Tagging):
    """A tagged map: a tag number combined with a :class:`Mapping`-wrapped map.

    Exposes both ``obj`` (the Mapping, via :class:`Tagging`) and ``map``
    (the raw, unwrapped map).
    """

    def __init__(self, tag, map):
        super().__init__(tag, Mapping(map))
        # ``tag`` was already set by Tagging.__init__; re-assigned here and
        # the raw map is kept alongside the Mapping stored in ``self.obj``.
        self.tag = tag
        self.map = map
class _Undefined(object):
_instance = None
def __new__(cls, *args, **kwargs):
if not isinstance(cls._instance, cls):
cls._instance = object.__new__(cls, *args, **kwargs)
return cls._instance
def __str__(self):
return "Undefined"
def __repr__(self):
return "Undefined"
Undefined = _Undefined()
__all__ = ["Tagging", "Mapping", "DataItem"]
| 28.74026 | 79 | 0.67962 |
7958faa2ac535c52c6a8534646c950febbd794e8 | 119 | py | Python | problem/10000~19999/14913/14913.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/10000~19999/14913/14913.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/10000~19999/14913/14913.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
def solve(a, d, n):
    """Return the 1-based position of ``n`` in the arithmetic sequence
    ``a, a+d, a+2d, ...`` as a string, or ``'X'`` if ``n`` is not a term.

    Assumes ``d != 0`` (a zero step would raise ZeroDivisionError, matching
    the original script's behavior).
    """
    if (n - a) % d == 0:
        steps = (n - a) // d
        # A term only if it lies at or after the first element, i.e. the
        # step count is non-negative (covers both d > 0 and d < 0).
        if steps >= 0:
            return str(steps + 1)
    return 'X'


def main():
    """Read "a d n" from stdin and print the answer."""
    a, d, n = map(int, input().split())
    print(solve(a, d, n))


if __name__ == '__main__':
    main()
7958fb54ddf75dfdbf0ad0a40b06a8a153579fdc | 13,128 | py | Python | tests/framework/scheduler.py | mziyabo/firecracker | 92d4a2a5ae40df9927ee2838bd3a599554f2e67a | [
"Apache-2.0"
] | 1 | 2020-02-26T05:27:55.000Z | 2020-02-26T05:27:55.000Z | tests/framework/scheduler.py | mziyabo/firecracker | 92d4a2a5ae40df9927ee2838bd3a599554f2e67a | [
"Apache-2.0"
] | null | null | null | tests/framework/scheduler.py | mziyabo/firecracker | 92d4a2a5ae40df9927ee2838bd3a599554f2e67a | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Pytest plugin that schedules tests to run concurrently.
This plugin adds a new command line option (`--concurrency`), allowing the
user to choose the maximum number of worker processes that can run tests
concurrently.
Tests are split into batches, each batch being assigned a maximum concurrency
level. For instance, all performance tests will run sequentially
(i.e. concurrency=1), since they rely on the availability of the full host
resources, in order to make accurate measurements. Additionally, other tests
may be restricted to running sequentially, if they are per se
concurrency-unsafe. See `PytestScheduler.pytest_runtestloop()`.
Scheduling is achieved by overriding the pytest run loop (i.e.
`pytest_runtestloop()`), and splitting the test session item list across
multiple `fork()`ed worker processes. Since no user code is run before
`pytest_runtestloop()`, each worker becomes a pytest session itself.
Reporting is disabled for worker process, each worker sending its results
back to the main / server process, via an IPC pipe, for aggregation.
"""
import multiprocessing as mp
import os
import re
import sys
from random import random
from select import select
from time import sleep
import pytest
from _pytest.main import ExitCode
from . import mpsing # pylint: disable=relative-beyond-top-level
class PytestScheduler(mpsing.MultiprocessSingleton):
    """A pretty custom test execution scheduler."""

    def __init__(self):
        """Initialize the scheduler.

        Not to be called directly, since this is a singleton. Use
        `PytestScheduler.instance()` to get the scheduler object.
        """
        super().__init__()
        # IPC-capable singletons serviced by the main loop (self included).
        self._mp_singletons = [self]
        # The pytest session; populated in `pytest_sessionstart()`.
        self.session = None

    def register_mp_singleton(self, mp_singleton):
        """Register a multi-process singleton object.

        Since the scheduler will be handling the main testing loop, it needs
        to be aware of any multi-process singletons that must be serviced
        during the test run (i.e. polled and allowed to handle method
        execution in the server context).
        """
        self._mp_singletons.append(mp_singleton)

    @staticmethod
    def do_pytest_addoption(parser):
        """Pytest hook. Add concurrency command line option."""
        avail_cpus = len(os.sched_getaffinity(0))
        # Defaulting to a third of the available (logical) CPUs sounds like a
        # good enough plan.
        default = max(1, int(avail_cpus / 3))
        # NOTE: the option string was accidentally passed twice here; a
        # single "--concurrency" is sufficient.
        parser.addoption(
            "--concurrency",
            dest="concurrency",
            action="store",
            type=int,
            default=default,
            help="Concurrency level (max number of worker processes to spawn)."
        )

    def pytest_sessionstart(self, session):
        """Pytest hook. Called at pytest session start.

        This will execute in the server context (before the tests are
        executed).
        """
        self.session = session

    def pytest_runtest_logreport(self, report):
        """Pytest hook. Called whenever a new test report is ready.

        This will execute in the worker / child context.
        """
        self._add_report(report)

    def pytest_runtestloop(self, session):
        """Pytest hook. The main test scheduling and running loop.

        Called in the server process context.
        """
        # Don't run tests on test discovery
        if session.config.option.collectonly:
            return True

        max_concurrency = self.session.config.option.concurrency
        schedule = [
            {
                # Performance batch: tests that measure performance, and need
                # to be run in a non-cuncurrent environment.
                'name': 'performance',
                'concurrency': 1,
                'patterns': [
                    "/performance/.+",
                ],
                'items': []
            },
            {
                # Unsafe batch: tests that, for any reason, are not
                # concurrency-safe, and therefore need to be run sequentially.
                'name': 'unsafe',
                'concurrency': 1,
                'patterns': [
                    "/functional/test_initrd.py",
                    "/functional/test_max_vcpus.py",
                    "/functional/test_rate_limiter.py",
                    "/functional/test_signals.py",
                    "/build/test_coverage.py"
                ],
                'items': []
            },
            {
                # Safe batch: tests that can be run safely in a concurrent
                # environment.
                'name': 'safe',
                'concurrency': max_concurrency,
                'patterns': [
                    "/functional/.+",
                    "/build/.+",
                    "/security/.+"
                ],
                'items': []
            },
            {
                # Unknown batch: a catch-all batch, scheduling any tests that
                # haven't been categorized to run sequentially (since we don't
                # know if they are concurrency-safe).
                'name': 'unknown',
                'concurrency': 1,
                'patterns': [".+"],
                'items': []
            }
        ]

        # Go through the list of tests and assign each of them to its
        # corresponding batch in the schedule.
        for item in session.items:
            # A test can match any of the patterns defined by the batch,
            # in order to get assigned to it.
            next(
                # Found a matching batch. No need to look any further.
                batch['items'].append(item) for batch in schedule
                if re.search(
                    "|".join(["({})".format(x) for x in batch['patterns']]),
                    "/".join(item.listnames()),
                ) is not None
            )

        # Filter out empty batches.
        schedule = [batch for batch in schedule if batch['items']]

        for batch in schedule:
            # Batch header: name, item count and worker count.
            self._raw_stdout(
                "\n[ ",
                self._colorize('yellow', batch['name']),
                " | ",
                "{} tests".format(len(batch['items'])),
                " | ",
                "{} worker(s)".format(batch['concurrency']),
                " ]\n"
            )
            self._run_batch(batch)

        # Returning a truthy value tells pytest the run loop was handled here.
        return "stahp"

    @pytest.mark.tryfirst
    # pylint: disable=unused-argument
    # pylint: disable=no-self-use
    def pytest_sessionfinish(self, session, exitstatus):
        """Pytest hook. Wrap up the whole testing session.

        Since the scheduler is more or less mangling the test session in order
        to distribute test items to worker processes, the main pytest process
        can become unaware of test failures and errors. Using this session
        wrap-up hook to set the correct exit code.
        """
        trep = session.config.pluginmanager.getplugin("terminalreporter")
        if "error" in trep.stats:
            session.exitstatus = ExitCode.INTERNAL_ERROR
        if "failed" in trep.stats:
            session.exitstatus = ExitCode.TESTS_FAILED

    def _run_batch(self, batch):
        """Run the tests in this batch, spread across multiple workers.

        Called in the server process context.
        """
        max_workers = batch['concurrency']
        items_per_worker = max(1, int(len(batch['items']) / max_workers))
        workers = []
        while batch['items']:
            # Pop `items_per_worker` out from this batch and send them to
            # a new worker.
            worker_items = batch['items'][-items_per_worker:]
            del batch['items'][-items_per_worker:]

            # Avoid storming the host with too many workers started at once.
            _delay = random() + len(workers) / 5.0 if max_workers > 1 else 0

            # Create the worker process and start it up.
            worker = mp.Process(
                target=self._worker_main,
                args=(worker_items, _delay)
            )
            workers.append(worker)
            worker.start()

        # Main loop, reaping workers and processing IPC requests.
        while workers:
            rlist, _, _ = select(self._mp_singletons, [], [], 0.1)
            for mps in rlist:
                mps.handle_ipc_call()
            _ = [w.join() for w in workers if not w.is_alive()]
            workers = [w for w in workers if w.is_alive()]

    def _worker_main(self, items, startup_delay=0):
        """Execute a bunch of test items sequentially.

        This is the worker process entry point and main loop.
        """
        sys.stdin.close()

        # Sleeping here to avoid storming the host when many workers are
        # started at the same time.
        #
        # TODO: investigate storming issue;
        #       Not sure what the exact problem is, but worker storms cause an
        #       elevated response time on the API socket. Since the reponse
        #       time is measured by our decorators, it also includes the
        #       Python libraries overhead, which might be non-negligible.
        sleep(startup_delay if startup_delay else 0)

        # Restrict the session to this worker's item list only.
        # I.e. make pytest believe that the test session is limited to this
        # worker's job.
        self.session.items = items

        # Disable the terminal reporting plugin, so it doesn't mess up
        # stdout, when run in a multi-process context.
        # The terminal reporter plugin will remain enabled in the server
        # process, gathering data via worker calls to `_add_report()`.
        trep = self.session.config.pluginmanager.get_plugin("terminalreporter")
        self.session.config.pluginmanager.unregister(trep)

        for item, nextitem in zip(
                self.session.items,
                self.session.items[1:] + [None]
        ):
            item.ihook.pytest_runtest_protocol(item=item, nextitem=nextitem)

    @mpsing.ipcmethod
    def _add_report(self, report):
        """Send a test report to the server process.

        A report is generated for every test item, and for every test phase
        (setup, call, and teardown).
        """
        # Translation matrix from (when)x(outcome) to pytest's
        # terminalreporter plugin stats (dictionary) key.
        key_xlat = {
            "setup.passed": "",
            "setup.failed": "error",
            "setup.skipped": "skipped",
            "call.passed": "passed",
            "call.failed": "failed",
            "call.skipped": "skipped",
            "teardown.passed": "",
            "teardown.failed": "error",
            "teardown.skipped": ""
        }
        stats_key = key_xlat["{}.{}".format(report.when, report.outcome)]

        trep = self.session.config.pluginmanager.get_plugin("terminalreporter")
        if trep:
            if stats_key not in trep.stats:
                trep.stats[stats_key] = []
            trep.stats[stats_key].append(report)

        if stats_key:
            self._report_progress(report.nodeid, stats_key)

    def _report_progress(self, nodeid, outcome):
        """Show the user some nice progress indication."""
        outcome_cols = {
            "passed": "green",
            "failed": "red",
            "error": "red",
            "skipped": "yellow"
        }
        if outcome not in outcome_cols:
            return

        color = outcome_cols[outcome]
        self._raw_stdout(
            "  ",
            self._colorize(color, "{:10}".format(outcome.upper())),
            self._colorize(color, nodeid)
            if outcome in ["error", "failed"]
            else nodeid,
            "\n"
        )

    @staticmethod
    def _colorize(color, msg):
        """Add an ANSI / terminal color escape code to `msg`.

        If stdout is not a terminal, `msg` will just be encoded into a byte
        stream, without adding any ANSI decorations.

        Note: the returned value will always be a stream of bytes, not a
              string, since the result needs to be sent straight to the
              terminal.
        """
        if not isinstance(msg, bytes):
            msg = str(msg).encode("utf-8")
        if not sys.stdout.isatty():
            return msg
        term_codes = {
            'red': b"\x1b[31m",
            'yellow': b"\x1b[33m",
            'green': b"\x1b[32m",
            'reset': b"\x1b(B\x1b[m"
        }
        return term_codes[color] + msg + term_codes['reset']

    @staticmethod
    def _raw_stdout(*words):
        """Send raw-byte output to stdout.

        All arguments are concatenated and, if necessary, encoded into raw
        byte streams, before being written to stdout.
        """
        byte_words = [
            w if isinstance(w, bytes) else str(w).encode("utf-8")
            for w in words
        ]
        buf = b"".join(byte_words)
        os.write(sys.stdout.fileno(), buf)
| 37.295455 | 79 | 0.577544 |
7958fba9631dd8e36727ee5bf3eeea65bcd15ca7 | 976 | py | Python | run.py | nedludd0/python-base | b5d825a5e056d15295e8d7c7ad5e37f20fdeb7cd | [
"MIT"
] | 1 | 2020-05-16T11:36:37.000Z | 2020-05-16T11:36:37.000Z | run.py | nedludd0/python-base | b5d825a5e056d15295e8d7c7ad5e37f20fdeb7cd | [
"MIT"
] | null | null | null | run.py | nedludd0/python-base | b5d825a5e056d15295e8d7c7ad5e37f20fdeb7cd | [
"MIT"
] | null | null | null | """
There are four collection data types in the Python programming language:
- LIST is a collection which is ordered and changeable. Allows duplicate members.
- TUPLE is a collection which is ordered and unchangeable. Allows duplicate members.
- SET is a collection which is unordered and unindexed. No duplicate members.
- DICTIONARY is a collection which is unordered, changeable and indexed. No duplicate members.
"""
import my_list
import my_tuple
import my_set
import my_dict
def main(_choose):
    """Dispatch to the study() function of the chosen collection module."""
    modules = {
        'list': my_list,
        'tuple': my_tuple,
        'set': my_set,
        'dict': my_dict,
    }
    module = modules.get(_choose)
    if module is None:
        print("But what did you choose ?? :{}".format(_choose))
    else:
        module.study()


if __name__ == "__main__":
    _choose = input("Choose the structure to study (list, tuple, set, dict): ")
    main(_choose)
| 28.705882 | 101 | 0.645492 |
7958fbb21c8c01dbafc491118d1c11230aa2b8cf | 1,574 | py | Python | touchpoint/location.py | zappospizza/touchpoint-python | 19572c0c1360408dd980ed95e852046dcdba3623 | [
"MIT"
] | null | null | null | touchpoint/location.py | zappospizza/touchpoint-python | 19572c0c1360408dd980ed95e852046dcdba3623 | [
"MIT"
] | null | null | null | touchpoint/location.py | zappospizza/touchpoint-python | 19572c0c1360408dd980ed95e852046dcdba3623 | [
"MIT"
] | null | null | null | # touchpoint/location.py
'''This module defines a location.'''
class Location():
    """Details describing a single location.

    :param location_id: identifier of the location
    :type location_id: str
    :param wss_url: the location's wss url
    :type wss_url: str
    :param default_order_notes: notes attached to orders by default
    :type default_order_notes: list
    :param name: display name of the location
    :type name: str
    :param address: address of the location
    :type address: str
    :param phone: contact phone number of the location
    :type phone: str
    """

    def __init__(self, location_id, wss_url=None, default_order_notes=None,
                 name=None, address=None, phone=None):
        self.location_id = location_id
        self.wss_url = wss_url
        self.name = name
        self.address = address
        self.phone = phone
        # A fresh list per instance avoids the shared-mutable-default trap;
        # a caller-supplied list (even an empty one) is kept as-is.
        self.default_order_notes = (
            [] if default_order_notes is None else default_order_notes)

    def info(self):
        """Return this location's details.

        :returns: the location's details
        :rtype: dict
        """
        return dict(
            location_id=self.location_id,
            wss_url=self.wss_url,
            default_order_notes=self.default_order_notes,
            name=self.name,
            address=self.address,
            phone=self.phone,
        )
| 28.618182 | 83 | 0.588945 |
7958fc1e5cd366e374d9758a62d641f5cea4b043 | 269 | py | Python | qct_energy/config/desktop.py | Suraj787/qct_energy | a1969a5236ade31a08bc049b2d42f6a3458c1e1d | [
"MIT"
] | null | null | null | qct_energy/config/desktop.py | Suraj787/qct_energy | a1969a5236ade31a08bc049b2d42f6a3458c1e1d | [
"MIT"
] | null | null | null | qct_energy/config/desktop.py | Suraj787/qct_energy | a1969a5236ade31a08bc049b2d42f6a3458c1e1d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
    """Return the desktop icon configuration for the "qct energy" module."""
    module_card = {
        "module_name": "qct energy",
        "color": "grey",
        "icon": "octicon octicon-file-directory",
        "type": "module",
        "label": _("qct energy")
    }
    return [module_card]
| 17.933333 | 44 | 0.613383 |
7958fc46c44af43489e17f00403817a6075629dd | 1,166 | py | Python | tests.py | correa-coder/console_menu | 0bbf1a0d7d027d68f78187b8a6db9ce463a671bd | [
"MIT"
] | null | null | null | tests.py | correa-coder/console_menu | 0bbf1a0d7d027d68f78187b8a6db9ce463a671bd | [
"MIT"
] | null | null | null | tests.py | correa-coder/console_menu | 0bbf1a0d7d027d68f78187b8a6db9ce463a671bd | [
"MIT"
] | null | null | null | import unittest
from package import menu
class TestText(unittest.TestCase):
    """Smoke tests for the console menu's Text and MenuItem helpers."""

    def test_text_output(self):
        """menu.Text instances can be constructed and rendered."""
        print('-' * 32)
        print("testing menu.Text")
        texts = [
            menu.Text('Success', 'green'),
            menu.Text('Info', 'blue'),
            menu.Text('Error', 'red')
        ]
        # Visual check only: show() prints the colored text to stdout.
        for text in texts:
            text.show()

    def test_colorize_func(self):
        """colorize() wraps the text in the expected ANSI escape codes."""
        self.assertEqual(menu.Text.colorize('test', 'red'), '\033[31mtest\033[m')
        self.assertEqual(menu.Text.colorize('test', 'green'), '\033[32mtest\033[m')
        self.assertEqual(menu.Text.colorize('test', 'blue'), '\033[34mtest\033[m')

    def test_menu_item(self):
        """MenuItem renders and runs, including a non-callable action."""
        print('-' * 32)
        print("testing menu.MenuItem")
        menu_items = [
            menu.MenuItem('Strawberry', color='red'),
            menu.MenuItem('Avocado', lambda: print("This is an Avocado"), color='green'),
            menu.MenuItem('Mango', [], color='blue'),  # not a function passed to self.func (edge case)
        ]
        for item in menu_items:
            item.show()
            item.run()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 28.439024 | 102 | 0.557461 |
7958fcc6c64cd3ed9f80fcc6dabf2109bbddd58a | 118 | py | Python | lib/galaxy/version.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/version.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/version.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
# Galaxy release version. VERSION is "<major>.<minor>" when a minor part is
# set, otherwise just the major part.
VERSION_MAJOR = "17.01"
VERSION_MINOR = None
if VERSION_MINOR:
    VERSION = "{}.{}".format(VERSION_MAJOR, VERSION_MINOR)
else:
    VERSION = VERSION_MAJOR
| 29.5 | 72 | 0.728814 |
7958fd3e335e3610976a4a76a66ff29d4fec6333 | 183 | py | Python | kinton/file_system.py | the-cocktail/kinton | e43c22260064a22cb90d30cf856e874a91ea9587 | [
"MIT"
] | 2 | 2018-02-06T09:08:29.000Z | 2018-09-24T08:06:16.000Z | kinton/file_system.py | the-cocktail/kinton | e43c22260064a22cb90d30cf856e874a91ea9587 | [
"MIT"
] | 1 | 2018-09-26T12:18:18.000Z | 2018-09-26T12:18:18.000Z | kinton/file_system.py | the-cocktail/kinton | e43c22260064a22cb90d30cf856e874a91ea9587 | [
"MIT"
] | 1 | 2018-01-29T17:09:02.000Z | 2018-01-29T17:09:02.000Z | import os
import shutil
class FileSystem:
    """Small helpers for creating and removing directory trees.

    The methods were previously plain functions missing ``self``, so calling
    them on an *instance* raised a TypeError; ``@staticmethod`` makes both
    class-level and instance-level calls work.
    """

    @staticmethod
    def create_folder(path):
        """Create ``path`` (including parent directories) if it is missing."""
        if not os.path.exists(path):
            os.makedirs(path)

    @staticmethod
    def remove_folder(path):
        """Recursively delete the directory tree rooted at ``path``."""
        shutil.rmtree(path)
7958fe7bcc628a6d158a5b17d14df3e3c75d0ae5 | 2,209 | py | Python | examples/003_evaluation_one_dataset.py | szghlm/smote_variants | 9066ddbd526b18bb273746c1b989e8e07a35abd2 | [
"MIT"
] | 271 | 2020-01-18T09:04:35.000Z | 2022-03-31T11:49:12.000Z | examples/003_evaluation_one_dataset.py | szghlm/smote_variants | 9066ddbd526b18bb273746c1b989e8e07a35abd2 | [
"MIT"
] | 19 | 2020-05-04T18:24:03.000Z | 2022-03-21T23:44:43.000Z | examples/003_evaluation_one_dataset.py | szghlm/smote_variants | 9066ddbd526b18bb273746c1b989e8e07a35abd2 | [
"MIT"
] | 70 | 2020-01-18T15:01:43.000Z | 2022-03-28T15:10:19.000Z |
# coding: utf-8
# # Evaluation of oversamplers with a set of classifiers on one database
#
# In this notebook we give an example of optimizing oversamplers and classifiers for given dataset.
# In[1]:
import os.path
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import smote_variants as sv
import imbalanced_databases as imbd
# In[2]:
# the evaluation procedure uses a directory for caching
cache_path= os.path.join(os.path.expanduser('~'), 'smote_test')
if not os.path.exists(cache_path):
os.makedirs(cache_path)
# In[3]:
# specifying the dataset to be used
dataset= imbd.load_glass0()
# In[4]:
# specifying the classifiers
knn_classifier= KNeighborsClassifier()
dt_classifier= DecisionTreeClassifier()
# In[5]:
# executing the evaluation using 5 parallel jobs and at most 35 random but meaningful parameter combinations
# with the 5 quickest oversamplers
results= sv.evaluate_oversamplers(datasets= [dataset],
samplers= sv.get_n_quickest_oversamplers(5),
classifiers= [knn_classifier, dt_classifier],
cache_path= cache_path,
n_jobs= 5,
max_samp_par_comb= 35)
# In[6]:
# determining oversampler and classifier combination with highest AUC score
highest_auc_score= results['auc'].idxmax()
# In[7]:
# querying classifier and oversampler parameters with highest AUC score
cl, cl_par, samp, samp_par= results.loc[highest_auc_score][['classifier',
'classifier_parameters_auc',
'sampler',
'sampler_parameters_auc']]
# In[8]:
# instantiating oversampler and classifier objects providing the highest AUC score
samp_obj= getattr(sv, samp)(**eval(samp_par))
cl_obj= eval(cl)(**eval(cl_par))
# In[9]:
# oversampling the entire dataset and fitting a classifier
X_samp, y_samp= samp_obj.sample(dataset['data'], dataset['target'])
cl_obj.fit(X_samp, y_samp)
| 22.313131 | 108 | 0.641919 |
7958feea9ffec546fc256084af68e1badac7611d | 494 | py | Python | ACME/math/linint.py | mauriziokovacic/ACME | 2615b66dd4addfd5c03d9d91a24c7da414294308 | [
"MIT"
] | 3 | 2019-10-23T23:10:55.000Z | 2021-09-01T07:30:14.000Z | ACME/math/linint.py | mauriziokovacic/ACME-Python | 2615b66dd4addfd5c03d9d91a24c7da414294308 | [
"MIT"
] | null | null | null | ACME/math/linint.py | mauriziokovacic/ACME-Python | 2615b66dd4addfd5c03d9d91a24c7da414294308 | [
"MIT"
] | 1 | 2020-07-11T11:35:43.000Z | 2020-07-11T11:35:43.000Z | import torch
from ..utility.istorch import *
def linint(A, B, t):
    """
    Computes the linear interpolation between the two input at the specified parameter

    Parameters
    ----------
    A : Tensor
        the first input tensor
    B : Tensor
        the second input tensor
    t : float
        the interpolation parameter

    Returns
    -------
    Tensor
        the inpterpolated value
    """
    # Torch tensors get the native lerp; anything else uses the same
    # convex-combination formula directly.
    if istorch(A, B):
        return torch.lerp(A, B, t)
    return (1 - t) * A + t * B
| 18.296296 | 86 | 0.578947 |
7958ff306d8e6a38ae2771ed853bdc831406b360 | 6,743 | py | Python | ckan_cloud_operator/providers/cluster/aws/manager.py | mickeyrouash/ckan-cloud-operator | 10e38f13964af30fe57b07e8d8a3b7521ed69cc2 | [
"MIT"
] | null | null | null | ckan_cloud_operator/providers/cluster/aws/manager.py | mickeyrouash/ckan-cloud-operator | 10e38f13964af30fe57b07e8d8a3b7521ed69cc2 | [
"MIT"
] | null | null | null | ckan_cloud_operator/providers/cluster/aws/manager.py | mickeyrouash/ckan-cloud-operator | 10e38f13964af30fe57b07e8d8a3b7521ed69cc2 | [
"MIT"
] | null | null | null | #### standard provider code ####
# import the correct PROVIDER_SUBMODULE and PROVIDER_ID constants for your provider
from .constants import PROVIDER_ID
from ..constants import PROVIDER_SUBMODULE
# define common provider functions based on the constants
from ckan_cloud_operator.providers import manager as providers_manager
# Convenience wrappers binding this provider's (submodule, id) pair to the
# generic providers_manager helpers.

# Kubernetes resource name for this provider, optionally suffixed.
def _get_resource_name(suffix=None): return providers_manager.get_resource_name(PROVIDER_SUBMODULE, PROVIDER_ID, suffix=suffix)

# Labels identifying this provider's resources (deployment variant optional).
def _get_resource_labels(for_deployment=False): return providers_manager.get_resource_labels(PROVIDER_SUBMODULE, PROVIDER_ID, for_deployment=for_deployment)

# Annotations for this provider's resources, optionally suffixed.
def _get_resource_annotations(suffix=None): return providers_manager.get_resource_annotations(PROVIDER_SUBMODULE, PROVIDER_ID, suffix=suffix)

# Mark this provider as the active one for its submodule.
def _set_provider(): providers_manager.set_provider(PROVIDER_SUBMODULE, PROVIDER_ID)

# Write provider configuration (optionally secret / namespaced / suffixed).
def _config_set(key=None, value=None, values=None, namespace=None, is_secret=False, suffix=None): providers_manager.config_set(PROVIDER_SUBMODULE, PROVIDER_ID, key=key, value=value, values=values, namespace=namespace, is_secret=is_secret, suffix=suffix)

# Read provider configuration (optionally secret / namespaced / suffixed).
def _config_get(key=None, default=None, required=False, namespace=None, is_secret=False, suffix=None): return providers_manager.config_get(PROVIDER_SUBMODULE, PROVIDER_ID, key=key, default=default, required=required, namespace=namespace, is_secret=is_secret, suffix=suffix)

# Interactively prompt for and store provider configuration values.
def _config_interactive_set(default_values, namespace=None, is_secret=False, suffix=None, from_file=False): providers_manager.config_interactive_set(PROVIDER_SUBMODULE, PROVIDER_ID, default_values, namespace, is_secret, suffix, from_file)
################################
# custom provider code starts here
#
import yaml
import json
import subprocess
import datetime
import os
import binascii
import collections
from ckan_cloud_operator import kubectl
from ckan_cloud_operator import logs
def initialize(interactive=False):
    """Register this provider and (optionally) collect AWS/EKS settings.

    :param interactive: when True, prompt the operator for the AWS access
        key, secret key, default region and EKS cluster name, storing each
        as secret provider configuration.
    """
    _set_provider()
    if interactive:
        print('\nEnter credentials for an AWS Access Key with relevant permissions\n')
        _config_interactive_set({'aws-access-key-id': None}, is_secret=True)
        _config_interactive_set({'aws-secret-access-key': None}, is_secret=True)
        # fix: corrected typos in the operator-facing prompt
        # (was "Amazone Kubernets")
        print('\nEnter the AWS Region the Amazon Kubernetes cluster is hosted on\n')
        _config_interactive_set({'aws-default-region': None}, is_secret=True)
        print('\nEnter the name of your Amazon EKS cluster\n')
        _config_interactive_set({'eks-cluster-name': None}, is_secret=True)
    # show the resulting cluster summary so the operator can confirm access
    print(yaml.dump(get_info(), default_flow_style=False))
def get_info(debug=False):
    """Return a summary of the configured EKS cluster.

    :param debug: when True, return the raw ``aws eks describe-cluster``
        payload; otherwise return a reduced dict with name, status,
        endpoint, creation time and version.
    """
    cluster = _config_get('eks-cluster-name', is_secret=True)
    # fix: yaml.load() without an explicit Loader is deprecated since
    # PyYAML 5.1 and raises TypeError with PyYAML >= 6; safe_load is the
    # correct choice for untrusted CLI output (no python object tags needed).
    data = yaml.safe_load(
        aws_check_output(f'eks describe-cluster --name {cluster}'))['cluster']
    if debug:
        return data
    else:
        return {
            'name': data['name'],
            'status': data['status'],
            'endpoint': data['endpoint'],
            # NOTE(review): treats 'createdAt' as seconds since the epoch;
            # the describe-cluster JSON normally carries an ISO timestamp —
            # confirm the format the CLI actually emits here.
            'createTime': datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
                          + datetime.timedelta(seconds=float(data['createdAt'])),
            'currentMasterVersion': data['version'],
        }
def aws_check_output(cmd):
    """Run ``aws <cmd>`` with the stored credentials and return its stdout.

    Security fix: the credentials are passed through the subprocess
    environment (``env=``) instead of being interpolated into the shell
    command line, so they no longer appear in process listings or shell
    history. The returned value is the raw ``bytes`` stdout of the CLI.
    """
    env = dict(
        os.environ,
        AWS_ACCESS_KEY_ID=_config_get('aws-access-key-id', is_secret=True),
        AWS_SECRET_ACCESS_KEY=_config_get('aws-secret-access-key', is_secret=True),
        AWS_DEFAULT_REGION=_config_get('aws-default-region', is_secret=True),
    )
    return subprocess.check_output(f"aws {cmd}", shell=True, env=env)
def exec(cmd):
    """Run ``aws <cmd>``, letting its output go straight to the terminal.

    NOTE(review): the name shadows the ``exec`` builtin — kept unchanged for
    API compatibility. Security fix: credentials are supplied via ``env=``
    rather than embedded in the shell command line, so they do not leak into
    process listings or shell history. Raises CalledProcessError on failure.
    """
    env = dict(
        os.environ,
        AWS_ACCESS_KEY_ID=_config_get('aws-access-key-id', is_secret=True),
        AWS_SECRET_ACCESS_KEY=_config_get('aws-secret-access-key', is_secret=True),
        AWS_DEFAULT_REGION=_config_get('aws-default-region', is_secret=True),
    )
    subprocess.check_call(f"aws {cmd}", shell=True, env=env)
def create_volume(disk_size_gb, labels, use_existing_disk_name=None):
    """Create an EBS volume and bind it to a k8s PV + PVC named after it.

    :param disk_size_gb: requested volume size in GB.
    :param labels: currently unused by this implementation.
    :param use_existing_disk_name: not supported; must be falsy.
    :return: dict with the PVC reference and a nodeSelector pinning pods to
        the volume's availability zone.
    """
    assert not use_existing_disk_name, 'using existing disk name is not supported yet'
    availability_zone = get_storage_availability_zone()
    logs.info(f'creating persistent disk with size {disk_size_gb} in availability zone {availability_zone}')
    # NOTE(review): the bare '--' before --size looks unusual for the AWS
    # CLI argument parser — confirm the intended invocation.
    data = json.loads(aws_check_output(f'ec2 create-volume -- --size {disk_size_gb} --availability-zone {availability_zone}'))
    volume_id = data['VolumeId']
    logs.info(f'volume_id={volume_id}')
    # PV with an empty storageClassName so it is only claimable by the
    # matching statically-bound PVC below.
    kubectl.apply({
        'apiVersion': 'v1', 'kind': 'PersistentVolume',
        'metadata': {'name': volume_id, 'namespace': 'ckan-cloud'},
        'spec': {
            'storageClassName': '',
            'capacity': {'storage': f'{disk_size_gb}G'},
            'accessModes': ['ReadWriteOnce'],
            'awsElasticBlockStore': {'volumeID': volume_id}
        }
    })
    # PVC explicitly bound to the PV created above via volumeName.
    kubectl.apply({
        'apiVersion': 'v1', 'kind': 'PersistentVolumeClaim',
        'metadata': {'name': volume_id, 'namespace': 'ckan-cloud'},
        'spec': {
            'storageClassName': '',
            'volumeName': volume_id,
            'accessModes': ['ReadWriteOnce'],
            'resources': {'requests': {'storage': f'{disk_size_gb}G'}}
        }
    })
    # EBS volumes are zonal: pods using the claim must be scheduled into the
    # same availability zone, hence the nodeSelector.
    return {
        'persistentVolumeClaim': {
            'claimName': volume_id
        },
        'nodeSelector': {
            'failure-domain.beta.kubernetes.io/zone': availability_zone
        }
    }
def get_storage_availability_zone():
    """Return the configured default availability zone for new volumes."""
    return _config_get('default-storage-availability-zone')
def set_storage_availability_zone(zone):
    """Persist ``zone`` as the default availability zone for new volumes."""
    return _config_set('default-storage-availability-zone', zone)
def auto_get_availability_zone():
    """Return the availability zone hosting the most cluster nodes."""
    print('getting availability zone with most nodes in the cluster')
    zone_counts = collections.Counter(
        node['metadata']['labels']['failure-domain.beta.kubernetes.io/zone']
        for node in kubectl.get('nodes')['items']
    )
    # most_common(1) keeps the first-seen zone on ties, matching a stable
    # descending sort of the counts; an empty cluster raises IndexError
    # exactly as before.
    return zone_counts.most_common(1)[0][0]
def get_name():
    """Return the cluster name, caching it in provider config on first use."""
    cached = _config_get('cluster-name')
    if cached:
        return cached
    # first call: ask EKS for the name and remember it for next time
    cluster_name = get_info()['name']
    _config_set('cluster-name', cluster_name)
    return cluster_name
| 43.785714 | 273 | 0.677888 |
7959003e6e94c565cba18407e915b39e1e488a7f | 6,843 | py | Python | cohesity_management_sdk/http/http_client.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-09-24T17:35:53.000Z | 2022-03-25T08:08:47.000Z | cohesity_management_sdk/http/http_client.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-03-29T19:32:29.000Z | 2022-01-03T23:16:45.000Z | cohesity_management_sdk/http/http_client.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 16 | 2019-02-27T06:54:12.000Z | 2021-11-16T18:10:24.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
from cohesity_management_sdk.http.http_method_enum import HttpMethodEnum
from cohesity_management_sdk.http.http_request import HttpRequest
class HttpClient(object):
    """An interface for the methods that an HTTP Client must implement

    This class should not be instantiated but should be used as a base class
    for HTTP Client classes.

    Fix: the request-builder methods previously used mutable default
    arguments (``headers={}`` etc.); those dicts are created once at function
    definition time and shared between calls, so any mutation made by a
    consumer would leak into later requests. They now default to ``None`` and
    substitute a fresh dict per call, which is behaviorally compatible.
    """

    def execute_as_string(self, request):
        """Execute a given HttpRequest to get a string response back

        Args:
            request (HttpRequest): The given HttpRequest to execute.

        Returns:
            HttpResponse: The response of the HttpRequest.

        """
        raise NotImplementedError("Please Implement this method")

    def execute_as_binary(self, request):
        """Execute a given HttpRequest to get a binary response back

        Args:
            request (HttpRequest): The given HttpRequest to execute.

        Returns:
            HttpResponse: The response of the HttpRequest.

        """
        raise NotImplementedError("Please Implement this method")

    def convert_response(self, response, binary):
        """Converts the Response object of the HttpClient into an
        HttpResponse object.

        Args:
            response (dynamic): The original response object.
            binary (bool): Whether the response payload should be treated
                as binary.

        Returns:
            HttpResponse: The converted HttpResponse object.

        """
        raise NotImplementedError("Please Implement this method")

    def get(self, query_url,
            headers=None,
            query_parameters=None):
        """Create a simple GET HttpRequest object for the given parameters

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the URL.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.

        """
        return HttpRequest(HttpMethodEnum.GET,
                           query_url,
                           headers if headers is not None else {},
                           query_parameters if query_parameters is not None else {},
                           None,
                           None)

    def head(self, query_url,
             headers=None,
             query_parameters=None):
        """Create a simple HEAD HttpRequest object for the given parameters

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the URL.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.

        """
        return HttpRequest(HttpMethodEnum.HEAD,
                           query_url,
                           headers if headers is not None else {},
                           query_parameters if query_parameters is not None else {},
                           None,
                           None)

    def post(self, query_url,
             headers=None,
             query_parameters=None,
             parameters=None,
             files=None):
        """Create a simple POST HttpRequest object for the given parameters

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the URL.
            parameters (dict, optional): Form or body parameters to be included in the body.
            files (dict, optional): Files to be sent with the request.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.

        """
        return HttpRequest(HttpMethodEnum.POST,
                           query_url,
                           headers if headers is not None else {},
                           query_parameters if query_parameters is not None else {},
                           parameters if parameters is not None else {},
                           files if files is not None else {})

    def put(self, query_url,
            headers=None,
            query_parameters=None,
            parameters=None,
            files=None):
        """Create a simple PUT HttpRequest object for the given parameters

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the URL.
            parameters (dict, optional): Form or body parameters to be included in the body.
            files (dict, optional): Files to be sent with the request.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.

        """
        return HttpRequest(HttpMethodEnum.PUT,
                           query_url,
                           headers if headers is not None else {},
                           query_parameters if query_parameters is not None else {},
                           parameters if parameters is not None else {},
                           files if files is not None else {})

    def patch(self, query_url,
              headers=None,
              query_parameters=None,
              parameters=None,
              files=None):
        """Create a simple PATCH HttpRequest object for the given parameters

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the URL.
            parameters (dict, optional): Form or body parameters to be included in the body.
            files (dict, optional): Files to be sent with the request.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.

        """
        return HttpRequest(HttpMethodEnum.PATCH,
                           query_url,
                           headers if headers is not None else {},
                           query_parameters if query_parameters is not None else {},
                           parameters if parameters is not None else {},
                           files if files is not None else {})

    def delete(self, query_url,
               headers=None,
               query_parameters=None,
               parameters=None,
               files=None):
        """Create a simple DELETE HttpRequest object for the given parameters

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the URL.
            parameters (dict, optional): Form or body parameters to be included in the body.
            files (dict, optional): Files to be sent with the request.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.

        """
        return HttpRequest(HttpMethodEnum.DELETE,
                           query_url,
                           headers if headers is not None else {},
                           query_parameters if query_parameters is not None else {},
                           parameters if parameters is not None else {},
                           files if files is not None else {})
| 35.092308 | 92 | 0.555897 |
795902765be5618c63ab684d03e9e95f556ed8ff | 190 | py | Python | flying_bear_bot/urls.py | wolendranh/flying_bear_bot | 0b8a8ad47a744146e59cc218dc6d6547fb9b6c0e | [
"Apache-2.0"
] | 1 | 2018-12-28T13:58:56.000Z | 2018-12-28T13:58:56.000Z | flying_bear_bot/urls.py | wolendranh/flying_bear_bot | 0b8a8ad47a744146e59cc218dc6d6547fb9b6c0e | [
"Apache-2.0"
] | 12 | 2019-12-04T22:16:35.000Z | 2022-03-12T00:50:02.000Z | flying_bear_bot/urls.py | wolendranh/flying_bear_bot | 0b8a8ad47a744146e59cc218dc6d6547fb9b6c0e | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url, include
from django.contrib import admin
# URL routing table: the Django admin plus all routes delegated to the
# bot_register application.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^bot_register/', include('bot_register.urls')),
]
| 23.75 | 57 | 0.705263 |
79590277b52e59f190bbaf0c7423f80d7cbd2b19 | 667 | py | Python | src/match_modality/methods/dummy_constant/script.py | hhuuggoo/neurips2021_multimodal_viash | f17af09a34ecac6309bcd7b5f8f5122e09318e0c | [
"MIT"
] | null | null | null | src/match_modality/methods/dummy_constant/script.py | hhuuggoo/neurips2021_multimodal_viash | f17af09a34ecac6309bcd7b5f8f5122e09318e0c | [
"MIT"
] | null | null | null | src/match_modality/methods/dummy_constant/script.py | hhuuggoo/neurips2021_multimodal_viash | f17af09a34ecac6309bcd7b5f8f5122e09318e0c | [
"MIT"
] | null | null | null | import anndata
import numpy as np
# VIASH START
# Default parameter values for local runs; presumably viash substitutes this
# region (between the VIASH START/END markers) at build time — confirm
# against the component config.
par = {
    "input_mod1": "resources_test/match_modality/test_resource.mod1.h5ad",
    "input_mod2": "resources_test/match_modality/test_resource.mod2.h5ad",
    "output": "resources_test/match_modality/test_resource.prediction.h5ad",
}
# VIASH END
# load dataset to be censored
ad_mod1 = anndata.read_h5ad(par["input_mod1"])
ad_mod2 = anndata.read_h5ad(par["input_mod2"])
# Write out prediction
# Baseline "method": a constant all-ones (mod1 cells x mod2 cells) score
# matrix, i.e. every cell pairing is scored identically.
prediction = anndata.AnnData(
    X=np.ones((ad_mod1.n_obs, ad_mod2.n_obs)),
    uns={
        "method_id": "dummy_constant",
        "dataset_id": ad_mod1.uns["dataset_id"],
    }
)
prediction.write_h5ad(par["output"])
| 26.68 | 76 | 0.721139 |
795902841a8a600a7373c64060cdd70153cd1ac0 | 12,113 | py | Python | gamestonk_terminal/fundamental_analysis/market_watch_api.py | sandsturm/GamestonkTerminal | 1969ff3b251711099a448024ec71e5b4e50413f7 | [
"MIT"
] | null | null | null | gamestonk_terminal/fundamental_analysis/market_watch_api.py | sandsturm/GamestonkTerminal | 1969ff3b251711099a448024ec71e5b4e50413f7 | [
"MIT"
] | null | null | null | gamestonk_terminal/fundamental_analysis/market_watch_api.py | sandsturm/GamestonkTerminal | 1969ff3b251711099a448024ec71e5b4e50413f7 | [
"MIT"
] | null | null | null | import argparse
import requests
import pandas as pd
from bs4 import BeautifulSoup
from gamestonk_terminal.helper_funcs import (
get_user_agent,
parse_known_args_and_warn,
patch_pandas_text_adjustment,
financials_colored_values,
)
from gamestonk_terminal import feature_flags as gtff
def income(l_args, s_ticker):
    """Print the Market Watch income statement for ``s_ticker``.

    :param l_args: raw command line arguments (supports -q/--quarter)
    :param s_ticker: stock ticker whose financials page is scraped
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="income",
        description="""
            Prints either yearly or quarterly income statement the company. The following fields
            are expected: Sales Growth, Cost of Goods Sold (COGS) incl. D&A, COGS Growth, COGS
            excluding D&A, Depreciation & Amortization Expense, Depreciation, Amortization of
            Intangibles, Gross Income, Gross Income Growth, Gross Profit Margin, SG&A Expense, SGA
            Growth, Research & Development, Other SG&A, Other Operating Expense, Unusual Expense,
            EBIT after Unusual Expense, Non Operating Income/Expense, Non-Operating Interest
            Income, Equity in Affiliates (Pretax), Interest Expense, Interest Expense Growth,
            Gross Interest Expense, Interest Capitalized, Pretax Income, Pretax Income Growth,
            Pretax Margin, Income Tax, Income Tax - Current Domestic, Income Tax - Current Foreign,
            Income Tax - Deferred Domestic, Income Tax - Deferred Foreign, Income Tax Credits,
            Equity in Affiliates, Other After Tax Income (Expense), Consolidated Net Income,
            Minority Interest Expense, Net Income Growth, Net Margin Growth, Extraordinaries &
            Discontinued Operations, Extra Items & Gain/Loss Sale Of Assets, Cumulative Effect -
            Accounting Chg, Discontinued Operations, Net Income After Extraordinaries,
            Preferred Dividends, Net Income Available to Common, EPS (Basic), EPS (Basic) Growth,
            Basic Shares Outstanding, EPS (Diluted), EPS (Diluted) Growth, Diluted Shares
            Outstanding, EBITDA, EBITDA Growth, EBITDA Margin, Sales/Revenue, and Net Income.
            [Source: Market Watch]
        """,
    )
    parser.add_argument(
        "-q",
        "--quarter",
        action="store_true",
        default=False,
        dest="b_quarter",
        help="Quarter fundamental data flag.",
    )
    try:
        # bail out silently when parsing did not yield a namespace
        ns_parser = parse_known_args_and_warn(parser, l_args)
        if not ns_parser:
            return
        if ns_parser.b_quarter:
            url_financials = f"https://www.marketwatch.com/investing/stock/{s_ticker}/financials/income/quarter"
        else:
            url_financials = f"https://www.marketwatch.com/investing/stock/{s_ticker}/financials/income"
        df_financials = prepare_df_financials(url_financials, ns_parser.b_quarter)
        if gtff.USE_COLOR:
            # colorize cells and widen pandas display limits for the terminal
            df_financials = df_financials.applymap(financials_colored_values)
            patch_pandas_text_adjustment()
            pd.set_option("display.max_colwidth", None)
            pd.set_option("display.max_rows", None)
        print(df_financials.to_string(index=False))
        print("")
    except Exception as e:
        print(e)
        print("")
        return
def balance(l_args, s_ticker):
    """Print the Market Watch balance sheet for ``s_ticker``.

    Mirrors :func:`income` but scrapes the balance-sheet page.

    :param l_args: raw command line arguments (supports -q/--quarter)
    :param s_ticker: stock ticker whose financials page is scraped
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="balance",
        description="""
            Prints either yearly or quarterly assets from balance sheet of the company.
            The following fields are expected: Cash & Short Term Investments, Cash & Short Term
            Investments Growth, Cash Only, Short-Term Investments, Cash & ST Investments / Total
            Assets, Total Accounts Receivable, Total Accounts Receivable Growth, Accounts
            Receivables, Net, Accounts Receivables, Gross, Bad Debt/Doubtful Accounts, Other
            Receivable, Accounts Receivable Turnover, Inventories, Finished Goods, Work in
            Progress, Raw Materials, Progress Payments & Other, Other Current Assets,
            Miscellaneous Current Assets, Net Property, Plant & Equipment, Property, Plant &
            Equipment - Gross, Buildings, Land & Improvements, Computer Software and Equipment,
            Other Property, Plant & Equipment, Accumulated Depreciation, Total Investments and
            Advances, Other Long-Term Investments, Long-Term Note Receivables, Intangible Assets,
            Net Goodwill, Net Other Intangibles, Other Assets [Source: Market Watch]
            Prints either yearly or quarterly liabilities and shareholders' equity from balance
            sheet of the company. The following fields are expected: ST Debt & Current Portion LT
            Debt, Short Term Debt, Current Portion of Long Term Debt, Accounts Payable, Accounts
            Payable Growth, Income Tax Payable, Other Current Liabilities, Dividends Payable,
            Accrued Payroll, Miscellaneous Current Liabilities, Long-Term Debt, Long-Term Debt
            excl. Capitalized Leases, Non-Convertible Debt, Convertible Debt, Capitalized Lease
            Obligations, Provision for Risks & Charges, Deferred Taxes, Deferred Taxes - Credits,
            Deferred Taxes - Debit, Other Liabilities, Other Liabilities (excl. Deferred Income),
            Deferred Income, Non-Equity Reserves, Total Liabilities / Total Assets, Preferred Stock
            (Carrying Value), Redeemable Preferred Stock, Non-Redeemable Preferred Stock, Common
            Equity (Total), Common Equity/Total Assets, Common Stock Par/Carry Value, Retained
            Earnings, ESOP Debt Guarantee, Cumulative Translation Adjustment/Unrealized For. Exch.
            Gain, Unrealized Gain/Loss Marketable Securities, Revaluation Reserves, Treasury Stock,
            Total Shareholders' Equity, Total Shareholders' Equity / Total Assets, Accumulated
            Minority Interest, Total Equity, Total Current Assets, Total Assets, Total Current
            Liabilities, Total Liabilities, and Liabilities & Shareholders' Equity. [Source: Market
            Watch]
        """,
    )
    parser.add_argument(
        "-q",
        "--quarter",
        action="store_true",
        default=False,
        dest="b_quarter",
        help="Quarter fundamental data flag.",
    )
    try:
        # bail out silently when parsing did not yield a namespace
        ns_parser = parse_known_args_and_warn(parser, l_args)
        if not ns_parser:
            return
        if ns_parser.b_quarter:
            url_financials = f"https://www.marketwatch.com/investing/stock/{s_ticker}/financials/balance-sheet/quarter"
        else:
            url_financials = f"https://www.marketwatch.com/investing/stock/{s_ticker}/financials/balance-sheet"
        df_financials = prepare_df_financials(url_financials, ns_parser.b_quarter)
        if gtff.USE_COLOR:
            # colorize cells and widen pandas display limits for the terminal
            df_financials = df_financials.applymap(financials_colored_values)
            patch_pandas_text_adjustment()
            pd.set_option("display.max_colwidth", None)
            pd.set_option("display.max_rows", None)
        print(df_financials.to_string(index=False))
        print("")
    except Exception as e:
        print(e)
        print("")
        return
def cash(l_args, s_ticker):
    """Print the Market Watch cash flow statement for ``s_ticker``.

    Mirrors :func:`income` but scrapes the cash-flow page.

    :param l_args: raw command line arguments (supports -q/--quarter)
    :param s_ticker: stock ticker whose financials page is scraped
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="cash_flow",
        description="""
            Prints either yearly or quarterly cash flow operating activities of the company.
            The following fields are expected: Net Income before Extraordinaries, Net Income
            Growth, Depreciation, Depletion & Amortization, Depreciation and Depletion,
            Amortization of Intangible Assets, Deferred Taxes & Investment Tax Credit, Deferred
            Taxes, Investment Tax Credit, Other Funds, Funds from Operations, Extraordinaries,
            Changes in Working Capital, Receivables, Accounts Payable, Other Assets/Liabilities,
            and Net Operating Cash Flow Growth. [Source: Market Watch]
            Prints either yearly or quarterly cash flow investing activities of the company.
            The following fields are expected: Capital Expenditures, Capital Expenditures Growth,
            Capital Expenditures/Sales, Capital Expenditures (Fixed Assets), Capital Expenditures
            (Other Assets), Net Assets from Acquisitions, Sale of Fixed Assets & Businesses,
            Purchase/Sale of Investments, Purchase of Investments, Sale/Maturity of Investments,
            Other Uses, Other Sources, Net Investing Cash Flow Growth. [Source: Market Watch]
            Prints either yearly or quarterly cash flow financing activities of the company.
            The following fields are expected: Cash Dividends Paid - Total, Common Dividends,
            Preferred Dividends, Change in Capital Stock, Repurchase of Common & Preferred Stk.,
            Sale of Common & Preferred Stock, Proceeds from Stock Options, Other Proceeds from Sale
            of Stock, Issuance/Reduction of Debt, Net, Change in Current Debt, Change in Long-Term
            Debt, Issuance of Long-Term Debt, Reduction in Long-Term Debt, Other Funds, Other Uses,
            Other Sources, Net Financing Cash Flow Growth, Net Financing Cash Flow/Sales, Exchange
            Rate Effect, Miscellaneous Funds, Net Change in Cash, Free Cash Flow, Free Cash Flow
            Growth, Free Cash Flow Yield, Net Operating Cash Flow, Net Investing Cash Flow, Net
            Financing Cash Flow [Source: Market Watch]
        """,
    )
    parser.add_argument(
        "-q",
        "--quarter",
        action="store_true",
        default=False,
        dest="b_quarter",
        help="Quarter fundamental data flag.",
    )
    try:
        # bail out silently when parsing did not yield a namespace
        ns_parser = parse_known_args_and_warn(parser, l_args)
        if not ns_parser:
            return
        if ns_parser.b_quarter:
            url_financials = f"https://www.marketwatch.com/investing/stock/{s_ticker}/financials/cash-flow/quarter"
        else:
            url_financials = f"https://www.marketwatch.com/investing/stock/{s_ticker}/financials/cash-flow"
        df_financials = prepare_df_financials(url_financials, ns_parser.b_quarter)
        if gtff.USE_COLOR:
            # colorize cells and widen pandas display limits for the terminal
            df_financials = df_financials.applymap(financials_colored_values)
            patch_pandas_text_adjustment()
            pd.set_option("display.max_colwidth", None)
            pd.set_option("display.max_rows", None)
        print(df_financials.to_string(index=False))
        print("")
    except Exception as e:
        print(e)
        print("")
        return
def prepare_df_financials(url_financials: str, quarter: bool) -> pd.DataFrame:
    """Scrape a Market Watch financials page into a DataFrame.

    The column headers are taken up to (excluding) the trailing trend
    column; rows are appended one at a time. An empty frame (with the
    parsed columns) is returned when the fixed financials table is absent.
    """
    page = requests.get(url_financials, headers={"User-Agent": get_user_agent()})
    soup = BeautifulSoup(page.text, "lxml")

    # Column names: first text line of every overflow heading cell.
    column_names = [
        heading.text.strip("\n").split("\n")[0]
        for heading in soup.findAll("th", {"class": "overflow__heading"})
    ]
    trend_header = "5- qtr trend" if quarter else "5-year trend"
    df_financials = pd.DataFrame(
        columns=column_names[:column_names.index(trend_header)]
    )

    tables = soup.findAll(
        "div", {"class": "element element--table table--fixed financials"}
    )
    if not tables:
        return df_financials

    for table_row in tables[0].findAll(
        "tr", {"class": ["table__row is-highlighted", "table__row"]}
    ):
        cells = table_row.findAll(
            "td", {"class": ["overflow__cell", "overflow__cell fixed--column"]}
        )
        if not cells:
            continue
        row_values = []
        for cell in cells:
            cell_text = cell.text.strip()
            if "\n" in cell_text:
                cell_text = cell_text.split("\n")[0]
            if cell_text:
                row_values.append(cell_text)
        # append row-by-row so a malformed row fails exactly as before
        df_financials.loc[len(df_financials)] = row_values
    return df_financials
| 44.862963 | 119 | 0.669694 |
79590437b5ed7df3a1392406a9c5fd717d4be296 | 762 | py | Python | cmake_configuration.py | Stefantb/CMakeTools | 3fd909e0d45034180e8cf97d73779468c555368f | [
"MIT"
] | null | null | null | cmake_configuration.py | Stefantb/CMakeTools | 3fd909e0d45034180e8cf97d73779468c555368f | [
"MIT"
] | null | null | null | cmake_configuration.py | Stefantb/CMakeTools | 3fd909e0d45034180e8cf97d73779468c555368f | [
"MIT"
] | null | null | null | class CMakeConfiguration:
__slots__ = ('cmake_binary', 'source_folder',
'build_folder', 'generator', 'arguments')
def __init__(self, cmake_binary: str, source_folder: str,
build_folder: str, generator: str, arguments: dict):
self.cmake_binary = cmake_binary
self.source_folder = source_folder
self.build_folder = build_folder
self.generator = generator
self.arguments = arguments
def __str__(self):
return 'CMakeConfiguration({},{},{},{},{}'.format(
self.cmake_binary,
self.source_folder,
self.build_folder,
self.generator,
self.arguments
)
    def __repr__(self):
        # delegate to __str__ so repr() and str() render identically
        return self.__str__()
| 29.307692 | 69 | 0.595801 |
795906faf251573e44c7a500c239bb744593a6e9 | 957 | py | Python | atman/__main__.py | adityaruplaha/attendance-manager | c1305717936453b6250cedf21266dab1a04b7be5 | [
"BSD-3-Clause"
] | null | null | null | atman/__main__.py | adityaruplaha/attendance-manager | c1305717936453b6250cedf21266dab1a04b7be5 | [
"BSD-3-Clause"
] | 3 | 2021-01-04T16:36:15.000Z | 2021-03-08T06:26:55.000Z | atman/__main__.py | adityaruplaha/atman | c1305717936453b6250cedf21266dab1a04b7be5 | [
"BSD-3-Clause"
] | null | null | null | import getpass
import atman.manager
import atman.frontend
import colorama
# Interactive startup: collect MySQL connection details from the operator;
# getpass keeps the password from being echoed to the terminal.
username = input("Enter database username: ")
password = getpass.getpass(prompt="Enter database password: ")
database_name = input("Enter database name: ")
connection_parameters = {
    'host': "localhost",
    'port': 3306,
    'user': username,
    'passwd': password
}
# Database/table names the manager reads subject and attendance data from.
data_source_parameters = {
    'database_name': database_name,
    'subject_table': "academic",
    'attendance_table': "attendance"
}
colorama.init(autoreset=True)
m = atman.manager.Manager(
    connection_parameters, data_source_parameters)  # manager object created
m.refresh_classes()
f = atman.frontend.Frontend(m)  # frontend object created
print()
print()
print("AtMan")
print("-----")
print()
print("Type 'help' for usage guide.")
print()
# Simple REPL: run frontend commands until it clears its running flag;
# Ctrl-C / Ctrl-D trigger a clean quit instead of a traceback.
while f.running:  # by default running = True
    try:
        f.exec()
    except KeyboardInterrupt:
        f.quit()
    except EOFError:
        f.quit()
| 19.9375 | 75 | 0.691745 |
795906feeb6e1ee56670258141c2513b412544ac | 1,257 | py | Python | myvenv/Lib/site-packages/payments/widgets.py | Fa67/saleor-shop | 76110349162c54c8bfcae61983bb59ba8fb0f778 | [
"BSD-3-Clause"
] | 3 | 2020-04-17T19:01:57.000Z | 2022-03-07T19:54:43.000Z | myvenv/Lib/site-packages/payments/widgets.py | Fa67/saleor-shop | 76110349162c54c8bfcae61983bb59ba8fb0f778 | [
"BSD-3-Clause"
] | 5 | 2020-03-24T16:37:25.000Z | 2021-06-10T21:24:54.000Z | upibo-venv/Lib/site-packages/payments/widgets.py | smbpgroup/upibo | 625dcda9f9692c62aeb9fe8f7123a5d407c610ae | [
"BSD-3-Clause"
] | 1 | 2020-08-18T18:15:04.000Z | 2020-08-18T18:15:04.000Z | import re
from django.forms.widgets import TextInput, MultiWidget, Select
class CreditCardNumberWidget(TextInput):
    """Text input that redisplays card numbers grouped for readability."""

    def render(self, name, value, attrs=None):
        """Render the widget, spacing 16/15/14 digit numbers into groups.

        Fix: the regex literal is now a raw string — ``'[\\s-]'`` in a plain
        literal contains the invalid escape sequence ``\\s`` (a
        SyntaxWarning on modern CPython).
        """
        if value:
            # normalize: strip whitespace and dashes before regrouping
            value = re.sub(r'[\s-]', '', value)
            if len(value) == 16:
                value = ' '.join([value[0:4], value[4:8],
                                  value[8:12], value[12:16]])
            elif len(value) == 15:
                value = ' '.join([value[0:4], value[4:10], value[10:15]])
            elif len(value) == 14:
                value = ' '.join([value[0:4], value[4:10], value[10:14]])
        return super(CreditCardNumberWidget, self).render(name, value, attrs)
# Credit Card Expiry Fields from:
# http://www.djangosnippets.org/snippets/907/
class CreditCardExpiryWidget(MultiWidget):
    """MultiWidget for representing credit card expiry date."""
    template_name = 'payments/credit_card_expiry_widget.html'

    def decompress(self, value):
        """Split a date-like value into its [month, year] sub-widget values."""
        return [value.month, value.year] if value else [None, None]
class SensitiveTextInput(TextInput):
    """Text input rendered through a dedicated "sensitive" template
    (presumably to control how sensitive values appear in markup — confirm
    against the template)."""
    template_name = 'payments/sensitive_text_input.html'
class SensitiveSelect(Select):
    """Select widget rendered through the sensitive-select template."""
    template_name = 'payments/sensitive_select.html'
| 30.658537 | 77 | 0.609387 |
7959074a47108d35de961c951512f8bb3d28998d | 3,376 | py | Python | monasca_analytics/sml/logistic_regression.py | daisuke-fujita/monsaca-analytics_20181107 | 5809e66874d76bd9f102e7694197bd849210fa3b | [
"Apache-2.0"
] | null | null | null | monasca_analytics/sml/logistic_regression.py | daisuke-fujita/monsaca-analytics_20181107 | 5809e66874d76bd9f102e7694197bd849210fa3b | [
"Apache-2.0"
] | 1 | 2019-01-21T09:44:29.000Z | 2019-01-21T09:44:29.000Z | monasca_analytics/sml/logistic_regression.py | daisuke-fujita/monsaca-analytics_20181107 | 5809e66874d76bd9f102e7694197bd849210fa3b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import numpy as np
from sklearn import linear_model
from sklearn.metrics import classification_report
import voluptuous
from monasca_analytics.sml.base import BaseSML
from monasca_analytics.util.validation_utils import NoSpaceCharacter
logger = logging.getLogger(__name__)
ANOMALY = 1
NON_ANOMALY = 0
N_SAMPLES = 1000
class LogisticRegression(BaseSML):
    """Anomaly detection based on the LogisticRegression algorithm"""

    def __init__(self, _id, _config):
        super(LogisticRegression, self).__init__(_id, _config)
        # number of samples to accumulate before learning is triggered
        self._nb_samples = int(_config['nb_samples'])

    @staticmethod
    def validate_config(_config):
        """Validate the SML config dict; raises on invalid input.

        Fix: the schema used ``basestring``, which exists only on Python 2
        and raises NameError on Python 3 — replaced with ``str``.
        """
        log_reg_schema = voluptuous.Schema({
            'module': voluptuous.And(
                str, NoSpaceCharacter()),
            'nb_samples': voluptuous.Or(float, int)
        }, required=True)
        return log_reg_schema(_config)

    @staticmethod
    def get_default_config():
        """Return the default configuration for this component."""
        return {
            'module': LogisticRegression.__name__,
            'nb_samples': N_SAMPLES
        }

    @staticmethod
    def get_params():
        # Fix: marked @staticmethod — the original bare ``def get_params():``
        # had no ``self`` and would fail when called on an instance.
        # NOTE(review): ``params`` and ``type_util`` are not imported in this
        # module; calling this as written raises NameError — confirm the
        # intended imports upstream.
        return [
            params.ParamDescriptor('nb_samples', type_util.Number(), N_SAMPLES)
        ]

    def number_of_samples_required(self):
        """Number of samples to buffer before learn_structure is invoked."""
        return self._nb_samples

    def _generate_train_test_sets(self, samples, ratio_train):
        """Split labelled samples (last column = label) into train/test sets."""
        num_samples_train = int(len(samples) * ratio_train)
        # hsplit at -1: everything but the last column is data, last is label
        data, labels = np.hsplit(samples, [-1])
        X_train = np.array(data[:num_samples_train])
        X_train_label = np.array(labels[:num_samples_train]).ravel()
        X_test = np.array(data[num_samples_train:])
        X_test_label = np.array(labels[num_samples_train:]).ravel()
        return X_train, X_train_label, X_test, X_test_label

    def _get_best_detector(self, train, label):
        """Fit and return a scikit-learn logistic regression classifier."""
        detector = linear_model.LogisticRegression()
        detector.fit(train, label)
        return detector

    def learn_structure(self, samples):
        """Train on 75% of ``samples`` and report test-set anomaly stats."""
        X_train, X_train_label, X_test, X_test_label = \
            self._generate_train_test_sets(samples, 0.75)
        logger.info('Training with ' + str(len(X_train)) +
                    'samples; testing with ' + str(len(X_test)) + ' samples.')
        lr_detector = self._get_best_detector(X_train, X_train_label)
        Y_test = lr_detector.predict(X_test)
        num_anomalies = Y_test[Y_test == ANOMALY].size
        logger.info('Found ' + str(num_anomalies) +
                    ' anomalies in testing set')
        logger.info('Confusion Matrix: \n{}'.
                    format(classification_report(
                        X_test_label,
                        Y_test,
                        target_names=['no', 'yes'])))
        return lr_detector
| 33.425743 | 79 | 0.665284 |
79590788673c56827b2800c3f2364bd350a1d477 | 2,424 | py | Python | invenio_records/models.py | ppanero/invenio-records | b0b1481d04012e45cb71b5ae4019e91dde88d1e2 | [
"MIT"
] | null | null | null | invenio_records/models.py | ppanero/invenio-records | b0b1481d04012e45cb71b5ae4019e91dde88d1e2 | [
"MIT"
] | null | null | null | invenio_records/models.py | ppanero/invenio-records | b0b1481d04012e45cb71b5ae4019e91dde88d1e2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Record models."""
import uuid
from datetime import datetime
from invenio_db import db
from sqlalchemy.dialects import mysql, postgresql
from sqlalchemy_utils.types import JSONType, UUIDType
class Timestamp(object):
    """Timestamp model mix-in with fractional seconds support.

    SQLAlchemy-Utils timestamp model does not have support for fractional
    seconds.
    """

    # Creation time. MySQL needs DATETIME(fsp=6) for microsecond precision;
    # datetime.utcnow yields a *naive* UTC timestamp.
    created = db.Column(
        db.DateTime().with_variant(mysql.DATETIME(fsp=6), "mysql"),
        default=datetime.utcnow,
        nullable=False
    )
    # Last-modification time; refreshed on every update by the
    # timestamp_before_update listener defined in this module.
    updated = db.Column(
        db.DateTime().with_variant(mysql.DATETIME(fsp=6), "mysql"),
        default=datetime.utcnow,
        nullable=False
    )
@db.event.listens_for(Timestamp, 'before_update', propagate=True)
def timestamp_before_update(mapper, connection, target):
    """Update `updated` property with current time on `before_update` event."""
    # naive UTC timestamp, consistent with the column defaults on Timestamp
    target.updated = datetime.utcnow()
class RecordMetadata(db.Model, Timestamp):
    """Represent a record metadata.
    The RecordMetadata object contains a ``created`` and a ``updated``
    properties that are automatically updated.
    """
    # Enables SQLAlchemy-Continuum versioning
    __versioned__ = {}
    __tablename__ = 'records_metadata'
    # Random UUID primary key, generated client-side via uuid.uuid4.
    id = db.Column(
        UUIDType,
        primary_key=True,
        default=uuid.uuid4,
    )
    """Record identifier."""
    # JSONB on PostgreSQL (with NULL-vs-JSON-null distinction), generic
    # serialized JSON on SQLite and MySQL.
    json = db.Column(
        db.JSON().with_variant(
            postgresql.JSONB(none_as_null=True),
            'postgresql',
        ).with_variant(
            JSONType(),
            'sqlite',
        ).with_variant(
            JSONType(),
            'mysql',
        ),
        default=lambda: dict(),
        nullable=True
    )
    """Store metadata in JSON format.
    When you create a new ``Record`` the ``json`` field value should never be
    ``NULL``. Default value is an empty dict. ``NULL`` value means that the
    record metadata has been deleted.
    """
    version_id = db.Column(db.Integer, nullable=False)
    """Used by SQLAlchemy for optimistic concurrency control."""
    __mapper_args__ = {
        'version_id_col': version_id
    }
# Explicit public API of this module.
__all__ = (
    'RecordMetadata',
)
| 25.515789 | 79 | 0.648515 |
795907a154d694b64c887e50696352ac36c8c823 | 22,077 | py | Python | mvpa2/generators/partition.py | thomastweets/PyMVPA | a9c05acd7569639bb636aed3c22a13b21559ca02 | [
"MIT"
] | 1 | 2016-08-23T05:04:09.000Z | 2016-08-23T05:04:09.000Z | mvpa2/generators/partition.py | thomastweets/PyMVPA | a9c05acd7569639bb636aed3c22a13b21559ca02 | [
"MIT"
] | null | null | null | mvpa2/generators/partition.py | thomastweets/PyMVPA | a9c05acd7569639bb636aed3c22a13b21559ca02 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""""""
__docformat__ = 'restructuredtext'
import numpy as np
from mvpa2.base.dochelpers import _repr_attrs
from mvpa2.support.utils import deprecated
from mvpa2.base.node import Node
from mvpa2.datasets.miscfx import coarsen_chunks
import mvpa2.misc.support as support
if __debug__:
from mvpa2.base import debug
class Partitioner(Node):
    """Generator node to partition a dataset.
    Partitioning is done by adding a sample attribute that assigns samples to an
    arbitrary number of partitions. Subclasses offer a variety of partitioning
    technique that are useful in e.g. cross-validation procedures.
    it is important to note that other than adding a new sample attribute input
    datasets are not modified. In particular, there is no splitting of datasets
    into multiple pieces. If this is desired, a Partitioner can be chained to a
    `Splitter` node to achieve this.
    """
    # Valid values for the ``selection_strategy`` constructor argument.
    _STRATEGIES = ('first', 'random', 'equidistant')
    _NPERLABEL_STR = ['equal', 'all']
    def __init__(self,
                 count=None,
                 selection_strategy='equidistant',
                 attr='chunks',
                 space='partitions',
                 **kwargs):
        """
        Parameters
        ----------
        count : None or int
          Desired number of splits to be output. It is limited by the
          number of splits possible for a given splitter
          (e.g. `OddEvenSplitter` can have only up to 2 splits). If None,
          all splits are output (default).
        selection_strategy : str
          If `count` is not None, possible strategies are possible:
          'first': First `count` splits are chosen;
          'random': Random (without replacement) `count` splits are chosen;
          'equidistant': Splits which are equidistant from each other.
        attr : str
          Sample attribute used to determine splits.
        space : str
          Name of the to be created sample attribute defining the partitions.
          In addition, a dataset attribute named '``space``\_set' will be added
          to each output dataset, indicating the number of the partition set
          it corresponds to.
        """
        Node.__init__(self, space=space, **kwargs)
        # pylint happyness block
        self.__attr = attr
        # we don't check it, thus no reason to make it private.
        # someone might find it useful to change post creation
        # TODO utilize such (or similar) policy through out the code
        self.count = count
        self._set_selection_strategy(selection_strategy)
    def __repr__(self, prefixes=[]):
        # Here we are jumping over Node's __repr__ since
        # it would enforce placing space
        return super(Node, self).__repr__(
            prefixes=prefixes
            + _repr_attrs(self, ['count'])
            + _repr_attrs(self, ['selection_strategy'], default='equidistant')
            + _repr_attrs(self, ['attr'], default='chunks')
            + _repr_attrs(self, ['space'], default='partitions')
            )
    def _set_selection_strategy(self, strategy):
        """Set strategy to select splits out from available
        """
        # normalized to lower case; anything outside _STRATEGIES is rejected
        strategy = strategy.lower()
        if not strategy in self._STRATEGIES:
            raise ValueError, "selection_strategy is not known. Known are %s" \
                  % str(self._STRATEGIES)
        self.__selection_strategy = strategy
    def _get_partition_specs(self, uniqueattr):
        """Return list with samples of 2nd dataset in a split.
        Each subclass has to implement this method. It gets a sequence with
        the unique attribute ids of a dataset and has to return a list of lists
        containing sample ids to split into the second dataset.
        """
        raise NotImplementedError
    def generate(self, ds):
        # for each split
        cfgs = self.get_partition_specs(ds)
        n_cfgs = len(cfgs)
        for iparts, parts in enumerate(cfgs):
            # give attribute array defining the current partition set
            pattr = self.get_partitions_attr(ds, parts)
            # shallow copy of the dataset -- only the new sample attribute
            # and the dataset attributes below are added, samples are shared
            pds = ds.copy(deep=False)
            pds.sa[self.get_space()] = pattr
            pds.a[self.get_space() + "_set"] = iparts
            # flag allowing consumers to detect the final partition set
            pds.a['lastpartitionset'] = iparts == (n_cfgs - 1)
            yield pds
    def get_partitions_attr(self, ds, specs):
        """Create a partition attribute array for a particular partion spec.
        Parameters
        ----------
        ds : Dataset
          This is this source dataset.
        specs : sequence of sequences
          Contains ids of a sample attribute that shall go into each partition.
        Returns
        -------
        array(ints)
          Each partition is represented by a unique integer value.
        """
        # collect the sample ids for each resulting dataset
        filters = []
        none_specs = 0
        cum_filter = None
        splitattr_data = ds.sa[self.__attr].value
        # for each partition in this set
        for spec in specs:
            if spec is None:
                # a None spec means "all remaining samples"; resolved after
                # all explicit specs have been turned into masks
                filters.append(None)
                none_specs += 1
            else:
                filter_ = np.array([ i in spec \
                                     for i in splitattr_data], dtype='bool')
                filters.append(filter_)
                if cum_filter is None:
                    cum_filter = filter_
                else:
                    cum_filter = np.logical_and(cum_filter, filter_)
        # need to turn possible Nones into proper ids sequences
        if none_specs > 1:
            raise ValueError("'%s' cannot handle more than one `None` " \
                             "partition spec." % self.__class__.__name__)
        # go with ints for simplicity. By default the attr is zeros, and the
        # first configured partition starts with one.
        part_attr = np.zeros(len(ds), dtype='int')
        for i, filter_ in enumerate(filters):
            # turn the one 'all the rest' filter into a slicing arg
            if filter_ is None:
                filter_ = np.logical_not(cum_filter)
            # now filter is guaranteed to be a slicing argument that can be used
            # to assign the attribute values
            part_attr[filter_] = i + 1
        return part_attr
    def get_partition_specs(self, ds):
        """Returns the specs for all to be generated partition sets.
        Returns
        -------
        list(lists)
        """
        # list (#splits) of lists (#partitions)
        cfgs = self._get_partition_specs(ds.sa[self.__attr].unique)
        # Select just some splits if desired
        count, n_cfgs = self.count, len(cfgs)
        # further makes sense only if count < n_cfgs,
        # otherwise all strategies are equivalent
        if count is not None and count < n_cfgs:
            if count < 1:
                # we can only wish a good luck
                return []
            strategy = self.selection_strategy
            if strategy == 'first':
                cfgs = cfgs[:count]
            elif strategy in ['equidistant', 'random']:
                if strategy == 'equidistant':
                    # figure out what step is needed to
                    # accommodate the `count` number
                    step = float(n_cfgs) / count
                    assert(step >= 1.0)
                    indexes = [int(round(step * i)) for i in xrange(count)]
                elif strategy == 'random':
                    indexes = np.random.permutation(range(n_cfgs))[:count]
                    # doesn't matter much but lets keep them in the original
                    # order at least
                    indexes.sort()
                else:
                    # who said that I am paranoid?
                    raise RuntimeError, "Really should not happen"
                if __debug__:
                    debug("SPL", "For %s selection strategy selected %s "
                          "partition specs from %d total"
                          % (strategy, indexes, n_cfgs))
                cfgs = [cfgs[i] for i in indexes]
        return cfgs
    @property
    @deprecated("to be removed in PyMVPA 2.1; use .attr instead")
    def splitattr(self):
        return self.attr
    # read/write property; setter funnels through _set_selection_strategy
    # for validation
    selection_strategy = property(fget=lambda self:self.__selection_strategy,
                                  fset=_set_selection_strategy)
    # read-only: the sample attribute used to determine splits
    attr = property(fget=lambda self: self.__attr)
class OddEvenPartitioner(Partitioner):
    """Create odd and even partitions based on a sample attribute.

    The partitioner yields two datasets. In the first set all odd chunks are
    labeled '1' and all even runs are labeled '2'. In the second set the
    assignment is reversed (odd: '2', even: '1').
    """
    def __init__(self, usevalues=False, **kwargs):
        """
        Parameters
        ----------
        usevalues : bool
          If True the values of the attribute used for partitioning will be
          used to determine odd and even samples. If False odd and even
          chunks are defined by the order of attribute values, i.e. first
          unique attribute is odd, second is even, despite the
          corresponding values might indicate the opposite (e.g. in case
          of [2,3].
        """
        Partitioner.__init__(self, **(kwargs))
        self.__usevalues = usevalues

    def __repr__(self, prefixes=[]):
        return super(OddEvenPartitioner, self).__repr__(
            prefixes=prefixes
            + _repr_attrs(self, ['usevalues'], default=False))

    def _get_partition_specs(self, uniqueattrs):
        """
        Returns
        -------
        list of tuples (None, list of int)
          2 items: odd samples into 1st split
        """
        # Compare the remainder against the explicit integers 1/0 rather than
        # the boolean literals True/False -- numerically identical for
        # ndarrays (True == 1), but unambiguous about the intent.
        if self.__usevalues:
            # odd/even decided by the attribute *values* themselves
            remainder = uniqueattrs % 2
            return [(None, uniqueattrs[remainder == 1]),
                    (None, uniqueattrs[remainder == 0])]
        else:
            # odd/even decided by *position* within the unique values
            position = np.arange(len(uniqueattrs))
            return [(None, uniqueattrs[position % 2 == 1]),
                    (None, uniqueattrs[position % 2 == 0])]

    usevalues = property(fget=lambda self: self.__usevalues)
class HalfPartitioner(Partitioner):
    """Partition a dataset into two halves of the sample attribute.

    The partitioner yields two datasets. In the first set second half of
    chunks are labeled '1' and the first half labeled '2'. In the second set the
    assignment is reversed (1st half: '1', 2nd half: '2').
    """
    def _get_partition_specs(self, uniqueattrs):
        """
        Returns
        -------
        list of tuples (None, list of int)
          2 items: first half of samples into 1st split
        """
        # Explicit floor division: ``len(...)/2`` is identical for ints on
        # Python 2, but would yield a float (an invalid slice index) under
        # ``from __future__ import division`` or Python 3.
        half = len(uniqueattrs) // 2
        return [(None, uniqueattrs[:half]),
                (None, uniqueattrs[half:])]
class NGroupPartitioner(Partitioner):
    """Partition a dataset into N-groups of the sample attribute.

    For example, NGroupPartitioner(2) is the same as the HalfPartitioner and
    yields exactly the same partitions and labeling patterns.
    """
    def __init__(self, ngroups=4, **kwargs):
        """
        Parameters
        ----------
        ngroups : int
          Number of groups to split the attribute into.
        """
        Partitioner.__init__(self, **(kwargs))
        self.__ngroups = ngroups

    def __repr__(self, prefixes=[]):
        return super(NGroupPartitioner, self).__repr__(
            prefixes=prefixes
            + _repr_attrs(self, ['ngroups'], default=4))

    def _get_partition_specs(self, uniqueattrs):
        """
        Returns
        -------
        list of tuples (None, list of int)
          Indices for splitting
        """
        navailable = len(uniqueattrs)
        # cannot form more groups than there are unique attribute values
        if navailable < self.__ngroups:
            raise ValueError("Number of groups (%d) must be less than "
                             "or equal to the number of unique attributes (%d)"
                             % (self.__ngroups, navailable))
        # let coarsen_chunks assign a group id to every unique value, then
        # gather the values belonging to each group
        group_of = np.asarray(coarsen_chunks(uniqueattrs,
                                             nchunks=self.__ngroups))
        return [(None, uniqueattrs[group_of == group])
                for group in range(self.__ngroups)]

    ngroups = property(fget=lambda self: self.__ngroups)
class CustomPartitioner(Partitioner):
    """Partition a dataset using an arbitrary custom rule.
    The partitioner is configured by passing a custom rule (``splitrule``) to its
    constructor. Such a rule is basically a sequence of partition definitions.
    Every single element in this sequence results in exactly one partition set.
    Each element is another sequence of attribute values whose corresponding
    samples shall go into a particular partition.
    Examples
    --------
    Generate two sets. In the first set the *second* partition
    contains all samples with sample attributes corresponding to
    either 0, 1 or 2. The *first* partition of the first set contains
    all samples which are not part of the second partition.
    The second set yields three partitions. The first with all samples
    corresponding to sample attributes 1 and 2, the second contains only
    samples with attribute 3 and the last contains the samples with attribute 5
    and 6.
    >>> ptr = CustomPartitioner([(None, [0, 1, 2]), ([1,2], [3], [5, 6])])
    The numeric labels of all partitions correspond to their position in the
    ``splitrule`` of a particular set. Note that the actual labels start with
    '1' as all unselected elements are labeled '0'.
    """
    def __init__(self, splitrule, **kwargs):
        """
        Parameters
        ----------
        splitrule : list of tuple
          Custom partition set specs.
        """
        Partitioner.__init__(self, **(kwargs))
        # stored as-is; a None entry within a spec means "all remaining
        # attribute values" and is resolved by Partitioner.get_partitions_attr
        self.splitrule = splitrule
    def __repr__(self, prefixes=[]):
        return super(CustomPartitioner, self).__repr__(
            prefixes=prefixes
            + _repr_attrs(self, ['splitrule']))
    def _get_partition_specs(self, uniqueattrs):
        """
        Returns
        -------
        whatever was provided in splitrule argument
        """
        # `uniqueattrs` is deliberately ignored -- the user-supplied rule
        # fully determines the partition sets
        return self.splitrule
class NFoldPartitioner(Partitioner):
    """Generic N-fold data partitioner.

    Given a dataset with N chunks, with ``cvtype`` = 1 (which is default), it
    would generate N partition sets, where each chunk is sequentially taken out
    (with replacement) to form a second partition, while all other samples
    together form the first partition. Example, if there are 4 chunks, partition
    sets for ``cvtype`` = 1 are::

        [[1, 2, 3], [0]]
        [[0, 2, 3], [1]]
        [[0, 1, 3], [2]]
        [[0, 1, 2], [3]]

    If ``cvtype``>1, then all possible combinations of ``cvtype`` number of
    chunks are taken out, so for ``cvtype`` = 2 in previous example yields::

        [[2, 3], [0, 1]]
        [[1, 3], [0, 2]]
        [[1, 2], [0, 3]]
        [[0, 3], [1, 2]]
        [[0, 2], [1, 3]]
        [[0, 1], [2, 3]]

    Note that the "taken-out" partition is always labeled '2' while the
    remaining elements are labeled '1'.

    If ``cvtype`` is a float in the range from 0 to 1, it specifies
    the ratio of present unique values to be taken.

    If ``cvtype`` is large enough generating prohibitively large
    number of combinations, provide ``count`` to limit number of
    combinations and provide ``selection_strategy`` = 'random'.
    """
    _DEV__doc__ = """
    Might want to make it smarter and implement generate() generator?
    Especially for the cases which use xrandom_unique_combinations
    All needed machinery is there
    """
    def __init__(self, cvtype=1, **kwargs):
        """
        Parameters
        ----------
        cvtype : int, float
          Type of leave-one-out scheme: N-(cvtype). float value
          (0..1) specifies ratio of samples to be taken into the
          combination (e.g. 0.5 for 50%) given a dataset
        """
        Partitioner.__init__(self, **kwargs)
        # a float cvtype is a ratio and must lie strictly between 0 and 1
        if isinstance(cvtype, float) and not (0 < cvtype < 1):
            raise ValueError("Float value for cvtype must be within range "
                             "(0, 1), excluding boundaries. Got %r."
                             % cvtype)
        self.cvtype = cvtype

    def __repr__(self, prefixes=[]): #pylint: disable-msg=W0102
        return super(NFoldPartitioner, self).__repr__(
            prefixes=prefixes
            + _repr_attrs(self, ['cvtype'], default=1))

    def _get_partition_specs(self, uniqueattrs):
        nattrs = len(uniqueattrs)
        # resolve a ratio-style cvtype into an absolute number of values
        # taken out per fold
        if isinstance(self.cvtype, float):
            fold_size = int(self.cvtype * nattrs)
        else:
            fold_size = self.cvtype
        # random subsampling of the combinations is requested only when a
        # limit was given together with the 'random' strategy
        want_random = self.count is not None \
                      and self.selection_strategy == 'random'
        if want_random \
           and self.count < support.ncombinations(nattrs, fold_size):
            # draw only `count` random unique combinations; they would be
            # reshuffled again by the superclass but that is harmless
            combs = support.xrandom_unique_combinations(uniqueattrs, fold_size,
                                                        self.count)
        else:
            # all combinations are needed (or cheap enough to enumerate)
            combs = support.xunique_combinations(uniqueattrs, fold_size)
        if not want_random:
            # doomed to return all of them
            return [(None, comb) for comb in combs]
        # limit the number of returned combinations right away
        return [(None, comb)
                for idx, comb in enumerate(combs) if idx < self.count]
class ExcludeTargetsCombinationsPartitioner(Node):
    """Exclude combinations for a given partition from other partitions
    Given a pre-generated partitioning this generates new partitions
    by selecting all possible combinations of k-targets from all
    targets and excluding samples with the selected k-targets from
    training partition for each combination.
    A simple example would be:
    Examples
    --------
    For a dataset with 3 classes with one sample per class, k=2 gives
    3 combination partitions with 2 samples for testing and one sample
    for training (since it excludes the 2 selected target samples) per
    partition.
    >>> from mvpa2.base.node import ChainNode
    >>> partitioner = ChainNode([NFoldPartitioner(),
    ...                          ExcludeTargetsCombinationsPartitioner(
    ...                             k=2,
    ...                             targets_attr='targets',
    ...                             space='partitions')],
    ...                         space='partitions')
    While cross-validating across subjects (e.g. working with
    hyperaligned data), to avoid significant bias due to matching
    trial-order effects instead of categorical boundaries, it is
    important to exclude from training chunks with the order matching
    the ones in testing.
    >>> partitioner = ChainNode([NFoldPartitioner(attr='subject'),
    ...                          ExcludeTargetsCombinationsPartitioner(
    ...                             k=1,
    ...                             targets_attr='chunks',
    ...                             space='partitions')],
    ...                         space='partitions')
    """
    def __init__(self, k,
                 targets_attr,
                 partitions_attr='partitions',
                 partitions_keep=2,  # default for testing partition
                 partition_assign=3, # assign one which Splitter doesn't even get to
                 **kwargs):
        Node.__init__(self, **kwargs)
        # k: size of the target combinations drawn from the kept partition
        self.k = k
        # sample attribute holding the targets to combine over
        self.targets_attr = targets_attr
        # sample attribute holding the input partitioning
        self.partitions_attr = partitions_attr
        self.partitions_keep = partitions_keep
        self.partition_assign = partition_assign
    def __repr__(self, prefixes=[]):
        # Here we are jumping over Node's __repr__ since
        # it would enforce placing space
        return super(ExcludeTargetsCombinationsPartitioner, self).__repr__(
            prefixes=prefixes
            + _repr_attrs(self, ['k', 'targets_attr'])
            + _repr_attrs(self, ['partitions_attr'], default='partitions')
            + _repr_attrs(self, ['partitions_keep'], default=2)
            + _repr_attrs(self, ['partition_assign'], default=3)
            )
    def generate(self, ds):
        # work on a private copy so each yielded dataset starts from the
        # original partitioning
        orig_partitioning = ds.sa[self.partitions_attr].value.copy()
        targets = ds.sa[self.targets_attr].value
        # boolean masks for the partition we keep for testing and the rest
        testing_part = orig_partitioning == self.partitions_keep
        nontesting_part = np.logical_not(testing_part)
        # combinations are drawn only from targets present in the kept part
        utargets = np.unique(targets[testing_part])
        for combination in support.xunique_combinations(utargets, self.k):
            partitioning = orig_partitioning.copy()
            combination_matches = [ t in combination for t in targets ]
            combination_nonmatches = np.logical_not(combination_matches)
            # kept-partition samples NOT in the combination are parked into
            # the `partition_assign` bucket (excluded from testing) ...
            partitioning[np.logical_and(testing_part,
                                        combination_nonmatches)] \
                                        = self.partition_assign
            # ... and so are non-kept samples that DO match the combination
            # (excluded from training)
            partitioning[np.logical_and(nontesting_part,
                                        combination_matches)] \
                                        = self.partition_assign
            pds = ds.copy(deep=False)
            pds.sa[self.space] = partitioning
            yield pds
| 37.93299 | 84 | 0.592064 |
795907e92e52c9832c578a4d9ba7a154b9136a39 | 475 | py | Python | makemessages.py | jackton1/django-service-objects | cdcaedb64154b949ab6c5e5de60b4f9835f1cc98 | [
"MIT"
] | 328 | 2017-08-13T19:09:31.000Z | 2022-03-30T09:02:35.000Z | makemessages.py | jackton1/django-service-objects | cdcaedb64154b949ab6c5e5de60b4f9835f1cc98 | [
"MIT"
] | 50 | 2017-08-17T02:31:49.000Z | 2022-02-23T22:45:13.000Z | makemessages.py | jackton1/django-service-objects | cdcaedb64154b949ab6c5e5de60b4f9835f1cc98 | [
"MIT"
] | 32 | 2017-08-15T03:29:53.000Z | 2022-01-24T22:18:05.000Z | import os
import django
from django.conf import settings
from django.core import management
BASE_DIR = os.path.dirname(__file__)
# Minimal standalone Django configuration: only a database stub and the
# locale directory are required for the `makemessages` management command.
settings.configure(
    DATABASES={
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
        }
    },
    LOCALE_PATHS=(
        os.path.join(BASE_DIR, 'locale'),
    ),
)
django.setup()
# Extract translatable strings for the pt_BR (Brazilian Portuguese) locale.
management.call_command('makemessages', "--locale=pt_BR", verbosity=0,
                        interactive=False)
| 19.791667 | 70 | 0.604211 |
795907f67ca47c3be4b267da774edd733ef4c68b | 1,034 | py | Python | kubernetes_asyncio/test/test_v1_projected_volume_source.py | aK0nshin/kubernetes_asyncio | aef9edcc1f8671a5b1bba9f4684bde890176b19c | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/test/test_v1_projected_volume_source.py | aK0nshin/kubernetes_asyncio | aef9edcc1f8671a5b1bba9f4684bde890176b19c | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/test/test_v1_projected_volume_source.py | aK0nshin/kubernetes_asyncio | aef9edcc1f8671a5b1bba9f4684bde890176b19c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1_projected_volume_source import V1ProjectedVolumeSource # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1ProjectedVolumeSource(unittest.TestCase):
    """V1ProjectedVolumeSource unit test stubs"""
    # Generated stub: no fixtures are required.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testV1ProjectedVolumeSource(self):
        """Test V1ProjectedVolumeSource"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes_asyncio.client.models.v1_projected_volume_source.V1ProjectedVolumeSource() # noqa: E501
        pass
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()
| 25.85 | 124 | 0.743714 |
7959085f033f86092c7627b0914a72900d850aa3 | 22,082 | py | Python | Python27/Lib/bsddb/test/test_replication.py | Jeff-Tian/mybnb | 1a42890a1d2f1344d5465f8be10c42df01964f5a | [
"Apache-2.0"
] | 1 | 2019-05-17T08:58:11.000Z | 2019-05-17T08:58:11.000Z | Python27/Lib/bsddb/test/test_replication.py | Jeff-Tian/mybnb | 1a42890a1d2f1344d5465f8be10c42df01964f5a | [
"Apache-2.0"
] | 2 | 2016-12-12T05:54:58.000Z | 2016-12-12T05:55:44.000Z | Python27/Lib/bsddb/test/test_replication.py | Jeff-Tian/mybnb | 1a42890a1d2f1344d5465f8be10c42df01964f5a | [
"Apache-2.0"
] | null | null | null | """TestCases for distributed transactions.
"""
import os
import time
import unittest
from test_all import db, test_support, have_threads, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class DBReplication(unittest.TestCase) :
    def setUp(self) :
        # Two independent BDB environments: one master, one client.
        self.homeDirMaster = get_new_environment_path()
        self.homeDirClient = get_new_environment_path()
        self.dbenvMaster = db.DBEnv()
        self.dbenvClient = db.DBEnv()
        # Must use "DB_THREAD" because the Replication Manager will
        # be executed in other threads but will use the same environment.
        # http://forums.oracle.com/forums/thread.jspa?threadID=645788&tstart=0
        self.dbenvMaster.open(self.homeDirMaster, db.DB_CREATE | db.DB_INIT_TXN
                | db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
                db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)
        self.dbenvClient.open(self.homeDirClient, db.DB_CREATE | db.DB_INIT_TXN
                | db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
                db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)
        # Flags flipped by the BDB event callbacks below; polled by tests.
        self.confirmed_master=self.client_startupdone=False
        def confirmed_master(a,b,c) :
            if b==db.DB_EVENT_REP_MASTER :
                self.confirmed_master=True
        def client_startupdone(a,b,c) :
            if b==db.DB_EVENT_REP_STARTUPDONE :
                self.client_startupdone=True
        self.dbenvMaster.set_event_notify(confirmed_master)
        self.dbenvClient.set_event_notify(client_startupdone)
        #self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
        self.dbMaster = self.dbClient = None
    def tearDown(self):
        # Close databases before their environments.
        if self.dbClient :
            self.dbClient.close()
        if self.dbMaster :
            self.dbMaster.close()
        # Here we assign dummy event handlers to allow GC of the test object.
        # Since the dummy handler doesn't use any outer scope variable, it
        # doesn't keep any reference to the test object.
        def dummy(*args) :
            pass
        self.dbenvMaster.set_event_notify(dummy)
        self.dbenvClient.set_event_notify(dummy)
        self.dbenvClient.close()
        self.dbenvMaster.close()
        test_support.rmtree(self.homeDirClient)
        test_support.rmtree(self.homeDirMaster)
class DBReplicationManager(DBReplication) :
def test01_basic_replication(self) :
master_port = test_support.find_unused_port()
client_port = test_support.find_unused_port()
if db.version() >= (5, 2) :
self.site = self.dbenvMaster.repmgr_site("127.0.0.1", master_port)
self.site.set_config(db.DB_GROUP_CREATOR, True)
self.site.set_config(db.DB_LOCAL_SITE, True)
self.site2 = self.dbenvMaster.repmgr_site("127.0.0.1", client_port)
self.site3 = self.dbenvClient.repmgr_site("127.0.0.1", master_port)
self.site3.set_config(db.DB_BOOTSTRAP_HELPER, True)
self.site4 = self.dbenvClient.repmgr_site("127.0.0.1", client_port)
self.site4.set_config(db.DB_LOCAL_SITE, True)
d = {
db.DB_BOOTSTRAP_HELPER: [False, False, True, False],
db.DB_GROUP_CREATOR: [True, False, False, False],
db.DB_LEGACY: [False, False, False, False],
db.DB_LOCAL_SITE: [True, False, False, True],
db.DB_REPMGR_PEER: [False, False, False, False ],
}
for i, j in d.items() :
for k, v in \
zip([self.site, self.site2, self.site3, self.site4], j) :
if v :
self.assertTrue(k.get_config(i))
else :
self.assertFalse(k.get_config(i))
self.assertNotEqual(self.site.get_eid(), self.site2.get_eid())
self.assertNotEqual(self.site3.get_eid(), self.site4.get_eid())
for i, j in zip([self.site, self.site2, self.site3, self.site4], \
[master_port, client_port, master_port, client_port]) :
addr = i.get_address()
self.assertEqual(addr, ("127.0.0.1", j))
for i in [self.site, self.site2] :
self.assertEqual(i.get_address(),
self.dbenvMaster.repmgr_site_by_eid(i.get_eid()).get_address())
for i in [self.site3, self.site4] :
self.assertEqual(i.get_address(),
self.dbenvClient.repmgr_site_by_eid(i.get_eid()).get_address())
else :
self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
self.dbenvMaster.rep_set_nsites(2)
self.dbenvClient.rep_set_nsites(2)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_priority(0)
self.dbenvMaster.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100123)
self.dbenvClient.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100321)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100123)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100321)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100234)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100432)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100234)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100432)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100345)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100543)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100345)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100543)
self.dbenvMaster.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvClient.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvMaster.repmgr_start(1, db.DB_REP_MASTER);
self.dbenvClient.repmgr_start(1, db.DB_REP_CLIENT);
self.assertEqual(self.dbenvMaster.rep_get_nsites(),2)
self.assertEqual(self.dbenvClient.rep_get_nsites(),2)
self.assertEqual(self.dbenvMaster.rep_get_priority(),10)
self.assertEqual(self.dbenvClient.rep_get_priority(),0)
self.assertEqual(self.dbenvMaster.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
self.assertEqual(self.dbenvClient.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+60
while (time.time()<timeout) and not (self.confirmed_master and self.client_startupdone) :
time.sleep(0.02)
# self.client_startupdone does not always get set to True within
# the timeout. On windows this may be a deep issue, on other
# platforms it is likely just a timing issue, especially on slow
# virthost buildbots (see issue 3892 for more). Even though
# the timeout triggers, the rest of this test method usually passes
# (but not all of it always, see below). So we just note the
# timeout on stderr and keep soldering on.
if time.time()>timeout:
import sys
print >> sys.stderr, ("XXX: timeout happened before"
"startup was confirmed - see issue 3892")
startup_timeout = True
d = self.dbenvMaster.repmgr_site_list()
self.assertEqual(len(d), 1)
d = d.values()[0] # There is only one
self.assertEqual(d[0], "127.0.0.1")
self.assertEqual(d[1], client_port)
self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
(d[2]==db.DB_REPMGR_DISCONNECTED))
d = self.dbenvClient.repmgr_site_list()
self.assertEqual(len(d), 1)
d = d.values()[0] # There is only one
self.assertEqual(d[0], "127.0.0.1")
self.assertEqual(d[1], master_port)
self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
(d[2]==db.DB_REPMGR_DISCONNECTED))
if db.version() >= (4,6) :
d = self.dbenvMaster.repmgr_stat(flags=db.DB_STAT_CLEAR);
self.assertTrue("msgs_queued" in d)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v is None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
# If startup did not happen before the timeout above, then this test
# sometimes fails. This happens randomly, which causes buildbot
# instability, but all the other bsddb tests pass. Since bsddb3 in the
# stdlib is currently not getting active maintenance, and is gone in
# py3k, we just skip the end of the test in that case.
if time.time()>=timeout and startup_timeout:
self.skipTest("replication test skipped due to random failure, "
"see issue 3892")
self.assertTrue(time.time()<timeout)
self.assertEqual("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v is not None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEqual(None, v)
class DBBaseReplication(DBReplication) :
    def setUp(self) :
        # reuse the master/client environment setup from DBReplication
        DBReplication.setUp(self)
        # base-API tests also treat DB_EVENT_REP_ELECTED as master confirmation
        def confirmed_master(a,b,c) :
            if (b == db.DB_EVENT_REP_MASTER) or (b == db.DB_EVENT_REP_ELECTED) :
                self.confirmed_master = True
        def client_startupdone(a,b,c) :
            if b == db.DB_EVENT_REP_STARTUPDONE :
                self.client_startupdone = True
        self.dbenvMaster.set_event_notify(confirmed_master)
        self.dbenvClient.set_event_notify(client_startupdone)
        # message queues carrying replication records between the two nodes
        import Queue
        self.m2c = Queue.Queue()
        self.c2m = Queue.Queue()
        # There are only two nodes, so we don't need to
        # do any routing decision
        def m2c(dbenv, control, rec, lsnp, envid, flags) :
            self.m2c.put((control, rec))
        def c2m(dbenv, control, rec, lsnp, envid, flags) :
            self.c2m.put((control, rec))
        # envids 13 (master) and 3 (client) identify the peers to BDB
        self.dbenvMaster.rep_set_transport(13,m2c)
        self.dbenvMaster.rep_set_priority(10)
        self.dbenvClient.rep_set_transport(3,c2m)
        self.dbenvClient.rep_set_priority(0)
        self.assertEqual(self.dbenvMaster.rep_get_priority(),10)
        self.assertEqual(self.dbenvClient.rep_get_priority(),0)
        #self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
        # worker threads: each node drains the queue fed by its peer
        def thread_master() :
            return self.thread_do(self.dbenvMaster, self.c2m, 3,
                    self.master_doing_election, True)
        def thread_client() :
            return self.thread_do(self.dbenvClient, self.m2c, 13,
                    self.client_doing_election, False)
        from threading import Thread
        t_m=Thread(target=thread_master)
        t_c=Thread(target=thread_client)
        import sys
        # daemon flag spelling differs between Python 2 and 3
        if sys.version_info[0] < 3 :
            t_m.setDaemon(True)
            t_c.setDaemon(True)
        else :
            t_m.daemon = True
            t_c.daemon = True
        self.t_m = t_m
        self.t_c = t_c
        self.dbMaster = self.dbClient = None
        # single-element lists used as mutable shared flags for the threads
        self.master_doing_election=[False]
        self.client_doing_election=[False]
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
self.m2c.put(None)
self.c2m.put(None)
self.t_m.join()
self.t_c.join()
# Here we assign dummy event handlers to allow GC of the test object.
# Since the dummy handler doesn't use any outer scope variable, it
# doesn't keep any reference to the test object.
def dummy(*args) :
pass
self.dbenvMaster.set_event_notify(dummy)
self.dbenvClient.set_event_notify(dummy)
self.dbenvMaster.rep_set_transport(13,dummy)
self.dbenvClient.rep_set_transport(3,dummy)
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
def basic_rep_threading(self) :
self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v is None : return
env.rep_process_message(v[0], v[1], envid)
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
def test01_basic_replication(self) :
self.basic_rep_threading()
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+60
while (time.time()<timeout) and not (self.confirmed_master and
self.client_startupdone) :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
d = self.dbenvMaster.rep_stat(flags=db.DB_STAT_CLEAR);
self.assertTrue("master_changes" in d)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v is None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEqual("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v is not None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEqual(None, v)
if db.version() >= (4,7) :
def test02_test_request(self) :
self.basic_rep_threading()
(minimum, maximum) = self.dbenvClient.rep_get_request()
self.dbenvClient.rep_set_request(minimum-1, maximum+1)
self.assertEqual(self.dbenvClient.rep_get_request(),
(minimum-1, maximum+1))
if db.version() >= (4,6) :
def test03_master_election(self) :
# Get ready to hold an election
#self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvMaster.rep_start(flags=db.DB_REP_CLIENT)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v is None : return
r = env.rep_process_message(v[0],v[1],envid)
if must_be_master and self.confirmed_master :
self.dbenvMaster.rep_start(flags = db.DB_REP_MASTER)
must_be_master = False
if r[0] == db.DB_REP_HOLDELECTION :
def elect() :
while True :
try :
env.rep_elect(2, 1)
election_status[0] = False
break
except db.DBRepUnavailError :
pass
if not election_status[0] and not self.confirmed_master :
from threading import Thread
election_status[0] = True
t=Thread(target=elect)
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.client_doing_election[0] = True
while True :
try :
self.dbenvClient.rep_elect(2, 1)
self.client_doing_election[0] = False
break
except db.DBRepUnavailError :
pass
self.assertTrue(self.confirmed_master)
# Race condition showed up after upgrading to Solaris 10 Update 10
# https://forums.oracle.com/forums/thread.jspa?messageID=9902860
# jcea@jcea.es: See private email from Paula Bingham (Oracle),
# in 20110929.
while not (self.dbenvClient.rep_stat()["startup_complete"]) :
pass
if db.version() >= (4,7) :
def test04_test_clockskew(self) :
fast, slow = 1234, 1230
self.dbenvMaster.rep_set_clockskew(fast, slow)
self.assertEqual((fast, slow),
self.dbenvMaster.rep_get_clockskew())
self.basic_rep_threading()
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if db.version() >= (4, 6) :
dbenv = db.DBEnv()
try :
dbenv.repmgr_get_ack_policy()
ReplicationManager_available=True
except :
ReplicationManager_available=False
dbenv.close()
del dbenv
if ReplicationManager_available :
suite.addTest(unittest.makeSuite(DBReplicationManager))
if have_threads :
suite.addTest(unittest.makeSuite(DBBaseReplication))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 40.591912 | 98 | 0.577212 |
79590923c22ea1d25379dad0673a358ddf6d5faf | 1,537 | py | Python | main.py | mkcarl/NovelScraper | 8c6bc11197add9149057edfc0d9c7105745aa24a | [
"MIT"
] | null | null | null | main.py | mkcarl/NovelScraper | 8c6bc11197add9149057edfc0d9c7105745aa24a | [
"MIT"
] | null | null | null | main.py | mkcarl/NovelScraper | 8c6bc11197add9149057edfc0d9c7105745aa24a | [
"MIT"
] | null | null | null | import WebProcesses
from ebooklib import epub
import os
import time
def main():
NOVEL_BASE_URL = "https://www.wuxiaworld.com/novel/rmji/rmji-chapter-"
novel = WebProcesses.Novel(NOVEL_BASE_URL, 0, 2110) # Change me
novel.content_id = "chapter-content" # Change me
novel.name = "A Record of a Mortal's Journey to Immortality".replace(' ', '_') # Change me
novel.extract_content(asynchronous=True, ratelimit=60) # Change me
novel.export("html")
book = epub.EpubBook()
book.set_identifier("rmji") # Change me
book.set_title(novel.name.replace('_', ' '))
book.set_language("en")
book.add_author("Default Author") # Change me
book_chapters = []
for chapter in os.listdir(f"chapters/{novel.name}"):
with open(f"chapters/{novel.name}/{chapter}", "r", encoding="utf-8") as f:
c = epub.EpubHtml(
title=chapter,
file_name=f"{chapter}.html"
)
c.set_content(f.read())
book_chapters.append(c)
for chapter in book_chapters:
book.add_item(chapter)
book_chapters_link = [epub.Link(x.file_name, x.title.split('.')[0], x.title.split('.')[0]) for x in book_chapters]
book.toc = tuple(book_chapters_link)
book.spine = book_chapters
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
epub.write_epub(f"{book.title}.epub", book, {})
def print_timer(func):
t1 = time.time()
func()
print(f"Process took {time.time() - t1} seconds")
print_timer(main)
| 31.367347 | 118 | 0.640859 |
795909c59fbbd0a7f67f1165db0673e3b04a07df | 576 | py | Python | the_site/auth/views.py | Er-sumit/django-jwt | 813beec098112f4b731e10319fda656384dfeeb7 | [
"MIT"
] | null | null | null | the_site/auth/views.py | Er-sumit/django-jwt | 813beec098112f4b731e10319fda656384dfeeb7 | [
"MIT"
] | null | null | null | the_site/auth/views.py | Er-sumit/django-jwt | 813beec098112f4b731e10319fda656384dfeeb7 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.views import View
def index(request):
return HttpResponse("Hello, world. You're at the auth index, we'll begin authentication shortly")
class HelloView(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request):
content = {'message': 'Hello, World!'}
return Response(content) | 27.428571 | 101 | 0.765625 |
79590e61f5ae980f173fba603325289bc3bcaf26 | 6,407 | py | Python | k2/python/tests/index_test.py | pzelasko/k2 | 2dbb3e09b152fcf98354c946baa271e5b57c8321 | [
"Apache-2.0"
] | null | null | null | k2/python/tests/index_test.py | pzelasko/k2 | 2dbb3e09b152fcf98354c946baa271e5b57c8321 | [
"Apache-2.0"
] | null | null | null | k2/python/tests/index_test.py | pzelasko/k2 | 2dbb3e09b152fcf98354c946baa271e5b57c8321 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
# Xiaomi Corporation (authors: Haowen Qiu)
#
# See ../../../LICENSE for clarification regarding multiple authors
# To run this single test, use
#
# ctest --verbose -R index_test_py
import unittest
import k2
import torch
class TestIndex(unittest.TestCase):
def test(self):
s0 = '''
0 1 1 0.1
0 2 2 0.2
1 2 3 0.3
2 3 -1 0.4
3
'''
s1 = '''
0 1 -1 0.5
1
'''
s2 = '''
0 2 1 0.6
0 1 2 0.7
1 3 -1 0.8
2 1 3 0.9
3
'''
fsa0 = k2.Fsa.from_str(s0).requires_grad_(True)
fsa1 = k2.Fsa.from_str(s1).requires_grad_(True)
fsa2 = k2.Fsa.from_str(s2).requires_grad_(True)
fsa_vec = k2.create_fsa_vec([fsa0, fsa1, fsa2])
new_fsa21 = k2.index(fsa_vec, torch.tensor([2, 1], dtype=torch.int32))
assert new_fsa21.shape == (2, None, None)
assert torch.allclose(
new_fsa21.arcs.values()[:, :3],
torch.tensor([
# fsa 2
[0, 2, 1],
[0, 1, 2],
[1, 3, -1],
[2, 1, 3],
# fsa 1
[0, 1, -1]
]).to(torch.int32))
scale = torch.arange(new_fsa21.scores.numel())
(new_fsa21.scores * scale).sum().backward()
assert torch.allclose(fsa0.scores.grad, torch.tensor([0., 0, 0, 0]))
assert torch.allclose(fsa1.scores.grad, torch.tensor([4.]))
assert torch.allclose(fsa2.scores.grad, torch.tensor([0., 1., 2., 3.]))
# now select only a single FSA
fsa0.scores.grad = None
fsa1.scores.grad = None
fsa2.scores.grad = None
new_fsa0 = k2.index(fsa_vec, torch.tensor([0], dtype=torch.int32))
assert new_fsa0.shape == (1, None, None)
scale = torch.arange(new_fsa0.scores.numel())
(new_fsa0.scores * scale).sum().backward()
assert torch.allclose(fsa0.scores.grad, torch.tensor([0., 1., 2., 3.]))
assert torch.allclose(fsa1.scores.grad, torch.tensor([0.]))
assert torch.allclose(fsa2.scores.grad, torch.tensor([0., 0., 0., 0.]))
class TestIndexRaggedInt(unittest.TestCase):
def test(self):
devices = [torch.device('cpu')]
if torch.cuda.is_available():
devices.append(torch.device('cuda', 0))
for device in devices:
src_row_splits = torch.tensor([0, 2, 3, 3, 6],
dtype=torch.int32,
device=device)
src_shape = k2.create_ragged_shape2(src_row_splits, None, 6)
src_values = torch.tensor([1, 2, 3, 4, 5, 6],
dtype=torch.int32,
device=device)
src = k2.RaggedInt(src_shape, src_values)
# index with ragged int
index_row_splits = torch.tensor([0, 2, 2, 3, 7],
dtype=torch.int32,
device=device)
index_shape = k2.create_ragged_shape2(index_row_splits, None, 7)
index_values = torch.tensor([0, 3, 2, 1, 2, 1, 0],
dtype=torch.int32,
device=device)
ragged_index = k2.RaggedInt(index_shape, index_values)
ans = k2.index_ragged_int(src, ragged_index)
expected_row_splits = torch.tensor([0, 5, 5, 5, 9],
dtype=torch.int32,
device=device)
self.assertTrue(
torch.allclose(ans.row_splits(1), expected_row_splits))
expected_values = torch.tensor([1, 2, 4, 5, 6, 3, 3, 1, 2],
dtype=torch.int32,
device=device)
self.assertTrue(torch.allclose(ans.values(), expected_values))
# index with tensor
tensor_index = torch.tensor([0, 3, 2, 1, 2, 1],
dtype=torch.int32,
device=device)
ans = k2.index_ragged_int(src, tensor_index)
expected_row_splits = torch.tensor([0, 2, 5, 5, 6, 6, 7],
dtype=torch.int32,
device=device)
self.assertTrue(
torch.allclose(ans.row_splits(1), expected_row_splits))
expected_values = torch.tensor([1, 2, 4, 5, 6, 3, 3],
dtype=torch.int32,
device=device)
self.assertTrue(torch.allclose(ans.values(), expected_values))
class TestIndexTensorWithRaggedInt(unittest.TestCase):
def test(self):
devices = [torch.device('cpu')]
if torch.cuda.is_available():
devices.append(torch.device('cuda', 0))
for device in devices:
src = torch.tensor([1, 2, 3, 4, 5, 6, 7],
dtype=torch.int32,
device=device)
index_row_splits = torch.tensor([0, 2, 2, 3, 7],
dtype=torch.int32,
device=device)
index_shape = k2.create_ragged_shape2(index_row_splits, None, 7)
index_values = torch.tensor([0, 3, 2, 3, 5, 1, 3],
dtype=torch.int32,
device=device)
ragged_index = k2.RaggedInt(index_shape, index_values)
ans = k2.index_tensor_with_ragged_int(src, ragged_index)
self.assertTrue(torch.allclose(ans.row_splits(1),
index_row_splits))
expected_values = torch.tensor([1, 4, 3, 4, 6, 2, 4],
dtype=torch.int32,
device=device)
self.assertTrue(torch.allclose(ans.values(), expected_values))
if __name__ == '__main__':
unittest.main()
| 39.549383 | 79 | 0.474325 |
79590ea2affde36f6ab85c172f0dfbd0b988546b | 5,299 | py | Python | market1501/make_hdf5_for_market1501.py | luweijia1013/ReID-2015 | ae415b3aa8cd3a1c0bd366284be5b106212c5473 | [
"MIT"
] | 3 | 2018-04-17T03:05:51.000Z | 2019-12-07T00:36:48.000Z | market1501/make_hdf5_for_market1501.py | luweijia1013/ReID-2015 | ae415b3aa8cd3a1c0bd366284be5b106212c5473 | [
"MIT"
] | null | null | null | market1501/make_hdf5_for_market1501.py | luweijia1013/ReID-2015 | ae415b3aa8cd3a1c0bd366284be5b106212c5473 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import h5py
import numpy as np
from PIL import Image
def make_positive_index_market1501(train_or_test = 'train',user_name = 'workspace'):
f = h5py.File('market1501_positive_index.h5')
path_list = get_image_path_list(train_or_test = train_or_test, system_user_name = user_name)
index = []
i = 0
print len(path_list)
while i < len(path_list):
j = i + 1 # i,j is the index of the photo
while j < len(path_list) and path_list[j][0:4] == path_list[i][0:4]: # [0:4] is the id of person (means same person)
if path_list[j][6] != path_list[i][6]: # [6] is the id of camera (means different camera)
index.append([path_list[i],path_list[j]])
index.append([path_list[j],path_list[i]])
print len(index)
j += 1
i += 1
print 'transforming the list to the numpy array......'
index = np.array(index)
print 'shuffling the numpy array......'
np.random.shuffle(index)
print 'storing the array to HDF5 file......'
f.create_dataset(train_or_test,data = index)
def make_test_hdf5(user_name='workspace'):
with h5py.File('test.h5') as f:
path_list = get_image_path_list(train_or_test='test')
i = path_list[0][0:4] #id
c = path_list[0][6] #camera
temp = []
fi = f.create_group(i)
for path in path_list:
print(path)
if path[0:4] == i:
if path[6] == c:
temp.append(np.array(Image.open(
'/home/' + user_name + '/market-1501/boundingboxtest/' + path)))
else:
print(len(temp))
fi.create_dataset(c, data=np.array(temp))
temp = []
c = path[6]
print(c)
temp.append(np.array(Image.open(
'/home/' + user_name + '/market-1501/boundingboxtest/' + path)))
else:
fi.create_dataset(c, data=np.array(temp))
i = path[0:4]
fi = f.create_group(i)
print (i)
c = path[6]
print (c)
temp = []
temp.append(np.array(Image.open(
'/home/' + user_name + '/market-1501/boundingboxtest/' + path)))
## there should still be some content in temp which is not stored into hdf5 file
def make_selftest_hdf5(user_name='workspace'):
with h5py.File('selftest.h5') as f:
path_list = get_image_path_list(train_or_test = 'selftest')
i = path_list[0][0:2]
## TO BE DONE (rule of naming)
def get_image_path_list(train_or_test = 'train', system_user_name = 'workspace'):
if train_or_test == 'train':
folder_path = '/home/' + system_user_name + '/market-1501/boundingboxtrain'
elif train_or_test == 'test':
folder_path = '/home/' + system_user_name + '/market-1501/boundingboxtest'
elif train_or_test == 'query':
folder_path = '/home/' + system_user_name + '/market-1501/query'
elif train_or_test == 'selftest':
folder_path = 'home/' + system_user_name + '/market-1501/myselftest'
assert os.path.isdir(folder_path)
if train_or_test == 'train' or train_or_test == 'query':
candidate = sorted(os.listdir(folder_path))
try:
index = candidate.index('Thumbs.db')
except ValueError: # no 'Thumbs.db' in folder
return candidate
else:
del candidate[index]
return candidate
elif train_or_test == 'test':
candidate = sorted(os.listdir(folder_path))[6617:] #6617? id which is 0000 or -1 is excluded
try:
index = candidate.index('Thumbs.db')
except ValueError: # no 'Thumbs.db' in folder
return candidate
else:
del candidate[index]
return candidate
elif train_or_test = 'selftest':
candidate = sorted(os.listdir(folderpath))
try:
index = candidate.index('Thumbs.db')
except ValueError: # no 'Thumbs.db' in folder
return candidate
else:
del candidate[index]
return candidate
def get_data_for_cmc(user_name = 'workspace'):
with h5py.File('test.h5','r') as f:
A = []
B = []
id_list = f.keys()
for i in id_list:
c_list = f[i].keys()
c1,c2 = np.random.choice(c_list,2) #randomly choose two cameras for every id
A.append(f[i][c1][np.random.randint(f[i][c1].shape[0])]) #randomly choose one pic from each camera
B.append(f[i][c2][np.random.randint(f[i][c2].shape[0])])
return np.array(A)/255.,np.array(B)/255.
if __name__ == '__main__':
user_name = raw_input('input your system user name:')
make_test_hdf5('workspace')
#make_positive_index_market1501('train',user_name=user_name)
#make_positive_index_market1501('test',user_name=user_name)
| 42.392 | 131 | 0.545763 |
79590f3f1736b1969d06bb65bc8f5b9f74a0cd1a | 41,567 | py | Python | resources/usr/lib/python2.7/dist-packages/numpy/polynomial/chebyshev.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/usr/lib/python2.7/dist-packages/numpy/polynomial/chebyshev.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/usr/lib/python2.7/dist-packages/numpy/polynomial/chebyshev.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | 1 | 2020-05-28T23:01:44.000Z | 2020-05-28T23:01:44.000Z | """
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebpow` -- raise a Chebyshev series to an positive integer power
- `chebval` -- evaluate a Chebyshev series at given points.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebpts1` -- Chebyshev points of the first kind.
- `chebpts2` -- Chebyshev points of the second kind.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series representing given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division
__all__ = ['chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline',
'chebadd', 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow',
'chebval', 'chebder', 'chebint', 'cheb2poly', 'poly2cheb',
'chebfromroots', 'chebvander', 'chebfit', 'chebtrim', 'chebroots',
'chebpts1', 'chebpts2', 'Chebyshev']
import numpy as np
import numpy.linalg as la
import polyutils as pu
import warnings
from polytemplate import polytemplate
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(cs) :
"""Covert Chebyshev series to z-series.
Covert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
cs : 1-d ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-d ndarray
Odd length symmetric z-series, ordered from low to high.
"""
n = cs.size
zs = np.zeros(2*n-1, dtype=cs.dtype)
zs[n-1:] = cs/2
return zs + zs[::-1]
def _zseries_to_cseries(zs) :
"""Covert z-series to a Chebyshev series.
Covert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-d ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
cs : 1-d ndarray
Chebyshev coefficients, ordered from low to high.
"""
n = (zs.size + 1)//2
cs = zs[n-1:].copy()
cs[1:n] *= 2
return cs
def _zseries_mul(z1, z2) :
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-d ndarray
The arrays must be 1-d but this is not checked.
Returns
-------
product : 1-d ndarray
The product z-series.
Notes
-----
This is simply convolution. If symmetic/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
return np.convolve(z1, z2)
def _zseries_div(z1, z2) :
"""Divide the first z-series by the second.
Divide `z1` by `z2` and return the quotient and remainder as z-series.
Warning: this implementation only applies when both z1 and z2 have the
same symmetry, which is sufficient for present purposes.
Parameters
----------
z1, z2 : 1-d ndarray
The arrays must be 1-d and have the same symmetry, but this is not
checked.
Returns
-------
(quotient, remainder) : 1-d ndarrays
Quotient and remainder as z-series.
Notes
-----
This is not the same as polynomial division on account of the desired form
of the remainder. If symmetic/anti-symmetric z-series are denoted by S/A
then the following rules apply:
S/S -> S,S
A/A -> S,A
The restriction to types of the same symmetry could be fixed but seems like
uneeded generality. There is no natural form for the remainder in the case
where there is no symmetry.
"""
z1 = z1.copy()
z2 = z2.copy()
len1 = len(z1)
len2 = len(z2)
if len2 == 1 :
z1 /= z2
return z1, z1[:1]*0
elif len1 < len2 :
return z1[:1]*0, z1
else :
dlen = len1 - len2
scl = z2[0]
z2 /= scl
quo = np.empty(dlen + 1, dtype=z1.dtype)
i = 0
j = dlen
while i < j :
r = z1[i]
quo[i] = z1[i]
quo[dlen - i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
z1[j:j+len2] -= tmp
i += 1
j -= 1
r = z1[i]
quo[i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
quo /= scl
rem = z1[i+1:i-1+len2].copy()
return quo, rem
def _zseries_der(zs) :
"""Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
"""
n = len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs *= np.arange(-n, n+1)*2
d, r = _zseries_div(zs, ns)
return d
def _zseries_int(zs) :
"""Integrate a z-series.
The integral is with respect to x, not z. This is achieved by a change
of variable using dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to integrate
Returns
-------
integral : z-series
The indefinite integral
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
dividing the resulting zs by two.
"""
n = 1 + len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs = _zseries_mul(zs, ns)
div = np.arange(-n, n+1)*2
zs[:n] /= div[:n]
zs[n+1:] /= div[n+1:]
zs[n] = 0
return zs
#
# Chebyshev series functions
#
def poly2cheb(pol) :
"""
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-d array containing the polynomial coefficients
Returns
-------
cs : ndarray
1-d array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = p.convert(kind=P.Chebyshev)
>>> c
Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])
>>> P.poly2cheb(range(4))
array([ 1. , 3.25, 1. , 0.75])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1) :
res = chebadd(chebmulx(res), pol[i])
return res
def cheb2poly(cs) :
"""
Convert a Chebyshev series to a polynomial.
Convert an array representing the coefficients of a Chebyshev series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
cs : array_like
1-d array containing the Chebyshev series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-d array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2cheb
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Chebyshev(range(4))
>>> c
Chebyshev([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([ -2., -8., 4., 12.], [-1., 1.])
>>> P.cheb2poly(range(4))
array([ -2., -8., 4., 12.])
"""
from polynomial import polyadd, polysub, polymulx
[cs] = pu.as_series([cs])
n = len(cs)
if n < 3:
return cs
else:
c0 = cs[-2]
c1 = cs[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1) :
tmp = c0
c0 = polysub(cs[i - 2], c1)
c1 = polyadd(tmp, polymulx(c1)*2)
return polyadd(c0, polymulx(c1))
#
# These are constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain.
chebdomain = np.array([-1,1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0,1])
def chebline(off, scl) :
"""
Chebyshev series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Chebyshev series for
``off + scl*x``.
See Also
--------
polyline
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebline(3,2)
array([3, 2])
>>> C.chebval(-3, C.chebline(3,2)) # should be -3
-3.0
"""
if scl != 0 :
return np.array([off,scl])
else :
return np.array([off])
def chebfromroots(roots) :
"""
Generate a Chebyshev series with the given roots.
Return the array of coefficients for the C-series whose roots (a.k.a.
"zeros") are given by *roots*. The returned array of coefficients is
ordered from lowest order "term" to highest, and zeros of multiplicity
greater than one must be included in *roots* a number of times equal
to their multiplicity (e.g., if `2` is a root of multiplicity three,
then [2,2,2] must be in *roots*).
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-d array of the C-series' coefficients, ordered from low to
high. If all roots are real, ``out.dtype`` is a float type;
otherwise, ``out.dtype`` is a complex type, even if all the
coefficients in the result are real (see Examples below).
See Also
--------
polyfromroots
Notes
-----
What is returned are the :math:`c_i` such that:
.. math::
\\sum_{i=0}^{n} c_i*T_i(x) = \\prod_{i=0}^{n} (x - roots[i])
where ``n == len(roots)`` and :math:`T_i(x)` is the `i`-th Chebyshev
(basis) polynomial over the domain `[-1,1]`. Note that, unlike
`polyfromroots`, due to the nature of the C-series basis set, the
above identity *does not* imply :math:`c_n = 1` identically (see
Examples).
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])
"""
if len(roots) == 0 :
return np.ones(1)
else :
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [chebline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [chebmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = chebmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def chebadd(c1, c2):
    """
    Add one Chebyshev series to another.

    Both arguments are sequences of coefficients ordered from lowest to
    highest degree, e.g. ``[1,2,3]`` represents ``T_0 + 2*T_1 + 3*T_2``.
    Because the Chebyshev polynomials form a basis, the sum of two series
    is again a series and addition is simply component-wise.

    Parameters
    ----------
    c1, c2 : array_like
        1-d arrays of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Coefficient array of the sum, trimmed of trailing zeros.

    See Also
    --------
    chebsub, chebmul, chebdiv, chebpow

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebadd((1,2,3), (3,2,1))
    array([ 4.,  4.,  4.])
    """
    # as_series returns trimmed copies, so the in-place add below is safe.
    [c1, c2] = pu.as_series([c1, c2])
    if len(c2) > len(c1):
        c1, c2 = c2, c1
    # c1 is now at least as long as c2; fold c2 into its leading part.
    c1[:c2.size] += c2
    return pu.trimseq(c1)
def chebsub(c1, c2):
    """
    Subtract one Chebyshev series from another.

    Returns the difference `c1` - `c2` of two coefficient sequences
    ordered from lowest to highest degree, e.g. ``[1,2,3]`` represents
    ``T_0 + 2*T_1 + 3*T_2``.  As with addition, subtraction in the
    Chebyshev basis is simply component-wise.

    Parameters
    ----------
    c1, c2 : array_like
        1-d arrays of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Coefficient array of the difference, trimmed of trailing zeros.

    See Also
    --------
    chebadd, chebmul, chebdiv, chebpow

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebsub((1,2,3), (3,2,1))
    array([-2.,  0.,  2.])
    """
    # as_series returns trimmed copies, so in-place updates are safe.
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) > len(c2):
        c1[:c2.size] -= c2
        diff = c1
    else:
        diff = -c2
        diff[:c1.size] += c1
    return pu.trimseq(diff)
def chebmulx(cs):
    """Multiply a Chebyshev series by x.

    Uses the recurrence ``x*T_n = (T_{n-1} + T_{n+1})/2`` (with the special
    case ``x*T_0 = T_1``), so each coefficient above degree zero is split
    evenly between the two neighbouring degrees.

    Parameters
    ----------
    cs : array_like
        1-d array of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Coefficients of the input series multiplied by x.

    Notes
    -----
    .. versionadded:: 1.5.0
    """
    # cs is a trimmed copy
    [cs] = pu.as_series([cs])
    # The zero series maps to itself.
    if len(cs) == 1 and cs[0] == 0:
        return cs
    out = np.zeros(len(cs) + 1, dtype=cs.dtype)
    out[1] = cs[0]
    if len(cs) > 1:
        halves = cs[1:]/2
        out[2:] += halves
        out[0:-2] += halves
    return out
def chebmul(c1, c2):
    """
    Multiply one Chebyshev series by another.

    Both arguments are coefficient sequences ordered from lowest to
    highest degree.  The polynomial product of two Chebyshev series
    contains terms outside the Chebyshev basis, so the result is
    re-projected onto the basis; the coefficients therefore often look
    "un-intuitive" but are correct.

    Parameters
    ----------
    c1, c2 : array_like
        1-d arrays of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Chebyshev series coefficients of the product, trimmed.

    See Also
    --------
    chebadd, chebsub, chebdiv, chebpow

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebmul((1,2,3), (3,2,1))
    array([  6.5,  12. ,  12. ,   4. ,   1.5])
    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    # In the symmetric z-series representation multiplication of Chebyshev
    # series reduces to an ordinary convolution; convert, multiply, convert
    # back.
    prd = _zseries_mul(_cseries_to_zseries(c1), _cseries_to_zseries(c2))
    return pu.trimseq(_zseries_to_cseries(prd))
def chebdiv(c1, c2):
    """
    Divide one Chebyshev series by another.

    Returns the quotient-with-remainder of `c1` / `c2`, both given as
    coefficient sequences ordered from lowest to highest degree.  As with
    multiplication, the results generally need re-projection onto the
    Chebyshev basis and may look "un-intuitive" while being correct.

    Parameters
    ----------
    c1, c2 : array_like
        1-d arrays of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    [quo, rem] : ndarrays
        Chebyshev series coefficients of the quotient and remainder.

    Raises
    ------
    ZeroDivisionError
        If the leading coefficient of `c2` is zero.

    See Also
    --------
    chebadd, chebsub, chebmul, chebpow

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebdiv((1,2,3), (3,2,1))
    (array([ 3.]), array([-8., -4.]))
    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()
    if len(c1) < len(c2):
        # Numerator degree too small: quotient is zero, remainder is c1.
        return c1[:1]*0, c1
    if len(c2) == 1:
        # Division by a constant leaves no remainder.
        return c1/c2[-1], c1[:1]*0
    # General case: divide in the z-series representation and convert back.
    znum = _cseries_to_zseries(c1)
    zden = _cseries_to_zseries(c2)
    quo, rem = _zseries_div(znum, zden)
    quo = pu.trimseq(_zseries_to_cseries(quo))
    rem = pu.trimseq(_zseries_to_cseries(rem))
    return quo, rem
def chebpow(cs, pow, maxpower=16) :
    """Raise a Chebyshev series to a power.

    Returns the Chebyshev series `cs` raised to the power `pow`.  The
    argument `cs` is a sequence of coefficients ordered from low to high,
    i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2``.

    Parameters
    ----------
    cs : array_like
        1-d array of Chebyshev series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised.
    maxpower : integer, optional
        Maximum power allowed, mainly to limit growth of the series to
        unmanageable size.  Default is 16.

    Returns
    -------
    coef : ndarray
        Chebyshev series of the power.

    See Also
    --------
    chebadd, chebsub, chebmul, chebdiv
    """
    # cs is a trimmed copy
    [cs] = pu.as_series([cs])
    power = int(pow)
    if power != pow or power < 0 :
        raise ValueError("Power must be a non-negative integer.")
    if maxpower is not None and power > maxpower :
        raise ValueError("Power is too large")
    if power == 0 :
        return np.array([1], dtype=cs.dtype)
    if power == 1 :
        return cs
    # Repeated convolution in the z-series representation.  This could be
    # made more efficient with binary exponentiation.
    zs = _cseries_to_zseries(cs)
    prd = zs
    for _ in range(power - 1) :
        prd = np.convolve(prd, zs)
    return _zseries_to_cseries(prd)
def chebder(cs, m=1, scl=1) :
    """
    Differentiate a Chebyshev series.

    Returns the series `cs` differentiated `m` times.  At each iteration the
    result is multiplied by `scl` (the scaling factor is for use in a linear
    change of variable).  The argument `cs` is the sequence of coefficients
    from lowest order "term" to highest, e.g., [1,2,3] represents the series
    ``T_0 + 2*T_1 + 3*T_2``.

    Parameters
    ----------
    cs : array_like
        1-d array of Chebyshev series coefficients ordered from low to high.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change of
        variable. (Default: 1)

    Returns
    -------
    der : ndarray
        Chebyshev series of the derivative.

    See Also
    --------
    chebint

    Notes
    -----
    In general, the result of differentiating a C-series needs to be
    "re-projected" onto the C-series basis set.  Thus, typically, the
    result of this function is "un-intuitive," albeit correct.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> cs = (1,2,3,4)
    >>> C.chebder(cs)
    array([ 14.,  12.,  24.])
    >>> C.chebder(cs,3)
    array([ 96.])
    """
    cnt = int(m)
    # Raise with call syntax: the old ``raise ValueError, "..."`` statement
    # form is Python 2 only; the call form works under both Python 2 and 3
    # and matches the style used elsewhere in this module (e.g. chebvander).
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0 :
        raise ValueError("The order of derivation must be non-negative")
    # cs is a trimmed copy
    [cs] = pu.as_series([cs])
    if cnt == 0:
        return cs
    elif cnt >= len(cs):
        # Differentiating at least deg+1 times yields the zero series.
        return cs[:1]*0
    else :
        # Differentiate in the z-series representation, scaling each pass.
        zs = _cseries_to_zseries(cs)
        for i in range(cnt):
            zs = _zseries_der(zs)*scl
        return _zseries_to_cseries(zs)
def chebint(cs, m=1, k=[], lbnd=0, scl=1):
    """
    Integrate a Chebyshev series.

    Returns, as a C-series, the input C-series `cs`, integrated `m` times
    from `lbnd` to `x`.  At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable: for the
    substitution ``u = ax + b`` we have ``dx = du/a``, so `scl` should be
    ``1/a`` -- perhaps not what one would first expect.  The argument `cs`
    is a sequence of coefficients from lowest order "term" to highest,
    e.g., [1,2,3] represents ``T_0(x) + 2*T_1(x) + 3*T_2(x)``.

    Parameters
    ----------
    cs : array_like
        1-d array of C-series coefficients, ordered from low to high.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s).  The value of the first integral at zero
        is the first value in the list, the value of the second integral
        at zero is the second value, etc.  If ``k == []`` (the default),
        all constants are set to zero.  If ``m == 1``, a single scalar can
        be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)

    Returns
    -------
    S : ndarray
        C-series coefficients of the integral.

    Raises
    ------
    ValueError
        If ``m`` is not a non-negative integer or ``len(k) > m``.

    See Also
    --------
    chebder
    """
    cnt = int(m)
    if not np.iterable(k):
        k = [k]
    # Raise with call syntax: the old ``raise ValueError, "..."`` statement
    # form is Python 2 only; the call form works under both Python 2 and 3
    # and matches the style used elsewhere in this module.
    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0 :
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt :
        raise ValueError("Too many integration constants")
    # cs is a trimmed copy.  The mutable default ``k=[]`` is safe here
    # because k is only ever rebound, never mutated in place.
    [cs] = pu.as_series([cs])
    if cnt == 0:
        return cs
    # Pad the integration constants with zeros, one per integration.
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt) :
        n = len(cs)
        cs *= scl
        if n == 1 and cs[0] == 0:
            # Integrating the zero series just adds the constant.
            cs[0] += k[i]
        else:
            # Integrate in the z-series representation, then adjust the
            # constant term so the integral evaluates to k[i] at lbnd.
            zs = _cseries_to_zseries(cs)
            zs = _zseries_int(zs)
            cs = _zseries_to_cseries(zs)
            cs[0] += k[i] - chebval(lbnd, cs)
    return cs
def chebval(x, cs):
    """Evaluate a Chebyshev series at points x.

    If `cs` is of length n, this function returns
    ``p(x) = cs[0]*T_0(x) + cs[1]*T_1(x) + ... + cs[n-1]*T_{n-1}(x)``
    using Clenshaw recursion (a.k.a. synthetic division).  If `x` is a
    sequence or array, ``p(x)`` has the same shape as `x`; any ring-like
    object supporting multiplication and addition with itself and the
    coefficients also works, and an object of the same type is returned.

    Parameters
    ----------
    x : array_like, ring_like
        Points at which to evaluate the series.
    cs : array_like
        1-d array of Chebyshev coefficients ordered from low to high.

    Returns
    -------
    values : ndarray, ring_like
        If the return is an ndarray then it has the same shape as `x`.

    See Also
    --------
    chebfit
    """
    # cs is a trimmed copy
    [cs] = pu.as_series([cs])
    if isinstance(x, (tuple, list)) :
        x = np.asarray(x)
    if len(cs) == 1 :
        b0, b1 = cs[0], 0
    elif len(cs) == 2 :
        b0, b1 = cs[0], cs[1]
    else :
        # Clenshaw recursion, run from the highest coefficient down.
        x2 = 2*x
        b0, b1 = cs[-2], cs[-1]
        for coef in cs[-3::-1] :
            b0, b1 = coef - b1, b0 + b1*x2
    return b0 + b1*x
def chebvander(x, deg) :
    """Vandermonde-like matrix of Chebyshev polynomials.

    Returns the matrix ``V`` with ``V[..., k] = T_k(x)`` for degrees
    ``0 <= k <= deg``.  This is not a true Vandermonde matrix because `x`
    can be an arbitrary ndarray and the Chebyshev polynomials are not
    powers.

    Parameters
    ----------
    x : array_like
        Array of points.  The values are converted to double or complex
        doubles.  A scalar is converted to a 1-D array.
    deg : integer
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        Matrix of shape ``x.shape + (deg + 1,)``; the last index is the
        degree.
    """
    ideg = int(deg)
    if ideg != deg:
        raise ValueError("deg must be integer")
    if ideg < 0:
        raise ValueError("deg must be non-negative")
    # Force at least 1-d and promote to a float/complex dtype.
    x = np.array(x, copy=False, ndmin=1) + 0.0
    v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)
    # Forward recursion T_{k+1} = 2*x*T_k - T_{k-1}, filled degree-first.
    v[0] = x*0 + 1
    if ideg > 0 :
        x2 = 2*x
        v[1] = x
        for k in range(2, ideg + 1) :
            v[k] = v[k-1]*x2 - v[k-2]
    # Move the degree axis to the end.
    return np.rollaxis(v, 0, v.ndim)
def chebfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Chebyshev series to data.

    Return the coefficients of a Chebyshev series of degree `deg` that is
    the least squares fit to the data values `y` given at points `x`.  If
    `y` is 1-D the returned coefficients are also 1-D.  If `y` is 2-D,
    multiple fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form

    .. math::  p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points.  Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting series.
    rcond : float, optional
        Relative condition number of the fit.  Singular values smaller
        than this relative to the largest singular value will be ignored.
        The default value is ``len(x)*eps``, where eps is the relative
        precision of the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value.  When it is False (the
        default) just the coefficients are returned; when True diagnostic
        information from the singular value decomposition is also
        returned.
    w : array_like, shape (M,), optional
        Weights.  If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by ``w[i]``.

        .. versionadded:: 1.5.0

    Returns
    -------
    coef : ndarray, shape (deg+1,) or (deg+1, K)
        Chebyshev coefficients ordered from low to high.  If `y` was 2-D,
        the coefficients for the data in column k of `y` are in column k.
    [residuals, rank, singular_values, rcond] : present when `full` = True
        Residuals of the least-squares fit, the effective rank of the
        scaled Vandermonde matrix and its singular values, and the
        specified value of `rcond`.  For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient.  Only raised if `full` = False.

    See Also
    --------
    chebval, chebvander, linalg.lstsq
    """
    order = int(deg) + 1
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0
    # Check arguments.  Raise with call syntax: the old
    # ``raise TypeError, "..."`` statement form is Python 2 only; the call
    # form works under both Python 2 and 3 and matches the style used
    # elsewhere in this module.
    if deg < 0 :
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2 :
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")
    # set up the least squares matrices
    lhs = chebvander(x, deg)
    rhs = y
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights
        if rhs.ndim == 2:
            lhs *= w[:, np.newaxis]
            rhs *= w[:, np.newaxis]
        else:
            lhs *= w[:, np.newaxis]
            rhs *= w
    # set rcond
    if rcond is None :
        rcond = len(x)*np.finfo(x.dtype).eps
    # Scale the columns of the design matrix to unit norm to improve
    # conditioning, then undo the scaling on the solved coefficients.
    scl = np.sqrt((lhs*lhs).sum(0))
    c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond)
    c = (c.T/scl).T
    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, pu.RankWarning)
    if full :
        return c, [resids, rank, s, rcond]
    else :
        return c
def chebcompanion(cs):
    """Return the scaled companion matrix of cs.

    The basis polynomials are scaled so that the companion matrix is
    symmetric when `cs` represents a single Chebyshev polynomial.  This
    provides better eigenvalue estimates than the unscaled case, and in
    the single polynomial case the eigenvalues are guaranteed to be real
    if np.eigvalsh is used to obtain them.

    Parameters
    ----------
    cs : array_like
        1-d array of Chebyshev series coefficients ordered from low to
        high degree.

    Returns
    -------
    mat : ndarray
        Scaled companion matrix of dimensions (deg, deg).
    """
    # cs is a trimmed copy
    [cs] = pu.as_series([cs])
    if len(cs) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(cs) == 2:
        # Return a true (1, 1) matrix.  The previous code returned a 0-d
        # array here, contradicting the documented (deg, deg) shape.
        return np.array([[-cs[0]/cs[1]]])
    n = len(cs) - 1
    mat = np.zeros((n, n), dtype=cs.dtype)
    scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
    # Views onto the super- and sub-diagonal of the flattened matrix.
    top = mat.reshape(-1)[1::n+1]
    bot = mat.reshape(-1)[n::n+1]
    top[0] = np.sqrt(.5)
    # Use an explicit float literal so the value does not depend on
    # ``from __future__ import division`` (1/2 is 0 under Python 2 ints).
    top[1:] = .5
    bot[...] = top
    # Fold the (scaled) series coefficients into the last column.
    mat[:,-1] -= (cs[:-1]/cs[-1])*(scl/scl[-1])*.5
    return mat
def chebroots(cs):
    """
    Compute the roots of a Chebyshev series.

    Return the roots (a.k.a. "zeros") of the Chebyshev series whose
    coefficients, ordered from lowest to highest degree, are given by
    `cs`, e.g. ``[1,2,3]`` represents ``T_0 + 2*T_1 + 3*T_2``.

    Parameters
    ----------
    cs : array_like
        1-d array of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Sorted array of the roots.  If all the roots are real, then so is
        the dtype of ``out``; otherwise the dtype is complex.

    See Also
    --------
    polyroots

    Notes
    -----
    The roots are obtained as the eigenvalues of the scaled companion
    matrix.  Because the Chebyshev basis differs from the power basis,
    the results *may* not be what one is expecting.

    Examples
    --------
    >>> import numpy.polynomial.chebyshev as cheb
    >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
    array([ -5.00000000e-01,   2.60860684e-17,   1.00000000e+00])
    """
    # cs is a trimmed copy
    [cs] = pu.as_series([cs])
    # A constant series has no roots.
    if len(cs) < 2:
        return np.array([], dtype=cs.dtype)
    # Linear series: the single root is available in closed form.
    if len(cs) == 2:
        return np.array([-cs[0]/cs[1]])
    roots = la.eigvals(chebcompanion(cs))
    roots.sort()
    return roots
def chebpts1(npts):
    """Chebyshev points of the first kind.

    The Chebyshev points of the first kind are ``cos(x_k)`` where
    ``x_k = pi*(k + .5)/npts`` for ``k in range(npts)``.

    Parameters
    ----------
    npts : int
        Number of sample points desired.

    Returns
    -------
    pts : ndarray
        The Chebyshev points of the first kind.

    Notes
    -----
    .. versionadded:: 1.5.0
    """
    cnt = int(npts)
    if cnt != npts:
        raise ValueError("npts must be integer")
    if cnt < 1:
        raise ValueError("npts must be >= 1")
    # Equally spaced angles over [-pi, 0), offset to the interval midpoints.
    angles = np.linspace(-np.pi, 0, cnt, endpoint=False) + np.pi/(2*cnt)
    return np.cos(angles)
def chebpts2(npts):
    """Chebyshev points of the second kind.

    The Chebyshev points of the second kind are ``cos(x_k)`` where
    ``x_k = pi*k/(npts - 1)`` for ``k in range(npts)``.

    Parameters
    ----------
    npts : int
        Number of sample points desired.

    Returns
    -------
    pts : ndarray
        The Chebyshev points of the second kind.

    Notes
    -----
    .. versionadded:: 1.5.0
    """
    cnt = int(npts)
    if cnt != npts:
        raise ValueError("npts must be integer")
    if cnt < 2:
        raise ValueError("npts must be >= 2")
    # npts equally spaced angles from -pi to 0, endpoints included.
    angles = np.linspace(-np.pi, 0, cnt)
    return np.cos(angles)
#
# Chebyshev series class
#

# Instantiate the Chebyshev convenience class from the shared polynomial
# template.  Use the function form of exec so this also parses under
# Python 3; the statement form ``exec expr`` is a syntax error there.
exec(polytemplate.substitute(name='Chebyshev', nick='cheb', domain='[-1,1]'))
| 28.64714 | 79 | 0.596387 |
7959105a7099848bfde754a5af3e191bd07de5f1 | 1,833 | py | Python | Mining/Mind.py | penut85420/FinMind | df2aa29d6fbf180d5a701bb2c207197e9324f857 | [
"Apache-2.0"
] | null | null | null | Mining/Mind.py | penut85420/FinMind | df2aa29d6fbf180d5a701bb2c207197e9324f857 | [
"Apache-2.0"
] | null | null | null | Mining/Mind.py | penut85420/FinMind | df2aa29d6fbf180d5a701bb2c207197e9324f857 | [
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
import requests
from FinMind.Data import Load
def MoveAverage(stock_price, days=5, variable='close'):
    """Simple moving average of one price column over a rolling window.

    Returns a Series aligned with `stock_price`; as with any rolling
    mean, the first ``days - 1`` entries are NaN.
    """
    column = stock_price[variable]
    return column.rolling(window=days).mean()
def RSV(stock_price, days=9):
    """Raw stochastic value (RSV) over a rolling window, in percent.

    RSV = (close - lowest 'min') / (highest 'max' - lowest 'min'),
    rounded to two decimals before scaling by 100.  Requires the columns
    'min', 'max', 'close' and 'date'; the first ``days - 1`` entries are
    NaN.
    """
    frame = pd.DataFrame()
    frame['rolling_min'] = stock_price['min'].rolling(window=days).min()
    frame['rolling_max'] = stock_price['max'].rolling(window=days).max()
    frame['close'] = stock_price['close']
    frame['date'] = stock_price['date']
    span = frame['rolling_max'] - frame['rolling_min']
    ratio = (frame['close'] - frame['rolling_min'])/span
    return round(ratio, 2)*100
def BIAS(stock_price, days=9):
    """Bias ratio: percent deviation of close from its rolling mean.

    BIAS = (close - MA) / MA * 100, rounded to two decimals.  Requires
    the columns 'close' and 'date'; the first ``days - 1`` entries are
    NaN.
    """
    frame = pd.DataFrame()
    frame['mean_close'] = stock_price['close'].rolling(window=days).mean()
    frame['close'] = stock_price['close']
    frame['date'] = stock_price['date']
    deviation = (frame['close'] - frame['mean_close'])/frame['mean_close']
    return round(deviation*100, 2)
def transpose(data):
    """Pivot long-format (date, stock_id, type, value) rows to wide format.

    For every (date, stock_id) pair, the rows of `data` (one per 'type')
    become a single output row whose columns are the 'type' values, plus
    'stock_id' and 'date' columns.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain the columns 'date', 'stock_id', 'type' and 'value'.

    Returns
    -------
    pandas.DataFrame
        One row per (date, stock_id) combination, indexed 0..n-1.
    """
    select_variable = 'stock_id'
    dates = list(np.unique(data['date']))
    select_var_list = list(np.unique(data[select_variable]))
    # Collect the per-(date, stock) rows and concatenate once at the end.
    # DataFrame.append was removed in pandas 2.0, and appending inside the
    # loop was quadratic anyway, so build a list and use pd.concat instead.
    pieces = []
    for d in dates:
        for select_var in select_var_list:
            mask = (data['date'] == d) & (data[select_variable] == select_var)
            piece = data.loc[mask, ['type', 'value']]
            # Turn the 'type' values into column names via a transpose.
            piece.index = piece['type']
            del piece['type']
            piece = piece.T
            piece.index = range(len(piece))
            piece.columns = list(piece.columns)
            piece['stock_id'] = select_var
            piece['date'] = d
            pieces.append(piece)
    result = pd.concat(pieces) if pieces else pd.DataFrame()
    result.index = range(len(result))
    return result
| 29.095238 | 89 | 0.570104 |
795910d35c5159642e4f8c92b60c5f9b7b616842 | 3,002 | py | Python | dgfbot/views.py | olegaobini/dontGetFiredBot | 4421170337e9ffbc356e7c97db2cb3a70ce89c69 | [
"MIT"
] | 1 | 2022-02-21T02:18:28.000Z | 2022-02-21T02:18:28.000Z | dgfbot/views.py | olegaobini/dontGetFiredBot | 4421170337e9ffbc356e7c97db2cb3a70ce89c69 | [
"MIT"
] | null | null | null | dgfbot/views.py | olegaobini/dontGetFiredBot | 4421170337e9ffbc356e7c97db2cb3a70ce89c69 | [
"MIT"
] | null | null | null | from dgfbot.device_facade import *
from dgfbot.utils import similar_arr_in_list, checkImage, ImageDraw, Image
import numpy
"""
hiring process view
dialog view
CS 640x1520
"""
screenPath = "screenshots\\screenshot.png"
class mainView:
    """Main game screen: health-bar colour checks and employee slot layout.

    Coordinates below are hard-coded for a 1080x2316 screen; colour checks
    read the latest screenshot on disk (`screenPath`).
    """

    def __init__(self, device: DeviceFacade):
        self.device = device
        # Hard-coded device resolution as (width, height).
        self.dimensions = (1080, 2316)
        # TODO Adjust dimension to advice
        # self.dimensions = getDimensions(self.device)

    # TODO fix check image and _get_health_bar
    def is_visible(self) -> bool:
        """True when the pixel at the health-bar location matches one of
        the known alert/indicator colours."""
        width, height = self._get_health_bar_location()
        return similar_arr_in_list(
            checkImage(screenPath, width, height), self.workers_alert_indicators
        )

    def _get_health_bar_location(self) -> tuple[int, int]:
        # Fixed screen position of the health bar, returned as (x, y).
        height = 250
        width = 400
        return (width, height)

    def employeeLocations(self) -> dict[str, list]:
        """Screen coordinates [x, y] of each employee slot, keyed by rank.

        Fixed: the return annotation used to be ``list[str:list]`` even
        though the method returns a dict.
        """
        _, height = self.dimensions
        # Three rows of employee buttons, placed relative to screen height;
        # the pixel offsets appear to be empirically tuned constants.
        first_row_val = int(0.66 * height) - 30
        second_row_val = int(0.725 * height) - 460
        third_row_val = 1020 - 40
        return {
            "INTERN": [900, first_row_val],
            "CS": [673, first_row_val],
            "FS": [420, first_row_val],
            "AM": [180, first_row_val],
            "M": [955, second_row_val],
            "DGM": [715, second_row_val],
            "GM": [430, second_row_val],
            "D": [190, second_row_val],
            "MD": [900, third_row_val],
            "SMD": [570, third_row_val],
            "VP": [200, third_row_val],
        }

    def testing(self):
        """Debug helper: draw a red dot on every employee location and
        display the annotated screenshot."""
        with Image.open(screenPath) as im:
            draw = ImageDraw.Draw(im)
            for location in self.employeeLocations().items():
                r = 18
                x = location[1][0]
                y = location[1][1]
                draw.ellipse([(x - r, y - r), (x + r, y + r)], fill="red", width=10)
            im.show()

    # RGBA colours that count as alert/health-bar states.
    workers_alert_indicators = [
        # yellow
        numpy.array([254, 230, 129, 255]),
        # yellow
        numpy.array([255, 230, 129, 255]),
        # red
        numpy.array([227, 43, 56, 255]),
        # red
        numpy.array([228, 43, 56, 255]),
        # health bar red
        numpy.array([227, 32, 32, 255]),
        # health bar red
        numpy.array([247, 32, 32, 255]),
        # green
        numpy.array([0, 230, 0, 255]),
        # green
        numpy.array([0, 43, 0, 255]),
    ]
class hiringView:
    """Detects whether the hiring screen is currently shown.

    Mirrors mainView: read a pixel from the screenshot at a fixed
    location and compare it against known indicator colours.
    """

    def __init__(self, device: DeviceFacade):
        self.device = device

    def is_visible(self) -> bool:
        """True when the hiring-view indicator pixel matches."""
        # Fixed: this used to call the non-existent _get_hiring_building()
        # and to read the undefined workers_alert_indicators attribute.
        width, height = self._get_hiring_indicator()
        return similar_arr_in_list(
            checkImage(screenPath, width, height), self.hiring_view_indicators
        )

    def _get_hiring_indicator(self) -> tuple[int, int]:
        # Fixed: the method is called on an instance, so it needs `self`.
        width = 540
        height = 20
        return (width, height)

    # Fixed: numpy.array(169, 173, 172, 255) passed the colour channels as
    # separate positional arguments, which raises TypeError when the class
    # body executes at import time; the channels must be one sequence.
    hiring_view_indicators = [numpy.array([169, 173, 172, 255])]
| 29.722772 | 85 | 0.541306 |
795911bc2020fdc400f56ddafd661d5d2cad1e61 | 2,848 | py | Python | bbs/settings.py | wangqiang001/bbs | 3b7193b948e8c2a3c3e5d42471c9bd29a73b7a6a | [
"MIT"
] | null | null | null | bbs/settings.py | wangqiang001/bbs | 3b7193b948e8c2a3c3e5d42471c9bd29a73b7a6a | [
"MIT"
] | 2 | 2019-01-22T11:48:17.000Z | 2019-01-22T12:04:17.000Z | bbs/settings.py | wangqiang001/bbs | 3b7193b948e8c2a3c3e5d42471c9bd29a73b7a6a | [
"MIT"
] | null | null | null | """
Django settings for bbs project.
Generated by 'django-admin startproject' using Django 1.11.15.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'aj%$f_d#3!h7)ha*s_lryvq+(ncdin5p9&g^xmu_5a7bce8v9g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# '*' accepts any Host header -- development convenience only.
ALLOWED_HOSTS = ['*']
# Application definition
# 'post' and 'user' are this project's own apps.  Note that
# django.contrib.auth and django.contrib.admin are not installed.
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    'post',
    'user',
]
# Matches the installed apps above: no AuthenticationMiddleware or
# MessageMiddleware because the corresponding apps are absent.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bbs.urls'
# App-directory template loading only; no project-level template dirs.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
            ],
        },
    },
]
WSGI_APPLICATION = 'bbs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
# Simplified-Chinese UI with Shanghai local time.
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/statics/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "statics"),
]
795911fea2965b162c93164c04d040ea51055665 | 1,042 | py | Python | src/EmotionsRecognizer/text_process/predict_text.py | Ahostility/AnalyzePeople-1 | edcd70fd6b236f4fafdbe4565b8a4975c1e084e4 | [
"Apache-2.0"
] | null | null | null | src/EmotionsRecognizer/text_process/predict_text.py | Ahostility/AnalyzePeople-1 | edcd70fd6b236f4fafdbe4565b8a4975c1e084e4 | [
"Apache-2.0"
] | null | null | null | src/EmotionsRecognizer/text_process/predict_text.py | Ahostility/AnalyzePeople-1 | edcd70fd6b236f4fafdbe4565b8a4975c1e084e4 | [
"Apache-2.0"
] | null | null | null | from dostoevsky.tokenization import RegexTokenizer
from dostoevsky.models import FastTextSocialNetworkModel
from .parse_kaldi import open_text
tokenizer = RegexTokenizer() # токенизатор текста
text_model = FastTextSocialNetworkModel(tokenizer=tokenizer) # модель анализа тональности
to_numbers = {
'negative': 0,
'neutral': 1,
'positive': 2,
'skip': 1,
'speech': 1,
}
def choose_sentiment(pred):
    """Return the label with the highest score in `pred`.

    `pred` maps sentiment labels to scores; the labels are sorted by
    ascending score and the last (highest-scoring) one is returned.
    """
    ranked = sorted(pred, key=pred.get)
    return ranked[-1]
def predict_sentiment(texts):
    """Run the sentiment model on `texts` and pick the top label for each."""
    return [choose_sentiment(scores) for scores in text_model.predict(texts)]
def predict_by_text_file(text_path):
    """Sentiment class (0/1/2) for the transcript stored at `text_path`.

    An empty transcript is treated as neutral (1).
    """
    text = open_text(text_path).strip()
    if not text:
        return 1
    return predict_by_texts([text])[0]
def predict_by_parsed_kaldi(person_texts):
    """Return per-speaker sentiment codes for parsed kaldi output."""
    return {
        person: predict_by_texts(texts)
        for person, texts in person_texts.items()
    }
def predict_by_texts(texts):
    """Convert model sentiment labels for *texts* into integer codes."""
    return [to_numbers[label] for label in predict_sentiment(texts)]
| 24.809524 | 90 | 0.71881 |
795912988bed0500d18b583bb5b81addce072f78 | 1,532 | py | Python | acmicpc/7576/7576.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 3 | 2019-03-09T05:19:23.000Z | 2019-04-06T09:26:36.000Z | acmicpc/7576/7576.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2020-02-23T10:38:04.000Z | 2020-02-23T10:38:04.000Z | acmicpc/7576/7576.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2019-05-22T13:47:53.000Z | 2019-05-22T13:47:53.000Z | def bfs(farm:list, tomatoes:list, tcount:int, urcount:int)->int:
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
is_changed = True
count = 0
while is_changed:
next_tomatoes = []
count += 1
is_changed = False
while tomatoes:
tomato = tomatoes.pop()
x, y = tomato
for i in range(4):
temp_x = x + dx[i]
temp_y = y + dy[i]
if temp_x < 0 or temp_y < 0 or temp_x >= N or temp_y >= M:
continue
value = farm[temp_x][temp_y]
if value == 0:
is_changed = True
urcount -= 1
if urcount == 0:
return count
farm[temp_x][temp_y] = 1
next_tomatoes.append((temp_x, temp_y))
tomatoes = next_tomatoes
return -1
# Read grid dimensions: M = columns, N = rows.
M, N = map(int, input().split())
rows = []
tomato = []  # positions of the initially ripe tomatoes
tomato_count = 0
unripe_count = 0
empty_count = 0
# Read the grid, one row per input line.
for _ in range(N):
    row = list(map(int, input().strip().split()))
    rows.append(row)
# Classify every cell: 0 = unripe, 1 = ripe, -1 = empty.
for i in range(N):
    for j in range(M):
        value = rows[i][j]
        if value == 0:
            unripe_count += 1
        elif value == 1:
            tomato_count += 1
            tomato.append((i, j))
        elif value == -1:
            empty_count += 1
# No ripe tomato at all: the unripe ones can never ripen.
if tomato_count == 0:
    print(-1)
# Every cell is already ripe or empty: zero days needed.
elif tomato_count + empty_count == N*M:
    print(0)
else:
    print(bfs(rows, tomato, tomato_count, unripe_count))
| 25.966102 | 74 | 0.474543 |
795913526defac367a98ece52152f73e6c54bfe1 | 6,298 | py | Python | zvmsdk/sdkwsgi/validation/parameter_types.py | jichenjc/python-zvm-sdk | c081805c6079107b4823af898babdf92cf5577ee | [
"Apache-2.0"
] | null | null | null | zvmsdk/sdkwsgi/validation/parameter_types.py | jichenjc/python-zvm-sdk | c081805c6079107b4823af898babdf92cf5577ee | [
"Apache-2.0"
] | null | null | null | zvmsdk/sdkwsgi/validation/parameter_types.py | jichenjc/python-zvm-sdk | c081805c6079107b4823af898babdf92cf5577ee | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import unicodedata
import six
def single_param(schema):
    """Array schema wrapping *schema*, limited to at most one item."""
    wrapped = multi_params(schema)
    wrapped['maxItems'] = 1
    return wrapped
def multi_params(schema):
    """Array schema accepting any number of items matching *schema*."""
    return dict(type='array', items=schema)
class ValidationRegex(object):
    """Pairs a regex pattern with a human-readable reason for failures."""

    def __init__(self, regex, reason):
        self.regex, self.reason = regex, reason
def _is_printable(char):
"""determine if a unicode code point is printable.
This checks if the character is either "other" (mostly control
codes), or a non-horizontal space. All characters that don't match
those criteria are considered printable; that is: letters;
combining marks; numbers; punctuation; symbols; (horizontal) space
separators.
"""
category = unicodedata.category(char)
return (not category.startswith("C") and
(not category.startswith("Z") or category == "Zs"))
def _get_all_chars():
    """Yield every code point below U+FFFF (the Basic Multilingual Plane)."""
    for code_point in range(0xFFFF):
        yield six.unichr(code_point)
def _build_regex_range(ws=True, invert=False, exclude=None):
    """Build a range regex for a set of characters in utf8.
    This builds a valid range regex for characters in utf8 by
    iterating the entire space and building up a set of x-y ranges for
    all the characters we find which are valid.
    :param ws: should we include whitespace in this range.
    :param exclude: any characters we want to exclude
    :param invert: invert the logic
    The inversion is useful when we want to generate a set of ranges
    which is everything that's not a certain class. For instance,
    produce all all the non printable characters as a set of ranges.
    """
    if exclude is None:
        exclude = []
    regex = ""
    # are we currently in a range
    in_range = False
    # last character we found, for closing ranges
    last = None
    # last character we added to the regex, this lets us know that we
    # already have B in the range, which means we don't need to close
    # it out with B-B. While the later seems to work, it's kind of bad form.
    last_added = None
    def valid_char(char):
        # Membership test for the target set, honouring exclude/ws/invert.
        if char in exclude:
            result = False
        elif ws:
            result = _is_printable(char)
        else:
            # Zs is the unicode class for space characters, of which
            # there are about 10 in this range.
            result = (_is_printable(char) and
                      unicodedata.category(char) != "Zs")
        if invert is True:
            return not result
        return result
    # iterate through the entire character range. in_
    # NOTE(review): the `last`/`in_range` bookkeeping when closing a run
    # looks fragile (``last`` is only updated on invalid characters here);
    # verify against the upstream nova parameter_types implementation.
    for c in _get_all_chars():
        if valid_char(c):
            if not in_range:
                # Start of a new run: emit its first character.
                regex += re.escape(c)
                last_added = c
                in_range = True
        else:
            if in_range and last != last_added:
                # Close the open run with "-<last seen char>".
                regex += "-" + re.escape(last)
                in_range = False
            last = c
    else:
        if in_range:
            # The final run extends to the end of the space: close it out.
            regex += "-" + re.escape(c)
    return regex
# Template: negative lookahead/lookbehind reject an invalid first/last
# character; the middle character class accepts the printable set.
valid_name_regex_base = '^(?![%s])[%s]*(?<![%s])$'
# Names: printable characters that neither start nor end with whitespace.
valid_name_regex = ValidationRegex(
    valid_name_regex_base % (
        _build_regex_range(ws=False, invert=True),
        _build_regex_range(),
        _build_regex_range(ws=False, invert=True)),
    "printable characters. Can not start or end with whitespace.")
# JSON-schema fragments used to validate API request parameters.
# NOTE: all regex patterns below are raw strings. The originals embedded
# '\s', '\w' and '\*' in plain string literals, which emits a
# DeprecationWarning (invalid escape sequence) on modern Python even though
# the runtime values are unchanged.

name = {
    'type': 'string', 'minLength': 1, 'maxLength': 255,
    'format': 'name'
}
positive_integer = {
    'type': ['integer', 'string'],
    'pattern': '^[0-9]*$', 'minimum': 1
}
ipv4 = {
    'type': 'string', 'format': 'ipv4'
}
# A list of NIC descriptors, each with an id and a MAC address.
nic_info = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            'nic_id': {'type': 'string'},
            'mac_addr': {'type': 'string'}
        },
        'additionalProperties': False
    }
}
# Accepts booleans plus the usual truthy/falsy string spellings.
boolean = {
    'type': ['boolean', 'string'],
    'enum': [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on',
             'YES', 'Yes', 'yes',
             False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', 'off',
             'NO', 'No', 'no']
}
# Up to three whitespace-separated 4-digit hex device numbers.
rdev_list = {
    'type': ['string'],
    'pattern': r'^([0-9a-fA-F]{,4})(\s+[0-9a-fA-F]{,4}){,2}$'
}
# A single real device number (up to 4 hex digits).
rdev = {
    'type': ['string'], 'minLength': 1, 'maxLength': 4,
    'pattern': '^[0-9a-fA-F]{,4}$'
}
# A single virtual device number (up to 4 hex digits).
vdev = {
    'type': ['string'], 'minLength': 1, 'maxLength': 4,
    'pattern': '^[0-9a-fA-F]{,4}$'
}
url = {
    'type': ['string'],
    # FIXME: uri cannot validate url, need accurate definition
    'format': 'uri'
}
mac_address = {
    'type': 'string',
    'pattern': '^([0-9a-fA-F]{2})(:[0-9a-fA-F]{2}){5}$'
}
remotehost = {
    'type': ['string'],
    'format': 'hostname'
}
userid = {
    'type': ['string'], 'minLength': 1, 'maxLength': 8
}
# Either the literal '*' wildcard or a 1-8 character controller name.
controller = {
    'type': ['string'],
    'anyOf': [
        {'pattern': r'\*'},
        {'minLength': 1, 'maxLength': 8}
    ]
}
nic_id = {
    'type': ['string']
}
# Comma-separated list of userids (each up to 8 word characters).
userid_list = {
    'type': ['string'],
    # TODO:validate userid_list in inspect APIs
    'pattern': r'^(\w{,8})(,\w{,8}){0,}$'
}
# Disk definitions for guest creation; only 'size' is mandatory.
disk_list = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            'size': {'type': 'string'},
            # TODO: set format to enum
            'format': {'type': 'string'},
            'is_boot_disk': boolean,
            'disk_pool': {'type': 'string', 'pattern': r'^\w+:\w+$'}
        },
        'required': ['size'],
        'additionalProperties': False
    }
}
image_meta = {
    'type': 'object',
    'properties': {
        'os_version': {'type': 'string'},
        # md5 should be 32 hexadecimal digits
        'md5sum': {'type': 'string', 'pattern': '^[0-9a-fA-F]{32}$'}
    },
    'required': ['os_version', 'md5sum'],
    'additionalProperties': False
}
| 24.893281 | 78 | 0.572563 |
7959139f83b81e1fa0bc5d4aaa1b624585313fe3 | 1,185 | py | Python | test/test_scheduled_task_details_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | test/test_scheduled_task_details_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | test/test_scheduled_task_details_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import octopus_deploy_swagger_client
from octopus_deploy_swagger_client.models.scheduled_task_details_resource import ScheduledTaskDetailsResource # noqa: E501
from octopus_deploy_swagger_client.rest import ApiException
class TestScheduledTaskDetailsResource(unittest.TestCase):
    """ScheduledTaskDetailsResource unit test stubs"""
    def setUp(self):
        # Generated stub: no fixtures required yet.
        pass
    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass
    def testScheduledTaskDetailsResource(self):
        """Test ScheduledTaskDetailsResource"""
        # FIXME: construct object with mandatory attributes with example values
        # model = octopus_deploy_swagger_client.models.scheduled_task_details_resource.ScheduledTaskDetailsResource()  # noqa: E501
        pass
# Allow running this generated test module directly.
if __name__ == '__main__':
    unittest.main()
| 28.902439 | 131 | 0.763713 |
795913a976b849dfe9f09972fbeb46eef68fc598 | 1,755 | py | Python | 05_weather_client/program.py | CarlosJimeno/Python-JumpStart-by-Building-10-apps | 9111ae2eb1eb61dc368c1eb681711bc40d902b52 | [
"MIT"
] | null | null | null | 05_weather_client/program.py | CarlosJimeno/Python-JumpStart-by-Building-10-apps | 9111ae2eb1eb61dc368c1eb681711bc40d902b52 | [
"MIT"
] | null | null | null | 05_weather_client/program.py | CarlosJimeno/Python-JumpStart-by-Building-10-apps | 9111ae2eb1eb61dc368c1eb681711bc40d902b52 | [
"MIT"
] | null | null | null | import requests
import bs4
import collections
# Immutable record for one parsed forecast: condition text, temperature
# value, temperature unit label ("scale"), and location name.
WeatherReport = collections.namedtuple('WeatherReport',
                                       'cond, temp, scale, loc')
def main():
    """Prompt for a zipcode, scrape the forecast page, and print the report."""
    print_header()
    code = input('What zipcode do you want to use? ')
    html = get_html_from_web(code)
    report = get_weather_from_html(html)
    print('The temperature in {} is {} {} and {}'.format(
        report.loc,
        report.temp,
        report.scale,
        report.cond
    ))
def print_header():
    """Print the app banner."""
    print('-------------------------------------')
    print('           WEATHER APP')
    print('-------------------------------------\n')
def get_html_from_web(zipcode):
    """Download the wunderground forecast page for *zipcode*; return its HTML text."""
    url = 'http://www.wunderground.com/weather-forecast/{}'.format(zipcode)
    response = requests.get(url)
    # print(response.status_code)
    return response.text
def get_weather_from_html(html):
    """Extract a WeatherReport (condition, temp, scale, location) from page HTML."""
    soup = bs4.BeautifulSoup(html, 'html.parser')
    loc = soup.find(class_='region-content-header').find('h1').get_text()
    condition = soup.find(class_='condition-icon').get_text()
    temp = soup.find(class_='wu-unit-temperature').find(class_='wu-value').get_text()
    scale = soup.find(class_='wu-unit-temperature').find(class_='wu-label').get_text()
    # Normalise the scraped strings before building the report.
    loc = find_city_and_state_from_location(cleanup_text(loc))
    return WeatherReport(cond=cleanup_text(condition),
                         temp=cleanup_text(temp),
                         scale=cleanup_text(scale),
                         loc=loc)
def cleanup_text(text: str):
    """Strip surrounding whitespace; pass empty/None values through unchanged."""
    return text.strip() if text else text
def find_city_and_state_from_location(loc: str):
    """Return the first line of *loc* (the "City, ST" part), trimmed."""
    return loc.split('\n')[0].strip()
# Run the app only when executed as a script.
if __name__ == '__main__':
    main()
| 24.375 | 86 | 0.621083 |
795913f5a00a01f8fde3d57cdb8455a06956c360 | 1,903 | py | Python | pyrevolve/genotype/lsystem_neat/crossover.py | braj29/robo_swimmers | b3c3fa91976884095eb6b5e67844167598ec573d | [
"Apache-1.1"
] | null | null | null | pyrevolve/genotype/lsystem_neat/crossover.py | braj29/robo_swimmers | b3c3fa91976884095eb6b5e67844167598ec573d | [
"Apache-1.1"
] | null | null | null | pyrevolve/genotype/lsystem_neat/crossover.py | braj29/robo_swimmers | b3c3fa91976884095eb6b5e67844167598ec573d | [
"Apache-1.1"
] | null | null | null | from pyrevolve.genotype.plasticoding.plasticoding import Plasticoding, Alphabet
from pyrevolve.genotype.lsystem_neat.lsystem_neat_genotype import LSystemCPGHyperNEATGenotype
from pyrevolve.genotype.neat_brain_genome.crossover import NEATCrossoverConf
from pyrevolve.genotype.neat_brain_genome.crossover import standard_crossover as NEATBrainCrossover
from pyrevolve.genotype.plasticoding.crossover.standard_crossover import generate_child_genotype as PlasticodingCrossover
import random
class CrossoverConfig:
    """Configuration holder for the crossover operator."""

    def __init__(self, crossover_prob):
        """
        Creates a Crossover object that sets the configuration for the crossover operator
        :param crossover_prob: crossover probability
        """
        self.crossover_prob = crossover_prob
def standard_crossover(parents, lsystem_conf, crossover_conf):
    """
    Creates an child (individual) through crossover with two parents
    :param parents: Parents type Individual
    :param lsystem_conf: LSystemCPGHyperNEATGenotypeConfig type with config for NEAT and Plasticoding
    :param crossover_conf: CrossoverConfig for lsystem crossover type
    :return: brain and body crossover (Only body right now)
    """
    assert len(parents) == 2
    first, second = parents
    body_genomes = [first.genotype._body_genome, second.genotype._body_genome]
    brain_pairs = list(zip(first.genotype._brain_genomes, second.genotype._brain_genomes))
    child = LSystemCPGHyperNEATGenotype()
    neat_conf = NEATCrossoverConf()
    # Body crossover first, then one NEAT crossover per paired brain genome.
    child._body_genome = PlasticodingCrossover(
        body_genomes, lsystem_conf.plasticoding, crossover_conf)
    child._brain_genomes = [
        NEATBrainCrossover([g1, g2], neat_conf, crossover_conf, lsystem_conf)
        for g1, g2 in brain_pairs
    ]
    return child
| 38.836735 | 124 | 0.781923 |
795914e15637b1885ef00fd4c774b7ed3e870a32 | 98 | py | Python | scripts/portal/in_ban.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/portal/in_ban.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/portal/in_ban.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # Created by MechAviv
# ID :: [865000000]
# Commerci Republic : San Commerci
# Warp the player to map 865000003, spawn portal 1.
sm.warp(865000003, 1)
7959153a759657ee7eb8bb6269c0b99a12dab94b | 8,939 | py | Python | tf_agents/policies/py_tf_policy.py | Francis777/agents | 24e878a697be418307cfbff69724d86be767719d | [
"Apache-2.0"
] | null | null | null | tf_agents/policies/py_tf_policy.py | Francis777/agents | 24e878a697be418307cfbff69724d86be767719d | [
"Apache-2.0"
] | null | null | null | tf_agents/policies/py_tf_policy.py | Francis777/agents | 24e878a697be418307cfbff69724d86be767719d | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts TensorFlow Policies into Python Policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
from absl import logging
import tensorflow as tf
from tf_agents.policies import py_policy
from tf_agents.policies import tf_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
from tf_agents.utils import common
from tf_agents.utils import nest_utils
from tf_agents.utils import session_utils
class PyTFPolicy(py_policy.PyPolicy, session_utils.SessionUser):
  """Exposes a Python policy as wrapper over a TF Policy."""
  # Populated lazily by _construct(); declared here for type checkers.
  _time_step = ...  # type: ts.TimeStep
  _policy_state = ...  # type: types.NestedPlaceHolder
  _action_step = ...  # type: policy_step.PolicyStep
  # TODO(damienv): currently, the initial policy state must be batched
  # if batch_size is given. Without losing too much generality, the initial
  # policy state could be the same for every element in the batch.
  # In that case, the initial policy state could be given with no batch
  # dimension.
  # TODO(sfishman): Remove batch_size param entirely.
  def __init__(self,
               policy: tf_policy.TFPolicy,
               batch_size: Optional[int] = None,
               seed: Optional[types.Seed] = None):
    """Initializes a new `PyTFPolicy`.
    Args:
      policy: A TF Policy implementing `tf_policy.TFPolicy`.
      batch_size: (deprecated)
      seed: Seed to use if policy performs random actions (optional).
    """
    if not isinstance(policy, tf_policy.TFPolicy):
      logging.warning('Policy should implement tf_policy.TFPolicy')
    if batch_size is not None:
      logging.warning('In PyTFPolicy constructor, `batch_size` is deprecated, '
                      'this parameter has no effect. This argument will be '
                      'removed on 2019-05-01')
    # Convert tensor specs to array specs for the Python-side base class.
    time_step_spec = tensor_spec.to_nest_array_spec(policy.time_step_spec)
    action_spec = tensor_spec.to_nest_array_spec(policy.action_spec)
    super(PyTFPolicy, self).__init__(
        time_step_spec, action_spec, policy_state_spec=(), info_spec=())
    self._tf_policy = policy
    # Session is provided later through session_utils.SessionUser.
    self.session = None
    self._policy_state_spec = tensor_spec.to_nest_array_spec(
        self._tf_policy.policy_state_spec)
    self._batch_size = None
    self._batched = None
    self._seed = seed
    # Becomes True once initialize() has built the graph.
    self._built = False
  def _construct(self, batch_size, graph):
    """Construct the agent graph through placeholders."""
    self._batch_size = batch_size
    self._batched = batch_size is not None
    # Placeholders always carry an outer (batch) dim; unbatched use gets 1.
    outer_dims = [self._batch_size] if self._batched else [1]
    with graph.as_default():
      self._time_step = tensor_spec.to_nest_placeholder(
          self._tf_policy.time_step_spec, outer_dims=outer_dims)
      self._tf_initial_state = self._tf_policy.get_initial_state(
          batch_size=self._batch_size or 1)
      self._policy_state = tf.nest.map_structure(
          lambda ps: tf.compat.v1.placeholder(  # pylint: disable=g-long-lambda
              ps.dtype,
              ps.shape,
              name='policy_state'),
          self._tf_initial_state)
      self._action_step = self._tf_policy.action(
          self._time_step, self._policy_state, seed=self._seed)
  def initialize(self,
                 batch_size: Optional[int],
                 graph: Optional[tf.Graph] = None):
    """Builds the policy graph and initializes its variables (once only)."""
    if self._built:
      raise RuntimeError('PyTFPolicy can only be initialized once.')
    if not graph:
      graph = tf.compat.v1.get_default_graph()
    self._construct(batch_size, graph)
    var_list = tf.nest.flatten(self._tf_policy.variables())
    common.initialize_uninitialized_variables(self.session, var_list)
    self._built = True
  def save(self,
           policy_dir: Optional[Text] = None,
           graph: Optional[tf.Graph] = None):
    """Saves a checkpoint of the wrapped policy into `policy_dir`."""
    if not self._built:
      raise RuntimeError('PyTFPolicy has not been initialized yet.')
    if not graph:
      graph = tf.compat.v1.get_default_graph()
    with graph.as_default():
      global_step = tf.compat.v1.train.get_or_create_global_step()
      policy_checkpointer = common.Checkpointer(
          ckpt_dir=policy_dir, policy=self._tf_policy, global_step=global_step)
      policy_checkpointer.initialize_or_restore(self.session)
      with self.session.as_default():
        policy_checkpointer.save(global_step)
  def restore(self,
              policy_dir: Text,
              graph: Optional[tf.Graph] = None,
              assert_consumed: bool = True):
    """Restores the policy from the checkpoint.
    Args:
      policy_dir: Directory with the checkpoint.
      graph: A graph, inside which policy the is restored (optional).
      assert_consumed: If true, contents of the checkpoint will be checked
        for a match against graph variables.
    Returns:
      step: Global step associated with the restored policy checkpoint.
    Raises:
      RuntimeError: if the policy is not initialized.
      AssertionError: if the checkpoint contains variables which do not have
        matching names in the graph, and assert_consumed is set to True.
    """
    if not self._built:
      raise RuntimeError(
          'PyTFPolicy must be initialized before being restored.')
    if not graph:
      graph = tf.compat.v1.get_default_graph()
    with graph.as_default():
      global_step = tf.compat.v1.train.get_or_create_global_step()
      policy_checkpointer = common.Checkpointer(
          ckpt_dir=policy_dir, policy=self._tf_policy, global_step=global_step)
      status = policy_checkpointer.initialize_or_restore(self.session)
      with self.session.as_default():
        if assert_consumed:
          status.assert_consumed()
        status.run_restore_ops()
    return self.session.run(global_step)
  def _build_from_time_step(self, time_step):
    """Initializes the policy lazily, inferring batch size from *time_step*."""
    outer_shape = nest_utils.get_outer_array_shape(time_step,
                                                   self._time_step_spec)
    if len(outer_shape) == 1:
      self.initialize(outer_shape[0])
    elif not outer_shape:
      self.initialize(None)
    else:
      raise ValueError(
          'Cannot handle more than one outer dimension. Saw {} outer '
          'dimensions: {}'.format(len(outer_shape), outer_shape))
  def _get_initial_state(self, batch_size):
    """Returns the TF policy's initial state, initializing lazily if needed."""
    if not self._built:
      self.initialize(batch_size)
    # Accept either the built batch size or 1 for the unbatched case.
    if batch_size not in [self._batch_size, self._batch_size or 1]:
      raise ValueError(
          '`batch_size` argument is different from the batch size provided '
          'previously. Expected {}, but saw {}.'.format(self._batch_size,
                                                        batch_size))
    return self.session.run(self._tf_initial_state)
  def _action(self, time_step, policy_state, seed: Optional[types.Seed] = None):
    """Runs one action step through the TF graph for the given time_step."""
    if seed is not None:
      raise ValueError('`seed` is passed to the class as an argument.')
    if not self._built:
      self._build_from_time_step(time_step)
    batch_size = None
    if time_step.step_type.shape:
      batch_size = time_step.step_type.shape[0]
    if self._batch_size != batch_size:
      raise ValueError(
          'The batch size of time_step is different from the batch size '
          'provided previously. Expected {}, but saw {}.'.format(
              self._batch_size, batch_size))
    if not self._batched:
      # Since policy_state is given in a batched form from the policy and we
      # simply have to send it back we do not need to worry about it. Only
      # update time_step.
      time_step = nest_utils.batch_nested_array(time_step)
    nest_utils.assert_same_structure(self._time_step, time_step)
    feed_dict = {self._time_step: time_step}
    if policy_state is not None:
      # Flatten policy_state to handle specs that are not hashable due to lists.
      for state_ph, state in zip(
          tf.nest.flatten(self._policy_state), tf.nest.flatten(policy_state)):
        feed_dict[state_ph] = state
    action_step = self.session.run(self._action_step, feed_dict)
    action, state, info = action_step
    if not self._batched:
      # Strip the synthetic batch dimension added above before returning.
      action, info = nest_utils.unbatch_nested_array([action, info])
    return policy_step.PolicyStep(action, state, info)
| 38.038298 | 80 | 0.695604 |
79591658b4bedf95b8eafb0e23471e9714785784 | 1,533 | py | Python | tools/configen/tests/test_modules/future_annotations.py | sara-nl/hydra | 8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7 | [
"MIT"
] | null | null | null | tools/configen/tests/test_modules/future_annotations.py | sara-nl/hydra | 8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7 | [
"MIT"
] | null | null | null | tools/configen/tests/test_modules/future_annotations.py | sara-nl/hydra | 8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from __future__ import annotations # type: ignore # noqa: F407
from dataclasses import dataclass
from typing import List, Optional
from omegaconf import MISSING
@dataclass
class User:
name: str = MISSING
age: int = MISSING
class LibraryClass:
"""
Some class from a user library that is incompatible with OmegaConf config
"""
def __eq__(self, other):
return isinstance(other, type(self))
class ExampleClass:
def __init__(
self,
no_default: float,
lst: List[str],
passthrough_list: List[LibraryClass],
dataclass_val: List[User],
def_value: List[str] = [],
default_str="Bond, James Bond",
none_str: Optional[str] = None,
):
self.no_default = no_default
self.lst = lst
self.passthrough_list = passthrough_list
self.dataclass_val = dataclass_val
self.def_value = def_value
self.default_str: str = default_str
self.none_str = none_str
def __eq__(self, other):
return (
isinstance(other, type(self))
and self.no_default == other.no_default
and self.lst == other.lst
and self.passthrough_list == other.passthrough_list
and self.dataclass_val == other.dataclass_val
and self.def_value == other.def_value
and self.default_str == other.default_str
and self.none_str == other.none_str
)
| 27.872727 | 77 | 0.634703 |
7959185032c26caa9f9c672ddcbb4d456b467c18 | 647 | py | Python | tests/migrations/test_migrations_first/second.py | iMerica/dj-models | fbe4a55ac362f9355a2298f58aa0deb0b6082e19 | [
"BSD-3-Clause"
] | 5 | 2019-02-15T16:47:50.000Z | 2021-12-26T18:52:23.000Z | tests/migrations/test_migrations_first/second.py | iMerica/dj-models | fbe4a55ac362f9355a2298f58aa0deb0b6082e19 | [
"BSD-3-Clause"
] | null | null | null | tests/migrations/test_migrations_first/second.py | iMerica/dj-models | fbe4a55ac362f9355a2298f58aa0deb0b6082e19 | [
"BSD-3-Clause"
] | 2 | 2021-08-09T02:29:09.000Z | 2021-08-20T03:30:11.000Z | from djmodels.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("migrations", "thefirst"),
("migrations2", "0002_second"),
]
operations = [
migrations.DeleteModel("Tribble"),
migrations.RemoveField("Author", "silly_field"),
migrations.AddField("Author", "rating", models.IntegerField(default=0)),
migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("migrations.Author", models.SET_NULL, null=True)),
],
)
]
| 23.107143 | 95 | 0.57187 |
795918b3dc00fd7eb6ca9f4e5178eb83f87fed73 | 1,754 | py | Python | py/dcp/problems/misc/pattern_matching.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 1 | 2020-06-26T13:28:43.000Z | 2020-06-26T13:28:43.000Z | py/dcp/problems/misc/pattern_matching.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 7 | 2021-11-18T19:46:08.000Z | 2022-03-12T01:03:01.000Z | py/dcp/problems/misc/pattern_matching.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | null | null | null | """
Pattern Matching.
You are given two strings, pattern and value. The pattern string consists of
just the letters a and b, describing a pattern within a string. For example,
the string catcatgocatgo matches the pattern aabab (where cat is a and go
is b). It also matches patterns like a, ab, and b. Write a method to
determine if value matches pattern.
"""
def pattern_match1(value, pattern):
    """Return True when *value* matches *pattern*.

    *pattern* consists of the letters 'a' and 'b'; each letter stands for a
    fixed, non-empty substring (e.g. 'catcatgocatgo' matches 'aabab' with
    a='cat', b='go').  Returns None when either argument is None, preserving
    the original contract.

    Fixes defects in the previous implementation: single-letter patterns
    (e.g. 'a', 'aa') never matched because the empty substring list for the
    unused letter was rejected; a substring length equal to len(value) was
    never tried (``range(1, n)`` excludes n); and an empty pattern raised
    IndexError instead of returning a result.
    """
    if value is None or pattern is None:
        return None

    n = len(value)
    num_a = pattern.count('a')
    num_b = len(pattern) - num_a

    # Degenerate pattern: only the empty value matches it.
    if num_a == 0 and num_b == 0:
        return n == 0

    def matches_with(len_a, len_b):
        # Walk the pattern, carving value into substrings of the chosen
        # lengths; every 'a' (and every 'b') must map to identical text.
        seen = {}
        index = 0
        for letter in pattern:
            size = len_a if letter == 'a' else len_b
            piece = value[index:index + size]
            if letter in seen:
                if seen[letter] != piece:
                    return False
            else:
                seen[letter] = piece
            index += size
        # The pattern must consume the value exactly.
        return index == n

    # Try every feasible length for 'a'; the length for 'b' is then fully
    # determined by num_a * len_a + num_b * len_b == n.
    max_len_a = n // num_a if num_a else 0
    for len_a in range(1 if num_a else 0, max_len_a + 1):
        remaining = n - num_a * len_a
        if num_b:
            # 'b' substrings must be non-empty and divide evenly.
            if remaining < num_b or remaining % num_b:
                continue
            len_b = remaining // num_b
        else:
            if remaining:
                continue
            len_b = 0
        if matches_with(len_a, len_b):
            return True
    return False
79591939bad209771e6fefdee5745d01f55610be | 13,366 | py | Python | reversion/revisions.py | bellini666/django-reversion | 3be3e51ed20bf3f3db07b069a03923ca889f69b2 | [
"BSD-3-Clause"
] | null | null | null | reversion/revisions.py | bellini666/django-reversion | 3be3e51ed20bf3f3db07b069a03923ca889f69b2 | [
"BSD-3-Clause"
] | null | null | null | reversion/revisions.py | bellini666/django-reversion | 3be3e51ed20bf3f3db07b069a03923ca889f69b2 | [
"BSD-3-Clause"
] | null | null | null | from contextvars import ContextVar
from collections import namedtuple, defaultdict
from contextlib import contextmanager
from functools import wraps
from django.apps import apps
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, transaction, router
from django.db.models.query import QuerySet
from django.db.models.signals import post_save, m2m_changed
from django.utils.encoding import force_str
from django.utils import timezone
from reversion.errors import RevisionManagementError, RegistrationError
from reversion.signals import pre_revision_commit, post_revision_commit
# Per-model serialization options captured at registration time.
_VersionOptions = namedtuple("VersionOptions", (
    "fields",
    "follow",
    "format",
    "for_concrete_model",
    "ignore_duplicates",
    "use_natural_foreign_keys",
))
# One open revision block: metadata plus pending versions keyed per database.
_StackFrame = namedtuple("StackFrame", (
    "manage_manually",
    "user",
    "comment",
    "date_created",
    "db_versions",
    "meta",
))
# Stack of nested revision frames for the current execution context.
# ContextVar isolates the stack per thread / async task.  The mutable default
# list is safe because the code always replaces the list via _stack.set()
# rather than mutating the shared default in place.
_stack = ContextVar("reversion-stack", default=[])
def is_active():
    """Return whether at least one revision block is currently open."""
    return len(_stack.get()) > 0
def _current_frame():
    """Return the innermost revision frame, raising if none is open."""
    stack = _stack.get()
    if not stack:
        raise RevisionManagementError("There is no active revision for this thread")
    return stack[-1]
def _copy_db_versions(db_versions):
return {
db: versions.copy()
for db, versions
in db_versions.items()
}
def _push_frame(manage_manually, using):
    """Open a new revision frame tracking database *using*.

    A nested frame inherits user/comment/metadata from the enclosing frame
    and extends its tracked databases; the outermost frame starts fresh.
    """
    if is_active():
        current_frame = _current_frame()
        # Nested block: copy the pending versions and ensure *using* is tracked.
        db_versions = _copy_db_versions(current_frame.db_versions)
        db_versions.setdefault(using, {})
        stack_frame = current_frame._replace(
            manage_manually=manage_manually,
            db_versions=db_versions,
        )
    else:
        # Outermost block: start with empty metadata and pending versions.
        stack_frame = _StackFrame(
            manage_manually=manage_manually,
            user=None,
            comment="",
            date_created=timezone.now(),
            db_versions={using: {}},
            meta=(),
        )
    # Replace (never mutate) the list so the ContextVar default stays clean.
    _stack.set(_stack.get() + [stack_frame])
def _update_frame(**kwargs):
    """Replace fields on the innermost frame (namedtuples are immutable)."""
    _stack.get()[-1] = _current_frame()._replace(**kwargs)
def _pop_frame():
    """Close the innermost frame, merging its state into the parent frame."""
    prev_frame = _current_frame()
    stack = _stack.get()
    del stack[-1]
    if is_active():
        current_frame = _current_frame()
        # Propagate pending versions only for dbs the parent already tracks.
        db_versions = {
            db: prev_frame.db_versions[db]
            for db
            in current_frame.db_versions.keys()
        }
        # Carry the closed frame's metadata up into the parent.
        _update_frame(
            user=prev_frame.user,
            comment=prev_frame.comment,
            date_created=prev_frame.date_created,
            db_versions=db_versions,
            meta=prev_frame.meta,
        )
def is_manage_manually():
    """Return whether the current revision block manages versions manually."""
    return _current_frame().manage_manually
def set_user(user):
    """Attach *user* to the revision being created."""
    _update_frame(user=user)
def get_user():
    """Return the user attached to the current revision (may be None)."""
    return _current_frame().user
def set_comment(comment):
    """Set the comment stored with the revision being created."""
    _update_frame(comment=comment)
def get_comment():
    """Return the comment for the current revision."""
    return _current_frame().comment
def set_date_created(date_created):
    """Override the creation timestamp of the revision being created."""
    _update_frame(date_created=date_created)
def get_date_created():
    """Return the creation timestamp of the current revision."""
    return _current_frame().date_created
def add_meta(model, **values):
    """Queue an extra meta *model* instance (built from **values) for the revision."""
    _update_frame(meta=_current_frame().meta + ((model, values),))
def _follow_relations(obj):
    """Yield related model instances named by the model's registered follow options."""
    version_options = _get_options(obj.__class__)
    for follow_name in version_options.follow:
        try:
            follow_obj = getattr(obj, follow_name)
        except ObjectDoesNotExist:
            # Related object does not exist (e.g. reverse one-to-one): skip.
            continue
        if isinstance(follow_obj, models.Model):
            yield follow_obj
        elif isinstance(follow_obj, (models.Manager, QuerySet)):
            # Reverse FK / m2m relation: follow every related instance.
            for follow_obj_instance in follow_obj.all():
                yield follow_obj_instance
        elif follow_obj is not None:
            # Anything else (except None) is a registration mistake.
            raise RegistrationError("{name}.{follow_name} should be a Model or QuerySet".format(
                name=obj.__class__.__name__,
                follow_name=follow_name,
            ))
def _follow_relations_recursive(obj):
    """Return the transitive closure of *obj* under the registered follow
    relations, including *obj* itself."""
    relations = set()
    pending = [obj]
    while pending:
        current = pending.pop()
        if current in relations:
            continue
        relations.add(current)
        pending.extend(_follow_relations(current))
    return relations
def _add_to_revision(obj, using, model_db, explicit):
    """Serialize *obj* into the current frame's pending versions for db *using*.

    *explicit* is True when the object was added directly (rather than
    reached by following relations); explicit adds overwrite an existing
    pending version and trigger the ignore_duplicates check.
    """
    from reversion.models import Version
    # Exit early if the object is not fully-formed.
    if obj.pk is None:
        return
    version_options = _get_options(obj.__class__)
    content_type = _get_content_type(obj.__class__, using)
    object_id = force_str(obj.pk)
    version_key = (content_type, object_id)
    # If the obj is already in the revision, stop now.
    db_versions = _current_frame().db_versions
    versions = db_versions[using]
    if version_key in versions and not explicit:
        return
    # Get the version data.
    version = Version(
        content_type=content_type,
        object_id=object_id,
        db=model_db,
        format=version_options.format,
        serialized_data=serializers.serialize(
            version_options.format,
            (obj,),
            fields=version_options.fields,
            use_natural_foreign_keys=version_options.use_natural_foreign_keys,
        ),
        object_repr=force_str(obj),
    )
    # If the version is a duplicate, stop now.
    if version_options.ignore_duplicates and explicit:
        previous_version = Version.objects.using(using).get_for_object(obj, model_db=model_db).first()
        if previous_version and previous_version._local_field_dict == version._local_field_dict:
            return
    # Store the version.  Copy-on-write keeps parent frames unaffected.
    db_versions = _copy_db_versions(db_versions)
    db_versions[using][version_key] = version
    _update_frame(db_versions=db_versions)
    # Follow relations.
    for follow_obj in _follow_relations(obj):
        _add_to_revision(follow_obj, using, model_db, False)
def add_to_revision(obj, model_db=None):
model_db = model_db or router.db_for_write(obj.__class__, instance=obj)
for db in _current_frame().db_versions.keys():
_add_to_revision(obj, db, model_db, True)
def _save_revision(versions, user=None, comment="", meta=(), date_created=None, using=None):
    """Persist a Revision plus its Version and meta records on database ``using``.

    Versions whose underlying object no longer exists in the database are
    dropped first; if nothing remains, no revision is created.  Fires the
    pre/post_revision_commit signals around the save.
    """
    from reversion.models import Revision
    # Only save versions that exist in the database.
    # Use _base_manager so we don't have problems when _default_manager is overridden.
    model_db_pks = defaultdict(lambda: defaultdict(set))
    for version in versions:
        model_db_pks[version._model][version.db].add(version.object_id)
    model_db_existing_pks = {
        model: {
            db: frozenset(map(
                force_str,
                model._base_manager.using(db).filter(pk__in=pks).values_list("pk", flat=True),
            ))
            for db, pks in db_pks.items()
        }
        for model, db_pks in model_db_pks.items()
    }
    versions = [
        version for version in versions
        if version.object_id in model_db_existing_pks[version._model][version.db]
    ]
    # Bail early if there are no objects to save.
    if not versions:
        return
    # Save a new revision.
    revision = Revision(
        date_created=date_created,
        user=user,
        comment=comment,
    )
    # Send the pre_revision_commit signal.
    pre_revision_commit.send(
        sender=create_revision,
        revision=revision,
        versions=versions,
    )
    # Save the revision.
    revision.save(using=using)
    # Save version models.
    for version in versions:
        version.revision = revision
        version.save(using=using)
    # Save the meta information.
    for meta_model, meta_fields in meta:
        meta_model._base_manager.db_manager(using=using).create(
            revision=revision,
            **meta_fields
        )
    # Send the post_revision_commit signal.
    post_revision_commit.send(
        sender=create_revision,
        revision=revision,
        versions=versions,
    )
@contextmanager
def _dummy_context():
    """No-op stand-in for transaction.atomic() when atomic=False."""
    yield


@contextmanager
def _create_revision_context(manage_manually, using, atomic):
    """Run a block inside a revision frame, saving the revision on clean exit."""
    context = transaction.atomic(using=using) if atomic else _dummy_context()
    with context:
        _push_frame(manage_manually, using)
        try:
            yield
            # Only save for a db if that's the last stack frame for that db.
            if not any(using in frame.db_versions for frame in _stack.get()[:-1]):
                current_frame = _current_frame()
                _save_revision(
                    versions=current_frame.db_versions[using].values(),
                    user=current_frame.user,
                    comment=current_frame.comment,
                    meta=current_frame.meta,
                    date_created=current_frame.date_created,
                    using=using,
                )
        finally:
            # Always unwind the frame, even if the block raised.
            _pop_frame()


def create_revision(manage_manually=False, using=None, atomic=True):
    """Return a context manager / decorator that wraps work in a revision.

    :param manage_manually: if True, the signal receivers add nothing automatically.
    :param using: database alias; defaults to the write database for Revision.
    :param atomic: wrap the block in transaction.atomic(using) when True.
    """
    from reversion.models import Revision
    using = using or router.db_for_write(Revision)
    return _ContextWrapper(_create_revision_context, (manage_manually, using, atomic))
class _ContextWrapper(object):
def __init__(self, func, args):
self._func = func
self._args = args
self._context = func(*args)
def __enter__(self):
return self._context.__enter__()
def __exit__(self, exc_type, exc_value, traceback):
return self._context.__exit__(exc_type, exc_value, traceback)
def __call__(self, func):
@wraps(func)
def do_revision_context(*args, **kwargs):
with self._func(*self._args):
return func(*args, **kwargs)
return do_revision_context
def _post_save_receiver(sender, instance, using, **kwargs):
    # post_save hook: record the saved instance in the active revision,
    # unless the revision block is managed manually.
    if is_registered(sender) and is_active() and not is_manage_manually():
        add_to_revision(instance, model_db=using)


def _m2m_changed_receiver(instance, using, action, model, reverse, **kwargs):
    # m2m_changed hook: record the owning instance after a forward m2m change.
    if action.startswith("post_") and not reverse:
        if is_registered(instance) and is_active() and not is_manage_manually():
            add_to_revision(instance, model_db=using)
def _get_registration_key(model):
return (model._meta.app_label, model._meta.model_name)
# Global registry: maps (app_label, model_name) -> _VersionOptions.
_registered_models = {}
def is_registered(model):
    """Return True if ``model`` has been registered with django-reversion."""
    return _get_registration_key(model) in _registered_models


def get_registered_models():
    """Return a generator of the model classes for every registered model."""
    return (apps.get_model(*key) for key in _registered_models.keys())
def _get_senders_and_signals(model):
    """Yield (sender, signal, receiver) triples to (dis)connect for ``model``.

    Covers post_save on the model itself plus m2m_changed on each local
    many-to-many through model (which may still be a lazy "app.Model" string).
    """
    yield model, post_save, _post_save_receiver
    opts = model._meta.concrete_model._meta
    for field in opts.local_many_to_many:
        m2m_model = field.remote_field.through
        if isinstance(m2m_model, str):
            if "." not in m2m_model:
                # Qualify a bare model name with the owning app's label.
                m2m_model = "{app_label}.{m2m_model}".format(
                    app_label=opts.app_label,
                    m2m_model=m2m_model
                )
        yield m2m_model, m2m_changed, _m2m_changed_receiver
def register(model=None, fields=None, exclude=(), follow=(), format="json",
             for_concrete_model=True, ignore_duplicates=False, use_natural_foreign_keys=False):
    """Register a model with django-reversion.

    Usable directly (``register(MyModel, ...)``) or as a class decorator
    (``@register(...)``).  Raises RegistrationError on double registration.
    """
    def register(model):
        # Inner worker; deliberately shadows the outer name so the decorator
        # form can simply return it.
        # Prevent multiple registration.
        if is_registered(model):
            raise RegistrationError("{model} has already been registered with django-reversion".format(
                model=model,
            ))
        # Parse fields: default to all local fields/m2m, minus ``exclude``.
        opts = model._meta.concrete_model._meta
        version_options = _VersionOptions(
            fields=tuple(
                field_name
                for field_name
                in ([
                    field.name
                    for field
                    in opts.local_fields + opts.local_many_to_many
                ] if fields is None else fields)
                if field_name not in exclude
            ),
            follow=tuple(follow),
            format=format,
            for_concrete_model=for_concrete_model,
            ignore_duplicates=ignore_duplicates,
            use_natural_foreign_keys=use_natural_foreign_keys,
        )
        # Register the model.
        _registered_models[_get_registration_key(model)] = version_options
        # Connect signals.
        for sender, signal, signal_receiver in _get_senders_and_signals(model):
            signal.connect(signal_receiver, sender=sender)
        # All done!
        return model
    # Return a class decorator if model is not given
    if model is None:
        return register
    # Register the model.
    return register(model)
def _assert_registered(model):
    """Raise RegistrationError if ``model`` is not registered."""
    if not is_registered(model):
        raise RegistrationError("{model} has not been registered with django-reversion".format(
            model=model,
        ))


def _get_options(model):
    """Return the _VersionOptions recorded for a registered ``model``."""
    _assert_registered(model)
    return _registered_models[_get_registration_key(model)]


def unregister(model):
    """Remove ``model`` from the registry and disconnect its signal receivers."""
    _assert_registered(model)
    del _registered_models[_get_registration_key(model)]
    # Disconnect signals.
    for sender, signal, signal_receiver in _get_senders_and_signals(model):
        signal.disconnect(signal_receiver, sender=sender)
def _get_content_type(model, using):
    """Return the ContentType for ``model`` on database ``using``.

    Honors the registration's ``for_concrete_model`` option (proxy models).
    """
    from django.contrib.contenttypes.models import ContentType
    version_options = _get_options(model)
    return ContentType.objects.db_manager(using).get_for_model(
        model,
        for_concrete_model=version_options.for_concrete_model,
    )
| 31.011601 | 103 | 0.662577 |
79591a2260195efbc52e75d062179cd21347463b | 1,543 | py | Python | arcade/gui/examples/box_group.py | akapkotel/arcade | 6e43ec53e7bfa3dee1aa574404794e3695aad381 | [
"MIT"
] | null | null | null | arcade/gui/examples/box_group.py | akapkotel/arcade | 6e43ec53e7bfa3dee1aa574404794e3695aad381 | [
"MIT"
] | 1 | 2022-03-21T06:24:29.000Z | 2022-03-21T06:24:29.000Z | arcade/gui/examples/box_group.py | Ibrahim2750mi/arcade | bf3229e64117931bffb8e50926a996a7a8fc9b8b | [
"MIT"
] | null | null | null | import arcade
from arcade.gui import UIManager, UIBoxLayout
from arcade.gui.widgets import UIDummy, UISpace
from arcade.gui.widgets.layout import UIAnchorLayout
class UIMockup(arcade.Window):
    """Demo window showing a centered vertical and a corner-anchored horizontal UIBoxLayout."""

    def __init__(self):
        super().__init__(800, 600, "UI Mockup", resizable=True)
        self.manager = UIManager()
        self.manager.enable()
        arcade.set_background_color(arcade.color.DARK_BLUE_GRAY)
        # Root layout used to anchor the two box groups.
        anchor = self.manager.add(UIAnchorLayout())
        # Vertical group of three dummy widgets, centered in the window.
        self.v_box = UIBoxLayout(
            children=[
                UIDummy(width=200, color=arcade.color.RED),
                UIDummy(width=200, color=arcade.color.YELLOW),
                UIDummy(width=200, color=arcade.color.GREEN),
            ],
            space_between=20,
        )
        anchor.add(
            anchor_x="center_x",
            anchor_y="center_y",
            child=self.v_box,
        )
        # Horizontal group pinned near the bottom-left corner, with spacing
        # demonstrated via UISpace and with_padding.
        self.h_box = UIBoxLayout(
            vertical=False,
            children=[
                UIDummy(width=100, color=arcade.color.RED),
                UISpace(width=20, height=100),
                UIDummy(width=50, color=arcade.color.YELLOW).with_padding(right=30),
                UIDummy(width=20, color=arcade.color.GREEN),
            ],
        )
        anchor.add(
            child=self.h_box.with_border(),
            align_x=20,
            anchor_x="left",
            align_y=20,
            anchor_y="bottom",
        )

    def on_draw(self):
        # Clear the frame, then let the UI manager render all widgets.
        self.clear()
        self.manager.draw()


# Create the window and enter the arcade event loop.
window = UIMockup()
arcade.run()
| 28.574074 | 84 | 0.564485 |
79591c0692d7618f5316e7b429e8d2302c7453e9 | 1,617 | py | Python | src/get_more_features/get_reading_level.py | kashev/reddit-reliability | 6d92486019b9264d2b7bfd51912ef257fc6088ae | [
"MIT"
] | null | null | null | src/get_more_features/get_reading_level.py | kashev/reddit-reliability | 6d92486019b9264d2b7bfd51912ef257fc6088ae | [
"MIT"
] | null | null | null | src/get_more_features/get_reading_level.py | kashev/reddit-reliability | 6d92486019b9264d2b7bfd51912ef257fc6088ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
## -*- coding: utf-8 -*-
# reddit-reliability
import pymongo
from textstat.textstat import textstat
from pymongo import MongoClient
def main():
    """Compute a Flesch reading-ease score per reddit user and store it in MongoDB.

    Joins each user's comments into one text blob, scores it with textstat,
    and upserts one document per username (duplicates rejected by the index).
    NOTE(review): this script is Python 2 (``print name`` statement).
    """
    mongo_client = MongoClient('mongodb://cs598tar:cs598tar@'
                               '107.170.215.176:27017')
    reddit_data = mongo_client.reddit_data
    user_data = reddit_data.user_data
    user_reading_level = reddit_data.user_reading_level
    user_comments = reddit_data.user_comments
    # One document per user; duplicates are rejected by the unique index.
    user_reading_level.create_index(
        [("username", pymongo.ASCENDING)],
        background=True,
        unique=True,
        dropDups=True
    )
    for user in user_data.find(no_cursor_timeout=True).sort('data.name', 1):
        name = user['data']['name']
        print name
        comment_list = []
        for comment in user_comments.find({'data.author': name}):
            if comment['kind'] == 't1':  # Actually a comment
                comment_text = comment['data']['body']
                comment_list.append(comment_text)
        comment_book = ' '.join(comment_list).strip()
        try:
            if len(comment_book) > 0:
                reading_ease = textstat.flesch_reading_ease(comment_book)
            else:
                # No comments: record a neutral zero score.
                reading_ease = 0
        except TypeError:  # I hate textstat
            # textstat can raise TypeError on odd input; treat as unscorable.
            reading_ease = 0
        reading_level_data = {'username': name,
                              'reading_level': reading_ease}
        try:
            user_reading_level.insert_one(reading_level_data)
        except pymongo.errors.DuplicateKeyError:
            # Already scored this user on a previous run.
            continue


if __name__ == '__main__':
    main()
| 30.509434 | 76 | 0.606679 |
79591c2f6720db8aa3a9952697556ade325272de | 6,198 | py | Python | arjuna/core/discovery.py | test-mile/arjuna | 21880b41e061e11bac2e600a3614684f8af75b2f | [
"Apache-2.0"
] | 9 | 2018-11-15T10:09:17.000Z | 2021-01-12T05:59:19.000Z | arjuna/core/discovery.py | test-mile/arjuna | 21880b41e061e11bac2e600a3614684f8af75b2f | [
"Apache-2.0"
] | 2 | 2019-07-01T15:33:46.000Z | 2019-07-12T13:04:08.000Z | arjuna/core/discovery.py | test-mile/arjuna | 21880b41e061e11bac2e600a3614684f8af75b2f | [
"Apache-2.0"
] | 4 | 2018-12-02T15:14:04.000Z | 2020-05-28T12:57:24.000Z | '''
This file is a part of Test Mile Arjuna
Copyright 2018 Test Mile Software Testing Pvt Ltd
Website: www.TestMile.com
Email: support [at] testmile.com
Creator: Rahul Verma
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
from arjuna.core.enums import *
from arjuna.core.utils import file_utils
from arjuna.core.utils import sys_utils
class DiscoveredFile:
    """Property bag describing a single file found during test discovery."""

    def __init__(self):
        # Maps attribute keys (DiscoveredFileAttributeEnum members) to values.
        self.props = {}

    def attr(self, attr):
        """Return the value stored under ``attr``."""
        return self.props[attr]

    def set_attr(self, name, attr):
        """Store ``attr`` under the key ``name``."""
        self.props[name] = attr
# %-style error template shown when two files normalize to the same
# case-insensitive discovery key.
fa_msg1 = '''Duplicate test file found with name: %s.
Check package and class names in test directory.
Arjuna follows a case-INSENSITIVE approach for test names.'''
class FileAggregator:
    """Collects DiscoveredFile objects, rejects case-insensitive duplicates,
    and freezes them into a deterministic (key-sorted) order."""

    def __init__(self):
        self.files = []
        self.found_class_names = set()
        self.temp_map = {}
        from arjuna.tpi import Arjuna
        self.logger = Arjuna.get_logger()
        self.console = Arjuna.get_console()

    def add(self, df):
        """Add a discovered file; exit the process on a duplicate key."""
        # Case-insensitive duplicate key: <abs dir>/<UPPER-CASED file name>.
        key = df.attr(DiscoveredFileAttributeEnum.DIRECTORY_ABSOLUTE_PATH) + "/" + df.attr(
            DiscoveredFileAttributeEnum.FULL_NAME).upper()
        if key in self.found_class_names or key in self.temp_map:
            # Bug fix: the original called fa_msg1.format(...) and discarded
            # the result (and .format() does nothing to a %-style template),
            # so the raw template containing a literal "%s" was displayed.
            # Interpolate the duplicate file's actual name instead.
            self.console.display_error(
                fa_msg1 % df.attr(DiscoveredFileAttributeEnum.FULL_NAME))
            sys_utils.fexit()
        self.temp_map[key] = df

    def freeze(self):
        """Sort collected files by key and expose them via iteration."""
        paths = []
        paths.extend(self.temp_map.keys())
        paths.sort()
        for path in paths:
            self.files.append(self.temp_map[path])

    def __iter__(self):
        return iter(self.files)

    def enumerate(self):
        """Log every attribute of every frozen file at debug level."""
        for f in self:
            self.logger.debug("-------------------------")
            self.logger.debug("Name:\t" + f.attr(DiscoveredFileAttributeEnum.NAME))
            self.logger.debug("Extension:\t" + f.attr(DiscoveredFileAttributeEnum.EXTENSION))
            self.logger.debug("Full Name:\t" + f.attr(DiscoveredFileAttributeEnum.FULL_NAME))
            self.logger.debug("Package Dot Notation:\t" + f.attr(DiscoveredFileAttributeEnum.PACKAGE_DOT_NOTATION))
            self.logger.debug(
                "Directory Relative Path:\t" + f.attr(DiscoveredFileAttributeEnum.DIRECTORY_RELATIVE_PATH))
            self.logger.debug(
                "Directory Absolute Path:\t" + f.attr(DiscoveredFileAttributeEnum.DIRECTORY_ABSOLUTE_PATH))
            self.logger.debug("Comma Separated Relative Path:\t"
                              + f.attr(DiscoveredFileAttributeEnum.COMMA_SEPATARED_RELATIVE_PATH))
            # self.logger.debug("Container:\t" + f.attr(DiscoveredFileAttributeEnum.CONTAINER))
            # self.logger.debug("Container Type:\t" + f.attr(DiscoveredFileAttributeEnum.CONTAINER_TYPE))
            self.logger.debug("-------------------------")
class FileDiscoverer:
    """Walks a root directory and feeds matching .py files to a FileAggregator."""

    def __init__(self, aggregator, root_dir, include_prefixes=None):
        self.aggregator = aggregator
        # Strip a single trailing path separator before normalizing.
        if root_dir.endswith("\\") or root_dir.endswith("//"):
            self.root_dir = root_dir[0:-1]
        else:
            self.root_dir = root_dir
        self.root_dir = file_utils.normalize_path(self.root_dir)
        self.cdir = None
        self.cabsdir = None
        # Only directories starting with one of these prefixes are considered.
        self.prefixes = [file_utils.normalize_path(p) for p in include_prefixes]

    def discover(self):
        """Walk root_dir, build a DiscoveredFile per .py file, then freeze the aggregator."""
        for d, subdlist, flist in os.walk(self.root_dir):
            normalized_d = file_utils.normalize_path(d)
            if flist:
                for f in flist:
                    full_path = file_utils.normalize_path(os.path.abspath(os.path.join(normalized_d, f)))
                    # Keep only files under one of the configured prefixes.
                    consider = False
                    for prefix in self.prefixes:
                        if normalized_d.startswith(prefix):
                            consider = True
                            break
                    if not consider: continue
                    file_ext = file_utils.get_extension(full_path)
                    if file_ext.lower() not in set(['py']): continue
                    parent_dir = file_utils.normalize_path(os.path.dirname(full_path))
                    # Path of the containing package relative to root_dir
                    # ("" when the file sits directly in root_dir).
                    pkg_parent_dir = None  # os.path.commonpath()
                    if parent_dir == self.root_dir:
                        pkg_parent_dir = ""
                    else:
                        pkg_parent_dir = parent_dir[parent_dir.index(self.root_dir) + len(self.root_dir) + 1:]
                    # NOTE(review): file_ext is recomputed here; identical to the value above.
                    file_ext = file_utils.get_extension(full_path)
                    df = DiscoveredFile()
                    df.set_attr(DiscoveredFileAttributeEnum.NAME, file_utils.get_nonext_basename(full_path))
                    df.set_attr(DiscoveredFileAttributeEnum.EXTENSION, file_ext)
                    df.set_attr(DiscoveredFileAttributeEnum.FULL_NAME, f)
                    df.set_attr(DiscoveredFileAttributeEnum.PACKAGE_DOT_NOTATION,
                                pkg_parent_dir.replace("/", "."))
                    df.set_attr(DiscoveredFileAttributeEnum.DIRECTORY_RELATIVE_PATH, pkg_parent_dir)
                    df.set_attr(DiscoveredFileAttributeEnum.DIRECTORY_ABSOLUTE_PATH, parent_dir)
                    # Normalize both separator styles before building the
                    # comma-separated relative path.
                    replaced = df.attr(DiscoveredFileAttributeEnum.DIRECTORY_RELATIVE_PATH).replace("/", "|")
                    replaced = replaced.replace("\\", "|")
                    df.set_attr(DiscoveredFileAttributeEnum.COMMA_SEPATARED_RELATIVE_PATH,
                                ",".join(replaced.split("|")))
                    # df.set_attr(DiscoveredFileAttributeEnum.CONTAINER, attr.NA_STRING)
                    # df.set_attr(DiscoveredFileAttributeEnum.CONTAINER_TYPE, attr.NA_STRING)
                    self.aggregator.add(df)
        self.aggregator.freeze()
| 43.957447 | 115 | 0.626492 |
79591c4b891257d60320b96c04dfa00d8443b0ec | 9,095 | py | Python | lib/primestonelib.py | edgaralenpwn/primestoneSentinel | 71c2d72fb12b90e5ba1832776581fb6e2306a287 | [
"MIT"
] | null | null | null | lib/primestonelib.py | edgaralenpwn/primestoneSentinel | 71c2d72fb12b90e5ba1832776581fb6e2306a287 | [
"MIT"
] | null | null | null | lib/primestonelib.py | edgaralenpwn/primestoneSentinel | 71c2d72fb12b90e5ba1832776581fb6e2306a287 | [
"MIT"
] | null | null | null | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import base58
import hashlib
import re
from decimal import Decimal
import simplejson
import binascii
from misc import printdbg, epoch2str
import time
def is_valid_primestone_address(address, network='mainnet'):
    """Return True if ``address`` is a valid Primestone public-key address.

    Only public key addresses are allowed.  A valid address is a RIPEMD-160
    hash (20 bytes); base58check encoding prepends 1 version byte and appends
    4 checksum bytes, giving roughly 34 characters.

    :param address: candidate address string.
    :param network: 'mainnet' (version byte 76) or 'testnet' (version byte 140).
    """
    primestone_version = 140 if network == 'testnet' else 76
    # Check length first (important because the base58 library has problems
    # with very long inputs, and out-of-range lengths are invalid anyway).
    if ((len(address) < 26) or (len(address) > 35)):
        return False
    address_version = None
    try:
        decoded = base58.b58decode_chk(address)
        address_version = ord(decoded[0:1])
    except Exception:
        # Bug fix: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.  Any decode/checksum failure
        # simply means the address is not valid base58check.
        return False
    if (address_version != primestone_version):
        return False
    return True
def hashit(data):
    """Return the SHA-256 digest of ``data`` (a str) as a big integer."""
    digest_hex = hashlib.sha256(data.encode('utf-8')).hexdigest()
    return int(digest_hex, 16)
# returns the masternode VIN of the elected winner
def elect_mn(**kwargs):
    """Deterministically elect a masternode for a given block.

    The winner is the ENABLED masternode whose vin hash is closest (smallest
    absolute difference) to the hash of the current block hash.  Returns the
    winning vin string, or None when no masternode is enabled.

    Keyword args: ``block_hash`` (str), ``mnlist`` (iterable of masternodes
    with ``status`` and ``vin`` attributes).
    """
    current_block_hash = kwargs['block_hash']
    mn_list = kwargs['mnlist']
    # filter only enabled MNs
    enabled = [mn for mn in mn_list if mn.status == 'ENABLED']
    block_hash_hash = hashit(current_block_hash)
    candidates = []
    for mn in enabled:
        mn_vin_hash = hashit(mn.vin)
        diff = mn_vin_hash - block_hash_hash
        absdiff = abs(diff)
        candidates.append({'vin': mn.vin, 'diff': absdiff})
    candidates.sort(key=lambda k: k['diff'])
    # Bug fix: the original wrapped candidates[0] in a bare ``except:`` that
    # also swallowed unrelated errors; an explicit emptiness check is
    # equivalent for the intended "no candidates" case and safer.
    if candidates:
        winner = candidates[0]['vin']
    else:
        winner = None
    return winner
def parse_masternode_status_vin(status_vin_string):
    """Extract a 'txid-index' vin identifier from a masternode status string.

    Accepts both the 'CTxIn(COutPoint(txid, n),' form and the plain 'txid-n'
    form returned by newer RPC versions.  Returns None for the all-zero
    (unfunded) collateral txid.
    """
    ctxin_pattern = re.compile(r'CTxIn\(COutPoint\(([0-9a-zA-Z]+),\s*(\d+)\),')
    match = ctxin_pattern.match(status_vin_string)
    if match is None:
        # Fall back to the plain 'txid-index' format.
        outpoint_pattern = re.compile(r'([0-9a-zA-Z]+)\-(\d+)')
        match = outpoint_pattern.match(status_vin_string)
    txid = match.group(1)
    index = match.group(2)
    if txid == '0000000000000000000000000000000000000000000000000000000000000000':
        return None
    return txid + '-' + index
def create_superblock(proposals, event_block_height, budget_max, sb_epoch_time):
    """Assemble a Superblock model instance from ranked proposals.

    Skips proposals that would push the running total over ``budget_max`` or
    whose payment window (start/end epoch widened by the fudge window) does
    not contain ``sb_epoch_time``.  Returns None if no proposal qualifies.
    """
    from models import Superblock, GovernanceObject, Proposal
    from constants import SUPERBLOCK_FUDGE_WINDOW
    # don't create an empty superblock
    if (len(proposals) == 0):
        printdbg("No proposals, cannot create an empty superblock.")
        return None
    budget_allocated = Decimal(0)
    fudge = SUPERBLOCK_FUDGE_WINDOW  # fudge-factor to allow for slightly incorrect estimates
    payments = []
    for proposal in proposals:
        fmt_string = "name: %s, rank: %4d, hash: %s, amount: %s <= %s"
        # skip proposals that are too expensive...
        if (budget_allocated + proposal.payment_amount) > budget_max:
            printdbg(
                fmt_string % (
                    proposal.name,
                    proposal.rank,
                    proposal.object_hash,
                    proposal.payment_amount,
                    "skipped (blows the budget)",
                )
            )
            continue
        # skip proposals if the SB isn't within the Proposal time window...
        window_start = proposal.start_epoch - fudge
        window_end = proposal.end_epoch + fudge
        printdbg("\twindow_start: %s" % epoch2str(window_start))
        printdbg("\twindow_end: %s" % epoch2str(window_end))
        printdbg("\tsb_epoch_time: %s" % epoch2str(sb_epoch_time))
        if (sb_epoch_time < window_start or sb_epoch_time > window_end):
            printdbg(
                fmt_string % (
                    proposal.name,
                    proposal.rank,
                    proposal.object_hash,
                    proposal.payment_amount,
                    "skipped (SB time is outside of Proposal window)",
                )
            )
            continue
        printdbg(
            fmt_string % (
                proposal.name,
                proposal.rank,
                proposal.object_hash,
                proposal.payment_amount,
                "adding",
            )
        )
        # else add proposal and keep track of total budget allocation
        budget_allocated += proposal.payment_amount
        payment = {'address': proposal.payment_address,
                   'amount': "{0:.8f}".format(proposal.payment_amount),
                   'proposal': "{}".format(proposal.object_hash)}
        payments.append(payment)
    # don't create an empty superblock
    if not payments:
        printdbg("No proposals made the cut!")
        return None
    # 'payments' now contains all the proposals for inclusion in the
    # Superblock, but needs to be sorted by proposal hash descending
    payments.sort(key=lambda k: k['proposal'], reverse=True)
    sb = Superblock(
        event_block_height=event_block_height,
        payment_addresses='|'.join([pd['address'] for pd in payments]),
        payment_amounts='|'.join([pd['amount'] for pd in payments]),
        proposal_hashes='|'.join([pd['proposal'] for pd in payments]),
    )
    printdbg("generated superblock: %s" % sb.__dict__)
    return sb
# shims 'til we can fix the primestoned side
def SHIM_serialise_for_primestoned(sentinel_hex):
    """Convert Sentinel's govobj hex into the format primestoned expects.

    Adds a numeric 'type' attribute, renames the 'superblock' govtype to
    primestoned's 'trigger', and wraps the object in a one-element array.
    """
    from models import CROWDCOIND_GOVOBJ_TYPES
    # unpack
    obj = deserialise(sentinel_hex)
    # shim for primestoned
    govtype = obj[0]
    # add 'type' attribute
    obj[1]['type'] = CROWDCOIND_GOVOBJ_TYPES[govtype]
    # superblock => "trigger" in primestoned
    if govtype == 'superblock':
        obj[0] = 'trigger'
    # primestoned expects an array (even though there is only a 1:1 relationship between govobj->class)
    obj = [obj]
    # re-pack
    primestoned_hex = serialise(obj)
    return primestoned_hex


# shims 'til we can fix the primestoned side
def SHIM_deserialise_from_primestoned(primestoned_hex):
    """Inverse of SHIM_serialise_for_primestoned: primestoned hex -> Sentinel hex."""
    from models import CROWDCOIND_GOVOBJ_TYPES
    # unpack
    obj = deserialise(primestoned_hex)
    # shim from primestoned
    # only one element in the array...
    obj = obj[0]
    # extract the govobj type
    govtype = obj[0]
    # "trigger" in primestoned => superblock
    if govtype == 'trigger':
        obj[0] = govtype = 'superblock'
    # remove redundant 'type' attribute
    if 'type' in obj[1]:
        del obj[1]['type']
    # re-pack
    sentinel_hex = serialise(obj)
    return sentinel_hex
# convenience
def deserialise(hexdata):
    """Decode hex-encoded JSON into a Python object (numbers kept as Decimal)."""
    json = binascii.unhexlify(hexdata)
    obj = simplejson.loads(json, use_decimal=True)
    return obj


def serialise(dikt):
    """Encode a Python object as hex-encoded, key-sorted JSON (Decimal-aware)."""
    json = simplejson.dumps(dikt, sort_keys=True, use_decimal=True)
    hexdata = binascii.hexlify(json.encode('utf-8')).decode('utf-8')
    return hexdata
def did_we_vote(output):
    """Inspect a vote RPC response and return True if our vote was recorded.

    'Voting too often' errors are treated as not-voted (returns False) so the
    caller syncs with the network instead of retrying.
    """
    from bitcoinrpc.authproxy import JSONRPCException
    # sentinel
    voted = False
    err_msg = ''
    try:
        detail = output.get('detail').get('primestone.conf')
        result = detail.get('result')
        if 'errorMessage' in detail:
            err_msg = detail.get('errorMessage')
    except JSONRPCException as e:
        result = 'failed'
        # NOTE(review): relies on JSONRPCException exposing ``.message`` —
        # confirm against the installed python-bitcoinrpc version.
        err_msg = e.message
    # success, failed
    printdbg("result = [%s]" % result)
    if err_msg:
        printdbg("err_msg = [%s]" % err_msg)
    voted = False
    if result == 'success':
        voted = True
    # in case we spin up a new instance or server, but have already voted
    # on the network and network has recorded those votes
    m_old = re.match(r'^time between votes is too soon', err_msg)
    m_new = re.search(r'Masternode voting too often', err_msg, re.M)
    if result == 'failed' and (m_old or m_new):
        printdbg("DEBUG: Voting too often, need to sync w/network")
        voted = False
    return voted
def parse_raw_votes(raw_votes):
    """Parse a mapping of raw 'outpoint:ntime:outcome:signal' vote strings.

    Returns a list of dicts with normalized (lower-cased) signal/outcome and
    the masternode collateral outpoint extracted from the vin string.
    """
    votes = []
    for raw in list(raw_votes.values()):
        (outpoint, ntime, outcome, signal) = raw.split(':')
        votes.append({
            'mn_collateral_outpoint': parse_masternode_status_vin(outpoint),
            'signal': signal.lower(),
            'outcome': outcome.lower(),
            'ntime': ntime,
        })
    return votes
def blocks_to_seconds(blocks):
    """
    Estimate the number of seconds which will transpire for a given
    number of blocks, assuming an average block time of 2.62 minutes.
    """
    minutes_per_block = 2.62
    return blocks * minutes_per_block * 60
| 29.529221 | 103 | 0.632655 |
79591c806e37cfddd012f73141cd8bc62b037109 | 6,287 | py | Python | models_clevr_snmn/output_unit.py | AgarwalVedika/snmn_cvpr | 494dbc36ae4938d4f16072c4d91e7fbdfd76723f | [
"BSD-2-Clause"
] | null | null | null | models_clevr_snmn/output_unit.py | AgarwalVedika/snmn_cvpr | 494dbc36ae4938d4f16072c4d91e7fbdfd76723f | [
"BSD-2-Clause"
] | null | null | null | models_clevr_snmn/output_unit.py | AgarwalVedika/snmn_cvpr | 494dbc36ae4938d4f16072c4d91e7fbdfd76723f | [
"BSD-2-Clause"
] | null | null | null | import tensorflow as tf
from tensorflow import convert_to_tensor as to_T, newaxis as ax
from .config import cfg
from util.cnn import fc_layer as fc, fc_elu_layer as fc_elu, conv_layer as conv
import ipdb
def build_output_unit_vqa(q_encoding, m_last, num_choices, apply_dropout,
                          scope='output_unit', reuse=None):
    """
    Apply a 2-layer fully-connected network to predict answers. Apply dropout
    if specified.

    Input:
        q_encoding: [N, d], tf.float32
        m_last: [N, d], tf.float32
    Return:
        vqa_scores: [N, num_choices], tf.float32
    """
    output_dim = cfg.MODEL.VQA_OUTPUT_DIM
    with tf.variable_scope(scope, reuse=reuse):
        if cfg.MODEL.VQA_OUTPUT_USE_QUESTION:
            # Fuse the question encoding with the final memory state.
            fc1 = fc_elu(
                'fc1', tf.concat([q_encoding, m_last], axis=1),
                output_dim=output_dim)
        else:
            fc1 = fc_elu('fc1_wo_q', m_last, output_dim=output_dim)
        if apply_dropout:
            fc1 = tf.nn.dropout(fc1, cfg.TRAIN.DROPOUT_KEEP_PROB)
        # Bug fix: removed leftover debugging statements (a print of the
        # dropout keep-prob and an ipdb.set_trace() breakpoint) that halted
        # every graph construction at this point.
        fc2 = fc('fc2', fc1, output_dim=num_choices)
        vqa_scores = fc2
    return vqa_scores
def build_output_unit_loc(q_encoding, kb_batch, att_last,
                          scope='output_unit_loc', reuse=None):
    """
    Apply a 1-layer convolution network to predict localization scores, plus
    a bounding-box regression offset.

    Input:
        kb_batch: [N, H, W, d], tf.float32
        att_last: [N, H, W, 1], tf.float32
    Return:
        loc_scores: [N, H*W], tf.float32
        bbox_offset: [N, 4], tf.float32 (prediction-time offset)
        bbox_offset_fcn: [N, H*W, 4] tf.float32 or None (training-time offsets)
    """
    with tf.variable_scope(scope, reuse=reuse):
        if cfg.MODEL.LOC_SCORES_POS_AFFINE:
            # make sure att signs do not flip
            w = tf.abs(tf.get_variable('loc_scores_affine_raw_w', []))
            b = tf.get_variable('loc_scores_affine_b', [])
            loc_scores = w * att_last + b
        else:
            loc_scores = conv(
                'conv_loc', att_last, kernel_size=3, stride=1, output_dim=1)
        loc_scores = tf.reshape(
            loc_scores, [-1, cfg.MODEL.H_FEAT*cfg.MODEL.W_FEAT])
        # extract the attended features for bounding box regression
        if cfg.MODEL.BBOX_REG_AS_FCN:
            if cfg.MODEL.BBOX_REG_USE_QUESTION:
                # Condition the KB features on the (mapped) question encoding.
                q_mapped = fc(
                    'fc_q_mapped', q_encoding, output_dim=cfg.MODEL.KB_DIM)
                bbox_offset_input = tf.nn.l2_normalize(
                    q_mapped[:, ax, ax, :] * kb_batch, axis=-1)
            else:
                bbox_offset_input = kb_batch
            bbox_offset_fcn = conv(
                'conv_bbox_offset', bbox_offset_input, 1, 1, output_dim=4)
            N = tf.shape(bbox_offset_fcn)[0]
            B = cfg.MODEL.H_FEAT*cfg.MODEL.W_FEAT  # B = H*W
            # bbox_offset_fcn [N, B, 4] is used for training
            bbox_offset_fcn = tf.reshape(bbox_offset_fcn, to_T([N, B, 4]))
            # bbox_offset [N, 4] is only used for prediction: pick, per
            # example, the offset at the argmax localization score.
            bbox_offset_flat = tf.reshape(bbox_offset_fcn, to_T([N*B, 4]))
            slice_inds = tf.range(N) * B + tf.argmax(
                loc_scores, axis=-1, output_type=tf.int32)
            bbox_offset = tf.gather(bbox_offset_flat, slice_inds)
        else:
            bbox_offset_fcn = None
            # Regress a single offset from the attention-pooled KB feature.
            kb_loc = _extract_softmax_avg(kb_batch, att_last)
            if cfg.MODEL.BBOX_REG_USE_QUESTION:
                q_mapped = fc(
                    'fc_q_mapped', q_encoding, output_dim=cfg.MODEL.KB_DIM)
                elt_prod = tf.nn.l2_normalize(q_mapped * kb_loc, axis=-1)
                bbox_offset = fc(
                    'fc_bbox_offset_with_q', elt_prod, output_dim=4)
            else:
                bbox_offset = fc('fc_bbox_offset', kb_loc, output_dim=4)
    return loc_scores, bbox_offset, bbox_offset_fcn
def build_output_unit_rec(rec_inputs, input_seq_batch, embed_seq,
                          seq_length_batch, num_vocab, scope='output_unit_rec',
                          reuse=None):
    """
    Try to reconstruct the input sequence from the controller outputs with a
    seq-to-seq LSTM.

    Input:
        rec_inputs: [T, N, ?], tf.float32
        input_seq_batch: [S, N], tf.int32
        embed_seq: [S, N, e], tf.float32
        seq_length_batch: [N], tf.int32
    Return:
        loss_rec: [], tf.float32
    """
    with tf.variable_scope(scope, reuse=reuse):
        S = tf.shape(input_seq_batch)[0]
        N = tf.shape(input_seq_batch)[1]
        lstm_dim = cfg.MODEL.LSTM_DIM
        # encoder: summarize the controller outputs into an LSTM state.
        cell_encoder = tf.nn.rnn_cell.BasicLSTMCell(lstm_dim, name='c_encoder')
        _, states_encoder = tf.nn.dynamic_rnn(
            cell_encoder, rec_inputs, dtype=tf.float32, time_major=True)
        # decoder: teacher-forced with the embeddings shifted right by one step.
        cell_decoder = tf.nn.rnn_cell.BasicLSTMCell(lstm_dim, name='c_decoder')
        embed_seq_shifted = tf.concat(
            [tf.zeros_like(embed_seq[:1]), embed_seq[:-1]], axis=0)
        outputs_decoder, _ = tf.nn.dynamic_rnn(
            cell_decoder, embed_seq_shifted, sequence_length=seq_length_batch,
            initial_state=states_encoder, time_major=True)
        # word prediction
        outputs_flat = tf.reshape(outputs_decoder, to_T([S*N, lstm_dim]))
        word_scores_flat = fc(
            'fc_word_scores', outputs_flat, output_dim=num_vocab)
        word_scores = tf.reshape(word_scores_flat, to_T([S, N, num_vocab]))
        # cross-entropy loss over the actual sequence words,
        # masked to each sequence's true length and length-normalized.
        # att_mask: [S, N]
        att_mask = tf.less(tf.range(S)[:, ax], seq_length_batch)
        att_mask = tf.cast(att_mask, tf.float32)
        loss_rec = tf.reduce_sum(
            att_mask * tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=word_scores, labels=input_seq_batch)) / tf.reduce_sum(
                    att_mask)
    return loss_rec
def _spatial_softmax(att_raw):
    """Softmax over the flattened non-batch dims of attention logits, shape kept."""
    att_shape = tf.shape(att_raw)
    N = att_shape[0]
    att_softmax = tf.nn.softmax(tf.reshape(att_raw, to_T([N, -1])), axis=1)
    att_softmax = tf.reshape(att_softmax, att_shape)
    return att_softmax


def _extract_softmax_avg(kb_batch, att_raw):
    """Attention-weighted average of kb_batch over axes 1 and 2 (H and W)."""
    att_softmax = _spatial_softmax(att_raw)
    return tf.reduce_sum(kb_batch * att_softmax, axis=[1, 2])
| 39.540881 | 79 | 0.613806 |
79591cea06b9747769fcf0c59477363e68c0095a | 10,032 | py | Python | sdk/core/azure-core/azure/core/pipeline/transport/_requests_basic.py | ellismg/azure-sdk-for-python | 9e1e067feddd23c90b17be03b7f4f85d12f7d05b | [
"MIT"
] | null | null | null | sdk/core/azure-core/azure/core/pipeline/transport/_requests_basic.py | ellismg/azure-sdk-for-python | 9e1e067feddd23c90b17be03b7f4f85d12f7d05b | [
"MIT"
] | null | null | null | sdk/core/azure-core/azure/core/pipeline/transport/_requests_basic.py | ellismg/azure-sdk-for-python | 9e1e067feddd23c90b17be03b7f4f85d12f7d05b | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from __future__ import absolute_import
import logging
from typing import Iterator, Optional, Any, Union, TypeVar
import time
import urllib3 # type: ignore
from urllib3.util.retry import Retry # type: ignore
import requests
from azure.core.configuration import ConnectionConfiguration
from azure.core.exceptions import (
ServiceRequestError,
ServiceResponseError
)
from . import HttpRequest # pylint: disable=unused-import
from ._base import (
HttpTransport,
HttpResponse,
_HttpResponseBase
)
# Type variable used in the `# type:` comments for the pipeline argument of stream_download.
PipelineType = TypeVar("PipelineType")
# Module-level logger; used to report streaming-download failures.
_LOGGER = logging.getLogger(__name__)
class _RequestsTransportResponseBase(_HttpResponseBase):
    """Common logic for wrapping a response produced by the ``requests`` library.

    :param HttpRequest request: The request that produced this response.
    :param requests_response: The raw response object returned by ``requests``.
    :param int block_size: Chunk size, in bytes, used when streaming the body.
    """
    def __init__(self, request, requests_response, block_size=None):
        super(_RequestsTransportResponseBase, self).__init__(request, requests_response, block_size=block_size)
        # Mirror the commonly used fields of the underlying response so
        # callers never need to reach into ``internal_response`` directly.
        self.status_code = requests_response.status_code
        self.headers = requests_response.headers
        self.reason = requests_response.reason
        self.content_type = requests_response.headers.get('content-type')

    def body(self):
        """Return the whole (non-streamed) response payload as bytes."""
        return self.internal_response.content

    def text(self, encoding=None):
        """Return the body decoded as text, optionally forcing an encoding."""
        if encoding:
            # Override the encoding ``requests`` guessed before decoding.
            self.internal_response.encoding = encoding
        return self.internal_response.text
class StreamDownloadGenerator(object):
    """Generator for streaming response data.

    Yields chunks of the response body, transparently retrying with an HTTP
    ``range`` request when the connection drops mid-download.

    :param pipeline: The pipeline object, used to re-issue the request on retry.
    :param response: The response object being streamed.
    """
    def __init__(self, pipeline, response):
        self.pipeline = pipeline
        self.request = response.request
        self.response = response
        self.block_size = response.block_size
        self.iter_content_func = self.response.internal_response.iter_content(self.block_size)
        self.content_length = int(response.headers.get('Content-Length', 0))
        # Bytes actually handed to the caller so far; used to build the
        # ``range`` header when resuming after a dropped connection.
        self.downloaded = 0

    def __len__(self):
        return self.content_length

    def __iter__(self):
        return self

    def __next__(self):
        retry_active = True
        retry_total = 3
        retry_interval = 1000
        while retry_active:
            try:
                chunk = next(self.iter_content_func)
                if not chunk:
                    raise StopIteration()
                # BUGFIX: count the actual chunk length, not the nominal block
                # size -- the final chunk is usually shorter, and overcounting
                # would make the resume "range" header skip real data.
                self.downloaded += len(chunk)
                return chunk
            except StopIteration:
                self.response.internal_response.close()
                raise StopIteration()
            except (requests.exceptions.ChunkedEncodingError,
                    requests.exceptions.ConnectionError):
                retry_total -= 1
                if retry_total <= 0:
                    retry_active = False
                else:
                    time.sleep(retry_interval)
                    headers = {'range': 'bytes=' + str(self.downloaded) + '-'}
                    resp = self.pipeline.run(self.request, stream=True, headers=headers)
                    # BUGFIX: pipeline.run returns a PipelineResponse; the HTTP
                    # status lives on its .http_response attribute.
                    if resp.http_response.status_code == 416:
                        raise
                    # BUGFIX: continue reading from the *new* response's body;
                    # the old iterator belongs to the broken connection.
                    self.iter_content_func = resp.http_response.internal_response.iter_content(
                        self.block_size
                    )
                    chunk = next(self.iter_content_func)
                    if not chunk:
                        raise StopIteration()
                    self.downloaded += len(chunk)
                    return chunk
                continue
            except requests.exceptions.StreamConsumedError:
                raise
            except Exception as err:
                _LOGGER.warning("Unable to stream download: %s", err)
                self.response.internal_response.close()
                raise

    next = __next__  # Python 2 compatibility.
class RequestsTransportResponse(HttpResponse, _RequestsTransportResponseBase):
    """Synchronous HTTP response that supports streaming its body."""

    def stream_download(self, pipeline):
        # type: (PipelineType) -> Iterator[bytes]
        """Return a generator that yields the response body in chunks.

        :param pipeline: The pipeline used to retry the download if needed.
        """
        downloader = StreamDownloadGenerator(pipeline, self)
        return downloader
class RequestsTransport(HttpTransport):
    """Implements a basic requests HTTP sender.
    Since requests team recommends to use one session per requests, you should
    not consider this class as thread-safe, since it will use one Session
    per instance.
    In this simple implementation:
    - You provide the configured session if you want to, or a basic session is created.
    - All kwargs received by "send" are sent to session.request directly
    :keyword requests.Session session: Request session to use instead of the default one.
    :keyword bool session_owner: Decide if the session provided by user is owned by this transport. Default to True.
    :keyword bool use_env_settings: Uses proxy settings from environment. Defaults to True.
    .. admonition:: Example:
        .. literalinclude:: ../examples/test_example_sync.py
            :start-after: [START requests]
            :end-before: [END requests]
            :language: python
            :dedent: 4
            :caption: Synchronous transport with Requests.
    """
    _protocols = ['http://', 'https://']
    def __init__(self, **kwargs):
        # type: (Any) -> None
        # The session may be user-supplied; creation of an owned session is
        # deferred to open() so construction stays cheap.
        self.session = kwargs.get('session', None)
        # When True, this transport is responsible for closing the session.
        self._session_owner = kwargs.get('session_owner', True)
        self.connection_config = ConnectionConfiguration(**kwargs)
        self._use_env_settings = kwargs.pop('use_env_settings', True)
    def __enter__(self):
        # type: () -> RequestsTransport
        """Open the underlying session when entering a ``with`` block."""
        self.open()
        return self
    def __exit__(self, *args): # pylint: disable=arguments-differ
        """Close the underlying session (if owned) on ``with`` exit."""
        self.close()
    def _init_session(self, session):
        # type: (requests.Session) -> None
        """Init session level configuration of requests.
        This is initialization I want to do once only on a session.
        """
        session.trust_env = self._use_env_settings
        # Disable urllib3's built-in retries; retry behavior is expected to be
        # driven at a higher level (a pipeline policy), not by the adapter.
        disable_retries = Retry(total=False, redirect=False, raise_on_status=False)
        adapter = requests.adapters.HTTPAdapter(max_retries=disable_retries)
        for p in self._protocols:
            session.mount(p, adapter)
    def open(self):
        """Create and configure an owned session if none exists yet."""
        if not self.session and self._session_owner:
            self.session = requests.Session()
            self._init_session(self.session)
    def close(self):
        """Close the session if owned, then drop the reference to it."""
        if self._session_owner:
            self.session.close()
            self._session_owner = False
            self.session = None
    def send(self, request, **kwargs): # type: ignore
        # type: (HttpRequest, Any) -> HttpResponse
        """Send request object according to configuration.
        :param request: The request object to be sent.
        :type request: ~azure.core.pipeline.transport.HttpRequest
        :return: An HTTPResponse object.
        :rtype: ~azure.core.pipeline.transport.HttpResponse
        :keyword requests.Session session: will override the driver session and use yours.
         Should NOT be done unless really required. Anything else is sent straight to requests.
        :keyword dict proxies: will define the proxy to use. Proxy is a dict (protocol, url)
        """
        self.open()
        response = None
        error = None # type: Optional[Union[ServiceRequestError, ServiceResponseError]]
        try:
            response = self.session.request( # type: ignore
                request.method,
                request.url,
                headers=request.headers,
                data=request.data,
                files=request.files,
                verify=kwargs.pop('connection_verify', self.connection_config.verify),
                timeout=kwargs.pop('connection_timeout', self.connection_config.timeout),
                cert=kwargs.pop('connection_cert', self.connection_config.cert),
                allow_redirects=False,
                **kwargs)
        except (urllib3.exceptions.NewConnectionError, urllib3.exceptions.ConnectTimeoutError) as err:
            # Connection was never established -> request-level error.
            error = ServiceRequestError(err, error=err)
        except requests.exceptions.ReadTimeout as err:
            # The request went out but the server did not answer in time.
            error = ServiceResponseError(err, error=err)
        except requests.exceptions.ConnectionError as err:
            # A ProtocolError wrapped inside a ConnectionError means the
            # exchange started (response-level); otherwise it never did.
            if err.args and isinstance(err.args[0], urllib3.exceptions.ProtocolError):
                error = ServiceResponseError(err, error=err)
            else:
                error = ServiceRequestError(err, error=err)
        except requests.RequestException as err:
            error = ServiceRequestError(err, error=err)
        if error:
            raise error
        return RequestsTransportResponse(request, response, self.connection_config.data_block_size)
| 39.341176 | 116 | 0.648724 |
79591dbf5284c5487705229bc3eb520aca27d1d6 | 14,974 | py | Python | harness/tests/experiment/utils.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
] | null | null | null | harness/tests/experiment/utils.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
] | null | null | null | harness/tests/experiment/utils.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
] | null | null | null | import os
import pathlib
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type
import numpy as np
import pytest
from mypy_extensions import DefaultNamedArg
from tensorflow.keras import utils as keras_utils
import determined as det
from determined import core, gpu, keras, workload
class TrainAndValidate:
    """
    Offer a similar interface as WorkloadResponseInterceptor, except let send() yield a whole
    progression of RUN_STEP and COMPUTE_VALIDATION_METRICS, and let result() return the accumulated
    metrics from each.
    """
    def __init__(self, request_stop_step_id: Optional[int] = None) -> None:
        # Accumulated per-batch training metrics across all steps.
        self._training_metrics = None # type: Optional[List[Dict[str, Any]]]
        # Per-step averaged training metrics (one dict per step).
        self._avg_training_metrics = None # type: Optional[List[Dict[str, Any]]]
        # Validation metrics gathered at each validation point.
        self._validation_metrics = None # type: Optional[List[Dict[str, Any]]]
        # If set, the trial is expected to request a stop at exactly this step.
        self.request_stop_step_id = request_stop_step_id
        # Total number of batches processed so far.
        self._steps_completed = 0
    def send(
        self,
        steps: int,
        validation_freq: int,
        initial_step_id: int = 1,
        scheduling_unit: int = 1,
        train_batch_calls: int = 1,
    ) -> workload.Stream:
        """Yield `steps` train workloads, validating every `validation_freq`
        steps, and record the metrics each workload reports."""
        self._training_metrics = []
        self._avg_training_metrics = []
        self._validation_metrics = []
        self._steps_completed = 0
        interceptor = workload.WorkloadResponseInterceptor()
        for step_id in range(initial_step_id, initial_step_id + steps):
            stop_requested = False
            yield from interceptor.send(
                workload.train_workload(
                    step_id,
                    num_batches=scheduling_unit,
                    total_batches_processed=self._steps_completed,
                ),
            )
            metrics = interceptor.metrics_result()
            batch_metrics = metrics["metrics"]["batch_metrics"]
            # Expect one batch-metrics dict per batch per train call.
            assert len(batch_metrics) == scheduling_unit * train_batch_calls
            self._training_metrics.extend(batch_metrics)
            self._avg_training_metrics.append(metrics["metrics"]["avg_metrics"])
            self._steps_completed += scheduling_unit
            if metrics.get("stop_requested"):
                # A stop may only be requested at the pre-arranged step.
                assert step_id == self.request_stop_step_id, (step_id, self)
                stop_requested = True
            if step_id % validation_freq == 0:
                yield from interceptor.send(
                    workload.validation_workload(
                        step_id, total_batches_processed=self._steps_completed
                    ),
                )
                validation = interceptor.metrics_result()
                v_metrics = validation["metrics"]["validation_metrics"]
                self._validation_metrics.append(v_metrics)
                if validation.get("stop_requested"):
                    assert step_id == self.request_stop_step_id
                    stop_requested = True
            if stop_requested:
                break
        else:
            # for/else: the loop ran to completion, so no stop was requested;
            # the pre-arranged stop step (if any) must not have been reached.
            assert step_id != self.request_stop_step_id
    def result(self) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
        """Return (training_metrics, validation_metrics) recorded by send()."""
        assert self._training_metrics is not None
        assert self._validation_metrics is not None
        return self._training_metrics, self._validation_metrics
    def get_steps_completed(self) -> int:
        """Return the total number of batches processed so far."""
        return self._steps_completed
    def get_avg_training_metrics(self) -> List[Dict[str, Any]]:
        """Return the per-step averaged training metrics."""
        assert self._avg_training_metrics is not None
        return self._avg_training_metrics
def make_default_exp_config(
    hparams: Dict[str, Any],
    scheduling_unit: int,
    searcher_metric: str,
    checkpoint_dir: Optional[str] = None,
) -> Dict:
    """Build a minimal experiment configuration suitable for unit tests.

    :param hparams: hyperparameters to embed in the config.
    :param scheduling_unit: number of batches per training step.
    :param searcher_metric: metric name the searcher optimizes.
    :param checkpoint_dir: host path for shared-fs checkpoints ("/tmp" if None).
    """
    optimizations = {
        "mixed_precision": "O0",
        "aggregation_frequency": 1,
        "gradient_compression": False,
        "average_training_metrics": False,
        "auto_tune_tensor_fusion": False,
        "tensor_fusion_threshold": 100,
        "tensor_fusion_cycle_time": 3.5,
    }
    config = {
        "scheduling_unit": scheduling_unit,
        "resources": {"native_parallel": False, "slots_per_trial": 1},
        "hyperparameters": hparams,
        "optimizations": optimizations,
        "data_layer": {"type": "shared_fs"},
        "checkpoint_storage": {
            "type": "shared_fs",
            "host_path": checkpoint_dir or "/tmp",
        },
        "searcher": {"metric": searcher_metric},
    }
    return config
def make_default_env_context(
    hparams: Dict[str, Any],
    experiment_config: Dict,
    trial_seed: int = 0,
    latest_checkpoint: Optional[str] = None,
    steps_completed: int = 0,
    expose_gpus: bool = False,
) -> det.EnvContext:
    """Build a det.EnvContext with test-friendly defaults.

    ``latest_checkpoint`` and ``steps_completed`` must be supplied together:
    a checkpoint implies nonzero progress and vice versa.
    """
    assert (latest_checkpoint is None) == (steps_completed == 0)
    if expose_gpus:
        # Detect any real GPUs on the machine; fall back to CPU when none exist.
        gpu_uuids = gpu.get_gpu_uuids()
        use_gpu = bool(gpu_uuids)
    else:
        gpu_uuids = []
        use_gpu = False
    return det.EnvContext(
        experiment_config=experiment_config,
        master_url="",
        master_cert_file=None,
        master_cert_name=None,
        hparams=hparams,
        latest_checkpoint=latest_checkpoint,
        steps_completed=steps_completed,
        use_gpu=use_gpu,
        container_gpus=gpu_uuids,
        slot_ids=[],
        debug=False,
        det_trial_unique_port_offset=0,
        det_trial_id="1",
        det_experiment_id="1",
        det_agent_id="1",
        det_cluster_id="uuid-123",
        trial_seed=trial_seed,
        trial_run_id=1,
        allocation_id="",
        managed_training=True,
        test_mode=False,
        on_cluster=False,
    )
def fixtures_path(path: str) -> str:
    """Return the path of *path* inside the ``fixtures`` dir next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, "fixtures", path)
def repo_path(path: str) -> str:
    """Return the path of *path* relative to the repo root (three levels up)."""
    root = os.path.join(os.path.dirname(__file__), "../../../")
    return os.path.join(root, path)
def assert_equivalent_metrics(metrics_A: Dict[str, Any], metrics_B: Dict[str, Any]) -> None:
    """
    Helper function to verify that two dictionaries of metrics are equivalent
    to each other.

    Floats are compared approximately, numpy arrays element-wise, and all
    other values with ``==``.

    :raises AssertionError: if the two metric dictionaries differ.
    """
    assert set(metrics_A.keys()) == set(metrics_B.keys())
    for key in metrics_A.keys():
        # np.floating covers every numpy float scalar type; the bare `np.float`
        # alias used previously was removed in NumPy 1.24.
        if isinstance(metrics_A[key], (float, np.floating)):
            assert metrics_A[key] == pytest.approx(metrics_B[key])
        elif isinstance(metrics_A[key], np.ndarray):
            assert np.array_equal(metrics_A[key], metrics_B[key])
        else:
            assert metrics_A[key] == metrics_B[key]
def xor_data(dtype: np.dtype = np.int64) -> Tuple[np.ndarray, np.ndarray]:
    """Return the four XOR input rows and their labels as numpy arrays."""
    inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
    labels = [a ^ b for a, b in inputs]
    return np.array(inputs, dtype=dtype), np.array(labels, dtype=dtype)
def make_xor_data_sequences(
    shuffle: bool = False,
    seed: Optional[int] = None,
    dtype: np.dtype = np.int64,
    multi_input_output: bool = False,
    batch_size: int = 1,
) -> Tuple[keras_utils.Sequence, keras_utils.Sequence]:
    """
    Generates data loaders for the toy XOR problem. The dataset only has four
    possible inputs. For the purposes of testing, the validation set is the
    same as the training dataset.
    """
    # NOTE(review): `multi_input_output` is accepted but never read here.
    training_data, training_labels = xor_data(dtype)
    if shuffle:
        if seed is not None:
            np.random.seed(seed)
        # Apply the same random permutation to inputs and labels.
        idxs = np.random.permutation(4)
        training_data = training_data[idxs]
        training_labels = training_labels[idxs]
    return (
        keras._ArrayLikeAdapter(training_data, training_labels, batch_size=batch_size),
        keras._ArrayLikeAdapter(training_data, training_labels, batch_size=batch_size),
    )
def make_trial_controller_from_trial_implementation(
    trial_class: Type[det.Trial],
    hparams: Dict,
    workloads: workload.Stream,
    scheduling_unit: int = 1,
    trial_seed: int = 0,
    exp_config: Optional[Dict] = None,
    checkpoint_dir: Optional[str] = None,
    latest_checkpoint: Optional[str] = None,
    steps_completed: int = 0,
    expose_gpus: bool = False,
) -> det.TrialController:
    """Instantiate a trial controller for `trial_class`, wired up for unit tests.

    When `exp_config` is omitted, a default config is synthesized using the
    trial class's `_searcher_metric` attribute.
    """
    if not exp_config:
        assert hasattr(
            trial_class, "_searcher_metric"
        ), "Trial classes for unit tests should be annotated with a _searcher_metric attribute"
        searcher_metric = trial_class._searcher_metric # type: ignore
        exp_config = make_default_exp_config(
            hparams, scheduling_unit, searcher_metric, checkpoint_dir=checkpoint_dir
        )
    env = make_default_env_context(
        hparams=hparams,
        experiment_config=exp_config,
        trial_seed=trial_seed,
        latest_checkpoint=latest_checkpoint,
        steps_completed=steps_completed,
        expose_gpus=expose_gpus,
    )
    # Checkpoints go to a shared-fs manager rooted at checkpoint_dir (or /tmp).
    storage_manager = det.common.storage.SharedFSStorageManager(checkpoint_dir or "/tmp")
    core_context = core._dummy_init(storage_manager=storage_manager)
    distributed_backend = det._DistributedBackend()
    controller_class = trial_class.trial_controller_class
    assert controller_class is not None
    # Give the controller a chance to do framework-specific setup before the
    # trial itself is constructed.
    controller_class.pre_execute_hook(env, distributed_backend)
    trial_context = trial_class.trial_context_class(core_context, env)
    trial_inst = trial_class(trial_context)
    return controller_class.from_trial(
        trial_inst=trial_inst,
        context=trial_context,
        env=env,
        workloads=workloads,
    )
def reproducibility_test(
    controller_fn: Callable[[workload.Stream], det.TrialController],
    steps: int,
    validation_freq: int,
    seed: int = 123,
    scheduling_unit: int = 1,
) -> Tuple[
    Tuple[Sequence[Dict[str, Any]], Sequence[Dict[str, Any]]],
    Tuple[Sequence[Dict[str, Any]], Sequence[Dict[str, Any]]],
]:
    """Run two trials with the same seed and assert their metrics are identical.

    Returns ((train_A, val_A), (train_B, val_B)) for further inspection.
    """
    training_metrics = {}
    validation_metrics = {}
    def make_workloads(tag: str) -> workload.Stream:
        # Record the metrics of one full run under the given tag.
        nonlocal training_metrics
        nonlocal validation_metrics
        trainer = TrainAndValidate()
        yield from trainer.send(steps, validation_freq, scheduling_unit=scheduling_unit)
        tm, vm = trainer.result()
        training_metrics[tag] = tm
        validation_metrics[tag] = vm
    # Trial A
    os.environ["DET_TRIAL_SEED"] = str(seed)
    controller_A = controller_fn(make_workloads("A"))
    controller_A.run()
    # Trial B
    assert os.environ["DET_TRIAL_SEED"] == str(seed)
    controller_B = controller_fn(make_workloads("B"))
    controller_B.run()
    # Both runs must produce the same number of metrics, pairwise equivalent.
    assert len(training_metrics["A"]) == len(training_metrics["B"])
    for A, B in zip(training_metrics["A"], training_metrics["B"]):
        assert_equivalent_metrics(A, B)
    assert len(validation_metrics["A"]) == len(validation_metrics["B"])
    for A, B in zip(validation_metrics["A"], validation_metrics["B"]):
        assert_equivalent_metrics(A, B)
    return (
        (training_metrics["A"], validation_metrics["A"]),
        (training_metrics["B"], validation_metrics["B"]),
    )
# Signature of a controller factory that can also restore from a checkpoint;
# mypy_extensions.DefaultNamedArg declares the optional keyword arguments.
RestorableMakeControllerFn = Callable[
    [
        workload.Stream,
        DefaultNamedArg(Optional[str], "checkpoint_dir"), # noqa: F821
        DefaultNamedArg(Optional[str], "latest_checkpoint"), # noqa: F821
        DefaultNamedArg(int, "steps_completed"), # noqa: F821
    ],
    det.TrialController,
]
def train_and_validate(
    make_trial_controller_fn: Callable[[workload.Stream], det.TrialController],
    steps: int = 2,
) -> Tuple[Sequence[Dict[str, Any]], Sequence[Dict[str, Any]]]:
    """Run `steps` steps of 10 batches each, validating after every step.

    Returns the accumulated (training_metrics, validation_metrics).
    """
    metrics: Dict[str, Any] = {"training": [], "validation": []}
    def make_workloads(steps: int) -> workload.Stream:
        trainer = TrainAndValidate()
        yield from trainer.send(steps, validation_freq=1, scheduling_unit=10)
        tm, vm = trainer.result()
        metrics["training"] += tm
        metrics["validation"] += vm
    controller = make_trial_controller_fn(make_workloads(steps))
    controller.run()
    return (metrics["training"], metrics["validation"])
def checkpointing_and_restoring_test(
    make_trial_controller_fn: RestorableMakeControllerFn, tmp_path: pathlib.Path
) -> Tuple[Sequence[Dict[str, Any]], Sequence[Dict[str, Any]]]:
    """
    Tests if a trial controller of any framework can checkpoint and restore from that checkpoint
    without state changes.
    This test runs two trials.
    1) Trial A runs for one step of 100 batches, checkpoints itself, and restores from
    that checkpoint.
    2) Trial B runs for two steps of 100 batches.
    This test compares the training and validation metrics history of the two trials,
    and returns the two training-metrics histories.
    """
    training_metrics = {"A": [], "B": []} # type: Dict[str, List[workload.Metrics]]
    validation_metrics = {"A": [], "B": []} # type: Dict[str, List[workload.Metrics]]
    checkpoint_dir = str(tmp_path.joinpath("checkpoint"))
    latest_checkpoint = None
    steps_completed = 0
    def make_workloads(steps: int, tag: str, checkpoint: bool) -> workload.Stream:
        trainer = TrainAndValidate()
        yield from trainer.send(steps, validation_freq=1, scheduling_unit=100)
        tm, vm = trainer.result()
        training_metrics[tag] += tm
        validation_metrics[tag] += vm
        # BUGFIX: `checkpoint` is a bool, so the previous `is not None` test
        # was always true and every run emitted a checkpoint workload; only
        # checkpoint when explicitly requested.
        if checkpoint:
            interceptor = workload.WorkloadResponseInterceptor()
            yield from interceptor.send(workload.checkpoint_workload())
            nonlocal latest_checkpoint, steps_completed
            latest_checkpoint = interceptor.metrics_result()["uuid"]
            steps_completed = trainer.get_steps_completed()
    # Trial A, phase 1: train one step and take a checkpoint.
    controller_A1 = make_trial_controller_fn(
        make_workloads(1, "A", True),
        checkpoint_dir=checkpoint_dir,
    )
    controller_A1.run()
    assert latest_checkpoint is not None, "make_workloads did not set the latest_checkpoint"
    # Trial A, phase 2: restore from that checkpoint and train one more step.
    controller_A2 = make_trial_controller_fn(
        make_workloads(1, "A", False),
        checkpoint_dir=checkpoint_dir,
        latest_checkpoint=latest_checkpoint,
        steps_completed=steps_completed,
    )
    controller_A2.run()
    # Trial B: train two steps straight through with no checkpointing.
    controller_B = make_trial_controller_fn(make_workloads(2, "B", False))
    controller_B.run()
    for A, B in zip(training_metrics["A"], training_metrics["B"]):
        assert_equivalent_metrics(A, B)
    for A, B in zip(validation_metrics["A"], validation_metrics["B"]):
        assert_equivalent_metrics(A, B)
    return (training_metrics["A"], training_metrics["B"])
def list_all_files(directory: str) -> List[str]:
    """Recursively collect the basenames of every file under *directory*."""
    collected: List[str] = []
    for _root, _dirs, files in os.walk(directory):
        collected.extend(files)
    return collected
def ensure_requires_global_batch_size(
    trial_class: Type[det.Trial],
    hparams: Dict[str, Any],
) -> None:
    """Assert that building a controller for `trial_class` without the
    `global_batch_size` hyperparameter raises InvalidExperimentException."""
    # Copy the hparams and drop the required key.
    bad_hparams = dict(hparams)
    del bad_hparams["global_batch_size"]
    def make_workloads() -> workload.Stream:
        trainer = TrainAndValidate()
        yield from trainer.send(steps=1, validation_freq=1)
    # Catch missing global_batch_size.
    with pytest.raises(det.errors.InvalidExperimentException, match="is a required hyperparameter"):
        _ = make_trial_controller_from_trial_implementation(
            trial_class,
            bad_hparams,
            make_workloads(),
        )
| 34.422989 | 100 | 0.656872 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.